Dataset columns:
  query            stringlengths   9 to 3.4k
  document         stringlengths   9 to 87.4k
  metadata         dict
  negatives        listlengths     4 to 101
  negative_scores  listlengths     4 to 101
  document_score   stringlengths   3 to 10
  document_rank    stringclasses   102 values
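The metadata on each row marks it for a triplet objective over (query, document, negatives). A minimal sketch (not part of the dataset; the field names come from the schema above, everything else is illustrative) of expanding one row into training triplets:

```python
def row_to_triplets(row, max_negatives=None):
    """Yield (query, positive, negative) tuples for a triplet objective.

    `row` is assumed to be a dict keyed by the column names listed above.
    """
    query = row["query"]
    positive = row["document"]
    negatives = row["negatives"]
    if max_negatives is not None:
        negatives = negatives[:max_negatives]
    for negative in negatives:
        yield (query, positive, negative)

# Example usage: triplets = list(row_to_triplets(row, max_negatives=4))
```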
query: Private. Resets the traversal state of all nodes in preparation for a new traversal.
document:
def _reset_traversal_state(self):
    for n in self.nodes.values():
        n.reset_traversal_state()
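The traversal documents later in this section read node.color, node.discovery_time, node.finishing_time and node.predecessor, so the per-node reset presumably clears exactly those fields. A hedged sketch (the ProcessNode and NodeColor names are taken from the other documents here; the attribute list is an assumption):

```python
import enum

class NodeColor(enum.Enum):
    WHITE = 0  # undiscovered
    GRAY = 1   # discovered, not yet fully explored
    BLACK = 2  # fully explored

class ProcessNode:
    def __init__(self, name):
        self.name = name
        self.reset_traversal_state()

    def reset_traversal_state(self):
        # Assumed fields, based on what the traversal code in this section touches.
        self.color = NodeColor.WHITE
        self.discovery_time = None
        self.finishing_time = None
        self.predecessor = None
```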
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def reset_visited(self):\n self.__visited = False", "def reset_graph(self):\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)", "def reset(self):\n for c in self.children:\n c.reset()\n self.marked = False", "def reset(self):\n self.state = EvaluationState.ready\n\n for child in self.children:\n if hasattr(child, \"reset\"):\n child.reset()", "def reset(self):\r\n self.key = None\r\n self.value = None\r\n self.parent = None\r\n self.left_child = None\r\n self.right_child = None\r\n self.color = BLACK\r\n self.size_tree = 0", "def reset(self):\n for parent in self.GetParents():\n parent.reset()", "def reset(self):\r\n self.tree = KDTree()\r\n self.paint()", "def reset(self):\n self.visited = set()\n del self.targets[0]", "def ResetGraph(self):\n self.nodes = []\n self.edges = []\n self.connections = []\n Node.resetNodeCount()\n Edge.resetEdgesCount()", "def reset(self):\n self.nodes = []\n self.start = self.start\n self.end = self.end\n\n for row in self.charMap:\n for c in row:\n if c == \"2\":\n c.c = \"0\"\n self.n_checked = 0", "def reset(self):\n SGMLParser.reset(self)\n self.__depth = 0\n self.__inobject = False\n self.__param = {}\n\n # this a critical data structure,\n self.__nodeTree = [[], ]", "def reset(self):\n\t\tself.graph = OrderedDict()\n\t\tself.bottoms = OrderedDict()\n\t\tself.output_shape = OrderedDict()\n\t\tself.cur_tensor = None\n\t\tself.cur_id = None\n\t\tself.tmp_list = []\n\t\tself.log_init()", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def reset(self):\n self.steps = 0\n self.state = 0\n self.trajectory = []", "def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def reset(self):\n self.liidx = 0\n self.clidx = 0", "def reset_states(self) -> None:\n self._metric.reset_states()\n # for each child log\n for child in self.children_real_fake:\n child[0].reset_states()\n child[1].reset_states()", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def reset(self):\n self.children.clear()", "def _reset_state(self):\n # Directed graph, (u, v) => v depends on u. 
u, v are pairs of (rule_name, rule_dir_abs)\n # Used for generating Topological Sort\n self._rule_to_dependency_graph_adjlist = {}\n self._topologically_sorted_build_rule_names = []\n\n # List of (dependency_name, dependency_dir_abs) for each build rule\n self._rule_to_dependency_list = {}\n\n # Space for rough work :P\n self._unresolved_commands = set()", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def clear(self) -> None:\n self.node.prev = self.node.next = self.node", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def reset(self):\n\t\tself.pos = self.start\n\n\t\tself.weighted_n_left = 0.0\n\t\tself.weighted_n_right = self.weighted_n_node_samples\n\n\t\tself.label_count_left \t= np.zeros(self.n_classes)\n\t\tself.label_count_right \t= np.copy(self.label_count_total)", "def reset(cls):\r\n cls._ROOTS_BY_TYPE = {}\r\n cls._TYPES_BY_ROOT = {}\r\n cls._SEARCHED = set()", "def _reset_gradients(self):\n self.grad = None # gradient itself\n self.grad_fn = None # functions to call for gradient\n self.grad_expected = 0 # number of gradients expected from parents\n self.grad_received = 0 # number of gradients received from parents\n self.children = [] # children of node in graph\n self.ctx = AutogradContext() # contexts for AutogradFunctions", "def reset_all(self):\n self._stepsize = _stepsize\n self.reset_f()\n self.reset_s()\n self.reset_u()", "def clear(self):\n self.nodes = list()\n self.inputs = list()\n self.nodes += [self]", "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def reset(self):\n TNavigator.reset(self)\n TPen._reset(self)\n self._clear()\n self._drawturtle()\n self._update()", "def reset(self, propagate=True):\n if propagate:\n for c in self.children:\n c.reset()\n self._generate_index = 0\n self._num_resets += 1\n self._delayed_outputs.reset(self.id)", "def reset_state(self):\n for row in range(len(self.state)):\n for column in range(len(self.state[row])):\n self.state[row][column] = None", "def _Restore(self) -> None:\n self._SetNodes(self._nodes)", "def reset(self):\n super(CheckMayaAbstract, self).reset()\n self.errorNodes = list()\n self._errorDict = {}", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset_graph(self):\n raise NotImplementedError", "def reset_state(self):\n self._events = None # We'll have to grab the event handlers again in case they changed.\n self._elements.clear() # Clear any cached elements in case they changed or disappeared.", "def clear(self):\n self._nodes = { }\n self._arcs = set()", "def reset(self):\n for Myturtle in self._turtles:\n Myturtle._setmode(self._mode)\n Myturtle.reset()", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.target_ids = self.target_ids[inv_perm]\n self.new_epoch()", "def clear(self):\r\n self.nodes = collections.defaultdict(list)\r\n self.nodes_mapping = collections.defaultdict(list)\r\n self.edges = 0\r\n #self.children_length={}\r\n self.parents_length = collections.defaultdict(lambda : collections.defaultdict(int))", "def reset(self):\n self.previous = None\n self.state = None\n self.args = None\n self.context = None", "def clear(self):\n while len(self.nodes) > 0:\n self.nodes[0].remove()\n\n 
self.has_been_modified = False", "def reset_path(self):\n for i in self.grid:\n for y in i:\n y.g = 0\n y.h = 0\n y.f = 0\n y.parent = None\n y.visited = False", "def reset(self):\n self.state.fill(EMPTY)", "def _reset_fuzz_state(self):\n self.total_mutant_index = 0\n if self.fuzz_node:\n self.fuzz_node.reset()", "def reset_step(self):\n # reset all levels\n for l in self.levels:\n l.reset_level()", "def resetWeights(T):\n T.children = [(t,0) for t in T.children]\n for t,w in T.children:\n resetWeights(t)", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def reset(self):\n self.num_steps = 0\n self.world_state = self.action = None", "def restore(self):\n self.nodes.restore()", "def reset(self):\n self._idx = 0", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.new_epoch()", "def reset(self):\n self._current_index = 0", "def reset_graph(self):\n self.graph = OrderedDict()", "def reset(self):\n self.robot_path_ind = 0\n self.goal_path_ind = None\n self.global_plan = Path()", "def reset(self):\n ...", "def reset(self):\n ...", "def _re_init(self):\n self._child_index = 0", "def reset(self):\r\n\t\tself.index = 0", "def _reset_cache(self):\n self._cache = None\n for child in self.children: # pylint: disable=E1101\n child._reset_cache()", "def reset(self, state: nx.Graph = None):\n if state is None:\n self._state = self.init_mol\n else:\n self._state = state\n\n self.action_space.update_actions(self._state, self.observation_space)\n if self.record_path:\n self._path = [self._state]\n self._counter = 0", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def _clear_node(self):\n self._element = None\n self._parent = None\n self._leftchild = None\n self._rightchild = None\n self._height = None", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset():", "def reset():", "def reset():", "def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def _reset(self):\n pass", "def reset(self):\n self.iterable = None\n self.leftover = None\n self.at_end = False\n self.curr_pos = 0", "def reset_weights(self):\n self.head.reset_weights()", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset(self):\n self._previous_v = 0\n self._previous_m = 0\n self._previous_shape = 0", "def reset(self):\n self.state = self.resolve_et({NFA.START})", "def reset(self):\n self.set_state(self._initial_state)", "def _reset(self) -> None:", "def _reset(self) -> None:", "def reset(self):\n \n pass", "def reset (self):\n self.__inScopeNamespaces = self.__initialScopeNamespaces\n self.__inScopePrefixes = self.__initialScopePrefixes\n self.__mutableInScopeNamespaces = False\n self.__namespacePrefixCounter = 0", "def _reset_stored(self):\n ## Main 
information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]", "def reset(self):\n self.memory.clear()\n self.relative_base = 0\n self.input_queue.clear()\n self.instr_idx = 0", "def clear_registered_nodes(self):\n self.__nodes.clear()\n self.__names.clear()\n self.__aliases.clear()" ]
[ "0.74285185", "0.73662233", "0.7337941", "0.73155934", "0.7201365", "0.71484256", "0.7138489", "0.71265846", "0.71129096", "0.71065027", "0.7018088", "0.6959817", "0.6881637", "0.6879142", "0.681779", "0.6811828", "0.6801169", "0.67853063", "0.67844963", "0.6764829", "0.6744471", "0.66999733", "0.669001", "0.66898143", "0.66858447", "0.66763467", "0.66492957", "0.66264474", "0.66010964", "0.65809417", "0.65735763", "0.65655535", "0.65619725", "0.6555043", "0.65487367", "0.6546748", "0.6540081", "0.65247446", "0.64977753", "0.64933825", "0.6492012", "0.64786595", "0.64750516", "0.6472938", "0.64666176", "0.6459276", "0.64510244", "0.6444601", "0.643335", "0.64312935", "0.6422653", "0.6420298", "0.6417964", "0.6407706", "0.63988274", "0.63979816", "0.6390844", "0.6385306", "0.6372766", "0.6372766", "0.63695866", "0.6349775", "0.6346527", "0.63367575", "0.6326305", "0.6326305", "0.6326305", "0.6326305", "0.6326305", "0.6326305", "0.6326305", "0.6326305", "0.6316665", "0.6316455", "0.6316375", "0.6316375", "0.6316375", "0.6312619", "0.6305214", "0.6305214", "0.6305214", "0.6305214", "0.63012147", "0.63012147", "0.63012147", "0.6287241", "0.62833816", "0.6280919", "0.62804365", "0.62775415", "0.6277539", "0.6270444", "0.6269166", "0.6269166", "0.6268171", "0.62668586", "0.6263503", "0.62530833", "0.6252831", "0.6251787" ]
document_score: 0.8812322
document_rank: 0
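The score columns are stored as strings. Assuming negative_scores is aligned one-to-one with negatives and document_score is the positive document's score (neither is stated explicitly in this dump), a sketch of selecting hard negatives from a row might look like this:

```python
def hard_negatives(row, margin=0.05, top_k=8):
    """Keep the highest-scoring negatives that still score below the positive."""
    positive_score = float(row["document_score"])
    scored = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [neg for neg, score in scored if score < positive_score - margin][:top_k]
```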
query: Perform a depth-first traversal of the entire graph. The supplied visitor_function callable is called twice per node.
document:
def depth_first_traversal(self, visitor_function=None):
    self._reset_traversal_state()
    self.time = 0
    result = False

    for n in self.nodes.values():
        if NodeColor.WHITE == n.color:
            stack = collections.deque()
            stack.append(n)

            while len(stack) > 0:
                node = stack.pop()

                if NodeColor.WHITE == node.color:
                    # Need to stay on the stack until we're done exploring things connected to this node
                    stack.append(node)

                    self.time += 1
                    node.discovery_time = self.time
                    self._visit_enter(node, visitor_function)
                    node.color = NodeColor.GRAY

                    for descendant in self.edges[node]:
                        self.logger.debug(
                            'Looking at [{}] -> [{} / {}]'.format(node.name, descendant.name, descendant.color))
                        if NodeColor.WHITE == descendant.color:
                            descendant.predecessor = node
                            stack.append(descendant)
                        elif NodeColor.GRAY == descendant.color:
                            self.logger.debug(
                                'Found cycle involving edge [{}] -> [{}]'.format(node.name, descendant.name))
                            result = True
                elif NodeColor.GRAY == node.color:
                    self.time += 1
                    node.color = NodeColor.BLACK
                    node.finishing_time = self.time
                    self._visit_exit(node, visitor_function)
        elif NodeColor.GRAY == n.color:
            self.logger.info('Found cycle involving node [{}]'.format(n.name))
            result = True

    return result
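The document above detects cycles via white/gray/black colouring during an explicit-stack DFS. A condensed, self-contained sketch of the same idea over a plain adjacency dict (the names here are illustrative, not the dataset's API):

```python
WHITE, GRAY, BLACK = range(3)

def has_cycle(adjacency):
    """Iterative DFS; returns True if a back edge (and therefore a cycle) is found."""
    color = {node: WHITE for node in adjacency}
    for neighbours in adjacency.values():
        for node in neighbours:
            color.setdefault(node, WHITE)

    found_cycle = False
    for start in adjacency:
        if color[start] != WHITE:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if color[node] == WHITE:
                color[node] = GRAY
                stack.append(node)                 # re-push so we can finish it later
                for nxt in adjacency.get(node, ()):
                    if color[nxt] == WHITE:
                        stack.append(nxt)
                    elif color[nxt] == GRAY:
                        found_cycle = True         # back edge to an ancestor
            elif color[node] == GRAY:
                color[node] = BLACK                # all descendants finished
    return found_cycle

# has_cycle({'a': ['b'], 'b': ['c'], 'c': ['a']})  -> True
# has_cycle({'a': ['b'], 'b': ['c'], 'c': []})     -> False
```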
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_depth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extendleft(reversed(node.children))", "def _traverse_region_graph(root: Region, fun: Callable[[Region], None]) -> None:\n visited_nodes = set() # Set of visited nodes\n queue: deque = deque()\n queue.append(root)\n\n while queue:\n next_node = queue.popleft()\n if next_node not in visited_nodes:\n fun(next_node)\n\n visited_nodes.add(next_node)\n\n for child in next_node.children:\n queue.append(child)", "def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = graph.node[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def depth_first_traverse(self) -> Generator:\n\n def traverse(current_node: Node):\n yield current_node\n for child in current_node.children:\n for item in traverse(child):\n yield item\n\n return traverse(self.root_node)", "def _traverse(cls, node, func):\n # Checking for last node\n if node is None:\n return\n\n func(node.value)\n cls._traverse(node.next_, func)", "def dfs_ast(func):\n def wrapper(self, node):\n new_node = func(self, node)\n for child in ast.iter_child_nodes(new_node):\n self.visit(child)\n return new_node\n\n return wrapper", "def depth_first_traversal(self, callback: Callable[[Tree], None]) -> None:\n nodes_to_visit = []\n nodes_to_visit.append(self)\n\n while nodes_to_visit:\n temp_node = nodes_to_visit.pop()\n callback(temp_node)\n\n # Appending child nodes in reverse order (right to left) to traverse\n # depth first, from left to right. 
More specifically, the nodes will be visited \n # in ascending order based on The visual example provided below.\n # tree: \n # 1\n # / \\\n # 2 5\n # / \\ / \\\n # 3 4 6 7\n for idx in range(len(temp_node.children) -1, -1, -1):\n nodes_to_visit.append(temp_node.children[idx])", "def dft_recursive(self, starting_vertex):\n \n visited = []\n\n def helper(vert, visited):\n visited.append(vert)\n print(vert)\n\n for child in self.vertices[vert]:\n if child not in visited:\n helper(child, visited)\n\n helper(starting_vertex, visited)", "def traverse(self, fn: Callable[[RTreeNode[T]], None]) -> None:\n self._traverse(self.root, fn)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def walk_tree(visitor, data_structure):\n if isinstance(data_structure, dict):\n for key in data_structure.keys():\n data_structure[key] = walk_tree(visitor, data_structure[key])\n elif isinstance(data_structure, list):\n for i in xrange(len(data_structure)):\n data_structure[i] = walk_tree(visitor, data_structure[i])\n else:\n data_structure = visitor(data_structure)\n return data_structure", "def _traverse_in_order_recursive(self, node, visit):\n # Traverse left subtree, if it exists\n if node is not None:\n self._traverse_in_order_recursive(node.left_child, visit)\n # Visit this node's data with given function\n visit(node.data)\n # Traverse right subtree, if it exists\n self._traverse_in_order_recursive(node.right_child, visit)", "def traverse(self, visit, *args, **kwargs):\n if not self.__visited:\n visit(self, *args, **kwargs)\n self.__visited = True\n for c in self.parameters:\n c.traverse(visit, *args, **kwargs)\n self.__visited = False", "def traverse(self):\r\n nodes_to_visit = queue.Queue()\r\n nodes_to_visit.put(self.__rootnode)\r\n while nodes_to_visit.empty() is False:\r\n current_node = nodes_to_visit.get()\r\n yield current_node\r\n for child in current_node.children:\r\n nodes_to_visit.put(child)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def traverse(self, func):\n self._traverse(self.head, func)", "def depth_first_recursion(graph, current=\"a\"):\n print(current)\n for node in graph.get(current):\n depth_first_recursion(graph, node)", "def breadth_first_traversal(self, start_node, visitor_function=None, max_depth=None):\n self._reset_traversal_state()\n\n if isinstance(start_node, str):\n start_node = self.nodes[start_node]\n\n if not isinstance(start_node, ProcessNode):\n raise TypeError('Expect start_node to either be a string or a ProcessNode. 
Got [{}] instead'.format(\n str(type(start_node))))\n\n start_node.discovery_time = 1\n queue = collections.deque()\n queue.appendleft(start_node)\n\n while len(queue) > 0:\n node = queue.pop()\n assert NodeColor.WHITE == node.color\n\n if node.predecessor is not None:\n node.discovery_time = node.predecessor.discovery_time + 1\n\n self._visit_enter(node, visitor_function)\n\n node.color = NodeColor.GRAY\n\n if max_depth is None or node.discovery_time + 1 < max_depth:\n for descendant in self.edges[node]:\n if NodeColor.WHITE == descendant:\n descendant.predecessor = node\n queue.appendleft(descendant)\n\n node.finishing_time = self.time\n node.color = NodeColor.BLACK\n\n self._visit_exit(node, visitor_function)", "def _traverse_level_order_iterative(self, start_node, visit):\n # Create queue to store nodes not yet traversed in level-order\n queue = LinkedQueue()\n # Enqueue given starting node\n queue.enqueue(start_node)\n # Loop until queue is empty\n while queue.is_empty() == False:\n # Dequeue node at front of queue\n node = queue.dequeue()\n # Visit this node's data with given function\n visit(node.data)\n # Enqueue this node's left child, if it exists\n if node.left_child is not None:\n queue.enqueue(node.left_child)\n # Enqueue this node's right child, if it exists\n if node.right_child is not None:\n queue.enqueue(node.right_child)", "def _traverse_pre_order_recursive(self, node, visit):\n\n\n if node is not None:\n visit(node.data) # Visit this node's data with given function\n self._traverse_pre_order_recursive(node.left_child, visit) # Traverse left subtree, if it exists\n self._traverse_pre_order_recursive(node.right_child, visit) # Traverse right subtree, if it exists", "def search(G):\n visited = set()\n \n for v in range(len(G)):\n if v not in visited:\n yield v,v,forward\n visited.add(v)\n stack = [(v,iter(G[v]))]\n while stack:\n parent,children = stack[-1]\n try:\n child = next(children)\n if child in visited:\n yield parent,child,nontree\n else:\n yield parent,child,forward\n visited.add(child)\n stack.append((child,iter(G[child])))\n except StopIteration:\n stack.pop()\n if stack:\n yield stack[-1][0],parent,reverse\n yield v,v,reverse", "def depth_first_search(self):\r\n queue = [self.root]\r\n ordered = []\r\n while queue:\r\n node = queue.pop()\r\n ordered.append(node)\r\n queue.extend(node.children)\r\n \r\n while ordered:\r\n yield ordered.pop()", "def dfs(node, traversal):\n if traversal.terminated: return\n\n g = traversal.graph\n node_key = g.key_func(node)\n traversal.node_state[node_key] = DISCOVERED\n traversal.entry_times[node_key] = traversal.curr_time\n traversal.curr_time += 1\n\n if traversal.should_process_node(node) is not False:\n # Now go through all children\n children = list(traversal.select_children(node, reverse = True))\n # print \"Node, Children: \", g.key_func(node), children\n for n,edge in children:\n child_key = g.key_func(n)\n if traversal.node_state[child_key] != None:\n traversal.process_edge(node, n, edge)\n else: # Node has not even been discovered yet\n traversal.parents[child_key] = node\n traversal.process_edge(node, n, edge)\n dfs(n, traversal)\n\n traversal.node_state[node_key] = PROCESSED\n traversal.curr_time += 1\n traversal.exit_times[node_key] = traversal.curr_time\n traversal.node_processed(node)", "def search(G):\n visited = set()\n\n for v in range(len(G)):\n if v not in visited:\n yield v, v, forward\n visited.add(v)\n stack = [(v, iter(G[v]))]\n while stack:\n parent, children = stack[-1]\n try:\n child = 
next(children)\n if child in visited:\n yield parent, child, nontree\n else:\n yield parent, child, forward\n visited.add(child)\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop()\n if stack:\n yield stack[-1][0], parent, reverse\n yield v, v, reverse", "def dfs_walk(node: ast.AST) -> Iterator[ast.AST]:\n stack = [node]\n while stack:\n node = stack.pop()\n stack.extend(reversed(list(ast.iter_child_nodes(node))))\n yield node", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def walk(node):\n\n traversed_nodes.append(node)\n \n # Do something with node value...\n print node.value\n\n # Recurse on each child node\n for child_node in node.child_nodes:\n if child_node not in traversed_nodes:\n walk(child_node)", "def _explore(self, node, visited, skip_father=None):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = AbstractState()\n fathers_context.merge_fathers(node, skip_father, self)\n\n # Exclude path that dont bring further information\n if node in self.visited_all_paths:\n if self.visited_all_paths[node].does_not_bring_new_info(fathers_context):\n return\n else:\n self.visited_all_paths[node] = AbstractState()\n\n self.visited_all_paths[node].add(fathers_context)\n\n node.context[self.KEY] = fathers_context\n\n contains_call = fathers_context.analyze_node(node, self)\n node.context[self.KEY] = fathers_context\n\n sons = node.sons\n if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:\n if _filter_if(node):\n son = sons[0]\n self._explore(son, visited, node)\n sons = sons[1:]\n else:\n son = sons[1]\n self._explore(son, visited, node)\n sons = [sons[0]]\n\n for son in sons:\n self._explore(son, visited)", "def dft_recursive(self, starting_vertex, visited=None):\n # First, we set our initial condition\n if visited is None:\n # If no nodes have been visited, we create a set to store the nodes we visit\n visited = set()\n\n # Then we add the starting vertex to the visited set\n visited.add(starting_vertex)\n print(starting_vertex)\n\n # Call the function recursively on neighbors not visited\n # Lastly we write a for loop that will recursively call dft_recursive()\n for neighbor in self.vertices[starting_vertex]:\n # For each vertex, we check to see if any of the neighbors have already been visited\n if neighbor not in visited:\n # And if we find a neighbor that has not been visited, we recursively call dft_recursive() and pass it the neighbor and updated visited set\n self.dft_recursive(neighbor, visited)", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # 
perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def DFSUtility(obj,vertex,visited,subGraph):\n visited[vertex] = True\n subGraph.append(vertex)\n for nxtVertex in obj.adjList[vertex]:\n if visited[nxtVertex]:\n continue\n DFSUtility(obj,nxtVertex,visited,subGraph)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def dfs(graph, root, method='dfs', max_depth=10000):\n \n # Get node object from node ID\n root = graph.getnodes(root)\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n visited = []\n stack = [root.nid]\n depth = 0\n \n while stack or depth == max_depth:\n node = stack.pop(stack_pop)\n \n if node not in visited:\n visited.append(node)\n stack.extend(\n [x for x in node_neighbors(graph, node) if x not in visited])\n depth += 1\n \n return visited", "def get_nodes_by_function(graph, func):\n return filter_nodes(graph, function_inclusion_filter_builder(func))", "def dfs(self, start_node, cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def dft_recursive(self, starting_vertex):\n # TODO\n # creating a function inside that includes a list\n # of previously traversed vertices\n def recursive(graph, traversed, vertex):\n # if the vertex is in traversed already, return none\n if vertex in traversed:\n return \n # otherwise we print it out\n print(vertex)\n # append the vertex to our traversed list\n traversed.add(vertex)\n # running the function on the neighbors of the vertex\n for val in graph[vertex]:\n recursive(graph, traversed, val)\n\n recursive(self.vertices, set(), starting_vertex)", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def depth_first_graph_search(problem):\n\n\tfrontier = [(Node(problem.initial))] # Stack (implemented as a list)\n\n\texplored = set()\n\twhile frontier:\n\t\tnode = frontier.pop()\n\t\tif problem.goal_test(node.state):\n\t\t\treturn node\n\t\texplored.add(node.state)\n\t\tfrontier.extend(child for child in node.expand(problem)\n\t\t\t\t\t\tif child.state not in explored and child not in frontier)\n\treturn None", "def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)", "def DFS(self, start_vertex):\n yield from self._search(start_vertex, kind='DFS')", "def _DFS(self, curr_vertex, visited, callback):\n visited.add(curr_vertex)\n callback(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n self._DFS(vertex, visited, callback)", "def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:\n for child_node in node.iter_child_nodes():\n self.visit(child_node, *args, **kwargs)", "def DFS(self, start_vertex, verbose=True):\n if start_vertex is None:\n return None\n traversal = []\n visited = 
set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._DFS(vertex, visited, traversal.append)\n if verbose:\n print('DFS(Graph) =', traversal)\n return traversal", "def graph_depth_first(self,root):\n stack = Stack()\n stack.push(root)\n nodes = []\n \n visited = []\n visited.append(root)\n\n while not stack.is_empty():\n vertex = stack.peek()\n \n nodes.append(vertex.value)\n stack.pop() \n for child in self._adjacency_list.get(vertex.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n \n stack.push(child.start_vertex)\n \n\n return nodes", "def dfs(node, all_nodes, depth):\r\n node.depth = depth\r\n to_return = [node,]\r\n for subnode in all_nodes:\r\n if subnode.parent and subnode.parent.id == node.id:\r\n to_return.extend(dfs(subnode, all_nodes, depth+1))\r\n return to_return", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n \n st = Stack()\n mapper = {}\n mapper[problem.getStartState()] = None\n\n st.push(problem.getStartState())\n while not(st.isEmpty()):\n vertex = st.pop()\n \n if (problem.isGoalState(vertex)):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n neigh = problem.getSuccessors(vertex)\n # neigh.reverse()\n # neigh.sort()\n for child in neigh:\n if child[0] not in mapper:\n st.push(child[0])\n mapper[child[0]] = (vertex, child[1])\n # print mapper\n \n # visited = []\n # p = dfsRecursive(problem, problem.getStartState(), st, visited, [])\n # return p\n \n # pathfind = {}\n # st.push(problem.getStartState())\n # iterative approach:\n # while (not st.isEmpty()):\n # point = st.pop() # (x,y)\n # if problem.isGoalState(point):\n # # print point\n # print pathfind\n # # print visited\n # elif (not (point in visited)):\n # visited.append(point)\n # # print pathfind, '\\n'\n # print visited, '\\n'\n # for child in problem.getSuccessors(point):\n # st.push(child[0])\n # pathfind[child[0]] = point #this preemptively adds!\n # util.raiseNotDefined()", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def _depth_first_directed(self, graph):\n \n # Figure out which subgraph this is\n sub = next((i+1 for i, g in enumerate(self.get_subgraphs()) if g==graph), None)\n # Log the Subgraph progress\n logger.info('Directing SUBGRAPH {} / {}'.format(sub, len(list(self.get_subgraphs()))))\n\n old_edges = graph.edges()\n dfs_edges = list(nx.traversal.dfs_edges(graph,\n self._graph_priority(graph.nodes())))\n #This debug message could be cleaner\n logger.debug('mapping {} -> {}'.format(old_edges, dfs_edges))\n graph.remove_edges_from(old_edges)\n graph.add_edges_from(dfs_edges)\n \n logger.info('DONE!')\n return graph", "def depth_from_match(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n 
self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap", "def traverse(self, traverser, **kwargs):\n atr = []\n for arg in self.args:\n atr.append(arg.traverse(traverser, **kwargs))\n return traverser.function(self, atr, **kwargs)", "def traverse_level_order(self, fn: Callable[[RTreeNode[T], int], None]) -> None:\n stack = [(self.root, 0)]\n while stack:\n node, level = stack[0]\n stack = stack[1:]\n fn(node, level)\n if not node.is_leaf:\n stack.extend([(entry.child, level + 1) for entry in node.entries])", "def preorder_visit(t: Tree, act: Callable[[Tree], Any]) -> None:\n act(t)\n for child in t.children:\n preorder_visit(child, act)", "def traverse(self, data=True):\n nodes = sorted(self.graph.nodes(), key=lambda x: key_to_numeric(x))\n for node in nodes:\n yield (node, self.graph.node[node]) if data else node", "def recursive_visit(self, node):\n node = self.generic_visit(node)\n\n # walk through the children: either iterate the node or look up the keys\n if hasattr(node, '_dict_keys'):\n for v in node._dict_keys:\n self.recursive_visit(getattr(node, v))\n\n if hasattr(node, '_list_keys'):\n for v in node._list_keys:\n self.recursive_visit(getattr(node, v))\n else:\n iter_target = None\n # need special handling of node.data or node_list in order to walk through all formatting node, e.g. endl\n if hasattr(node, 'node_list'): # use the unproxy list to get all formatting\n iter_target = node.node_list\n elif hasattr(node, 'data'):\n iter_target = node.data\n elif hasattr(node, '__iter__'):\n iter_target = node\n\n if iter_target:\n change_list = []\n for child in iter_target:\n new_node = self.recursive_visit(child)\n if new_node is not child:\n change_list.append((child, new_node))\n\n for original_child, new_child in change_list:\n i = original_child.index_on_parent\n iter_target.remove(original_child)\n iter_target.insert(i, new_child)\n\n return node", "def DFS(obj,vertex,visited=dict()):\n validateVertex(vertex,obj.vertexList)\n #order = []\n #visited = dict()\n subGraph= []\n for ver in obj.vertexList:\n visited[ver] = False\n\n DFSUtility(obj,vertex,visited,subGraph)\n return subGraph", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def levelorder_visit_recursive(t: Tree, act: Callable[[Tree], None]) -> None:\n if t.value is None:\n pass\n else:\n level = 0\n visited = visit_level(t, level, act)\n while visited > 0:\n level += 1\n visited = visit_level(t, level, act)", "def depth_first_traversal(self, start):\n return self.recursive_dft(start, [])", "def visit_children(self, func):\n for child in self._children:\n func(child)", "def visitNodes(self) -> None:\n\n for node in 
self.nodesMap_.values():\n self.dump_node(node)", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def visit(self, node, node_map):\n node_type = node.__class__.__name__\n method = 'visit_' + node_type \n visitor = getattr(self, method, self.generic_visit)\n \n return visitor(node, node_map)", "def apply_tree(tree: dict, func: Callable, args: Optional[Tuple] = None, kwargs: Optional[Mapping] = None) -> None:\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n func(item, *args, **kwargs)\n explored.add(uid)\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def trees(vertices):\n from sage.graphs.trees import TreeIterator\n return iter(TreeIterator(vertices))", "def __dfs(self, subtree, path):\n if isinstance(subtree, list):\n for node in subtree:\n for child in self.__dfs(node, path + \"[\" + str(subtree.index(node)) + \"]\"):\n yield child\n elif isinstance(subtree, dict):\n for node in subtree:\n for child in self.__dfs(subtree[node], path + \"/\" + node):\n yield child\n else: # Leaf node\n yield (subtree, path)", "def depth_first_traversal_iterative(self, start):\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node = stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in reversed(self.node_dict[cur_node]):\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.Stack() # stack for searshing the graph\n visited = [] # Keep track of visited nodes\n start =problem.getStartState() # The start node\n stack.push((start, [])) # the sart state and empty path list is pushed to the stack\n \n while stack:\n (vrtx, path) = stack.pop() # Pop tfrom the stack , vrtx: the poped node for expantion.\n if vrtx not in visited: # if the node is visited alraedy \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx):\n stack.push((successor[0], path+[successor]))\n util.raiseNotDefined()", "def preorder_visit(t: Tree, act: Callable[[Tree], None]) -> None:\n if t.value is None:\n pass\n else:\n act(t)\n for subtree in t.children:\n preorder_visit(subtree, act)", "def visit(self, node):\n method_name = 'visit_' + type(node).__name__\n visit_method = getattr(self, method_name, self.generic_visit)\n return visit_method(node)", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def traverse_tree(self, root):\n\n\t\tself.pre_stage()\n\t\troot.visit(self)\n\t\tself.post_stage()", "def depthOrBreadthFirstSearch(problem, container):\n firstNode = (problem.getStartState(), None, 0, None)#state, action to reach, incremental cost, parent node\n container.push(firstNode)\n visitedStates = []\n while (not container.isEmpty()):\n if problem.getNodesExpandedNum() > MAX_NODES_TO_EXPLORE:\n return None\n curNode = container.pop()\n if (problem.isGoalState(curNode[0])):\n return getStatePathFromNode(curNode, problem)\n for successor in problem.getSuccessors(curNode[0]):\n if not successor[0] in visitedStates:\n successorNode = (successor[0], successor[1], successor[2], curNode)\n visitedStates.append(successor[0])\n container.push(successorNode)\n return None", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n\n frontier = Stack()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n\n start = node(problem.getStartState(),'','')\n frontier.push(start)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n if achou == False:\n successor = node(vertex[0],path.path,vertex[1])\n frontier.push(successor)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def __dependency_traverse(self, v, visited, results):\n if v in visited:\n return\n visited.add(v)\n # Create list of input data\n data = []\n for p in v.predecessors():\n # Traverse dependencies that have not been visited yet\n self.__dependency_traverse(p, visited, results)\n data.append(results[p])\n data = data[0] if len(data) == 1 else data\n # Flatten list of input data if there is only one element\n results[v] = self.traverse_vertex(v, data)\n # Traverse transforms that require this dependency and have not been visited\n for n in v.successors():\n self.__dependency_traverse(n, visited, results)", "def visual_traverse(self, curnode, graph, lastnode, cfg):\n if curnode in self.visited:\n if lastnode is not None:\n graph.edge(str(id(lastnode)), str(id(curnode)))\n return\n self.visited.add(curnode)\n tmp_str = str(self.c2py[curnode])\n if 
self.c2py[curnode].isOp:\n name = self.c2py[curnode].name\n # default render configuration\n render_cfg = {'shape': 'ellipse', 'style': 'solid'}\n if name in cfg:\n render_cfg = cfg[name]\n graph.node(str(id(curnode)), tmp_str, **render_cfg)\n else:\n graph.node(str(id(curnode)), tmp_str, shape='box',\n color='lightblue', style='dashed')\n if lastnode is not None:\n graph.edge(str(id(lastnode)), str(id(curnode)))\n if curnode in self.forward_edge:\n for _next in self.forward_edge[curnode]:\n self.visual_traverse(_next, graph, curnode, cfg)", "def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)", "def accept(visitor):", "def accept(visitor):", "def _traverse_node_tree(self, cur_node, search_node_list):\n for _, sub_node in cur_node.get_children():\n sub_nodes = []\n self._traverse_node_tree(sub_node, sub_nodes)\n sub_node_dict = {\n 'name': sub_node.node_name,\n 'type': sub_node.node_type,\n 'is_dynamic_shape_node': sub_node.is_dynamic_shape_node,\n 'nodes': sub_nodes\n }\n search_node_list.append(sub_node_dict)", "def _calculate_tree_traversal(nonterminal_to_dfas):\n # Map from grammar rule (nonterminal) name to a set of tokens.\n first_plans = {}\n\n nonterminals = list(nonterminal_to_dfas.keys())\n nonterminals.sort()\n for nonterminal in nonterminals:\n if nonterminal not in first_plans:\n _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal)\n\n # Now that we have calculated the first terminals, we are sure that\n # there is no left recursion.\n\n for dfas in nonterminal_to_dfas.values():\n for dfa_state in dfas:\n transitions = dfa_state.transitions\n for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items():\n for transition, pushes in first_plans[nonterminal].items():\n if transition in transitions:\n prev_plan = transitions[transition]\n # Make sure these are sorted so that error messages are\n # at least deterministic\n choices = sorted([\n (\n prev_plan.dfa_pushes[0].from_rule\n if prev_plan.dfa_pushes\n else prev_plan.next_dfa.from_rule\n ),\n (\n pushes[0].from_rule\n if pushes else next_dfa.from_rule\n ),\n ])\n raise ValueError(\n \"Rule %s is ambiguous; given a %s token, we \"\n \"can't determine if we should evaluate %s or %s.\"\n % (\n (\n dfa_state.from_rule,\n transition,\n ) + tuple(choices)\n )\n )\n transitions[transition] = DFAPlan(next_dfa, pushes)", "def depthFirstSearch(problem):\n\t#print(\"Start:\", problem.getStartState())\n\t#print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n\t#print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\t\n\n\t\"*** YOUR CODE HERE ***\"\n\n\t# Create the stack, and visited array to keep track of visited nodes.\n\tdfsStack = util.Stack()\n\tvisited = []\n\t# Get the first state in the graph, push to the stack\n\tfirst = problem.getStartState()\n\tdfsStack.push([first, [], 0])\n\n\t# While the stack is not empty, pop the first node from the stack, and check if that state\n # is the goal state. If so, return the actions for that node. 
Otherwise, append that state\n # to the visited array, get its successors, and push them to the stack.\n\twhile not dfsStack.isEmpty():\n\t\tNewNode = dfsStack.pop()\n\t\tif((problem.isGoalState(NewNode[0]) == True)):\n\t\t\treturn NewNode[1]\n\t\tif(NewNode[0] not in visited):\n\t\t\tvisited.append(NewNode[0])\n\t\t\tfor NextNode in problem.getSuccessors(NewNode[0]):\n\t\t\t\tif NextNode[0] not in visited:\n\t\t\t\t\tdfsStack.push((NextNode[0], NewNode[1] + [NextNode[1]], NextNode[2]))", "def find_DFS(self, value):\n \n to_visit = [self]\n \n while to_visit:\n curr = to_visit.pop() # DFS -> .pop() from end -> stack\n \n if curr.value == value:\n return curr\n \n to_visit.extend(curr.children)", "def walk_tree(tree,\n leaf_func=lambda x: None,\n pre_nonleaf_func=lambda x: None,\n post_nonleaf_func=lambda x: None):\n tree = deepcopy(tree)\n\n def walk(node):\n # Depth First Traversal of an NLTK Tree.\n if is_leaf_node(node):\n leaf_func(node)\n else:\n pre_nonleaf_func(node)\n if len(node) > 0:\n for child in node:\n walk(child)\n post_nonleaf_func(node)\n\n walk(tree)\n return tree", "def general_search(fringe, visited, limiting_depth):\n node_to_be_explored = fringe[0]\n node_state = node_to_be_explored['state']\n visited[node_state] = node_to_be_explored\n if goal_test(node_to_be_explored['state']):\n return generate_path(node_to_be_explored, visited)\n current_depth = node_to_be_explored['depth']\n if current_depth == limiting_depth:\n return False\n children = [\n {\n 'state': child_state,\n 'parent': node_state,\n 'depth': current_depth + 1,\n }\n for child_state in operator(node_state)]\n for child in children:\n if child['state'] in visited:\n continue\n fringe_copy = [child] + fringe[1:]\n visited_copy = visited.copy()\n solution = general_search(fringe_copy, visited_copy, limiting_depth)\n if solution:\n return solution\n else:\n continue\n return False", "def _pfs_nodes(cls, graph, source, size, priority):\n if size < 1:\n return iter(())\n\n # use min-heap to implement (max) priority queue\n # use insertion order to break priority tie\n queue = []\n counter = itertools.count()\n push = lambda priority, node: heappush(queue, (-priority, next(counter), node))\n pop = partial(heappop, queue)\n\n visited = set()\n enqueued = set()\n push(priority(source), source)\n\n while queue and len(visited) < size:\n _, _, node = pop()\n\n if node in visited:\n continue\n\n visited.add(node)\n\n for neighbor in graph[node]:\n if neighbor not in enqueued:\n enqueued.add(neighbor)\n push(priority(neighbor), neighbor)\n\n return iter(visited)", "def dfs_visit(self, node):\n super(MutantGenerator, self).generic_visit(node)", "def topological_sort_visit(graph, thisKey, topologicalKeyList):\n\n\t# Discover the Node at thisKey.\n\tglobal time\n\ttime += 1\n\tthisNode = graph.vertexMap[thisKey]\n\tthisNode.searchStatus = \"exploring\"\n\n\t# Explore each undiscovered adjacent Node and set their parent attributes.\n\n\t# The keys are ordered here to enforce an easily predictable traversal.\n\t# This is not necessary and reduces efficiency, but makes testing very straightforward. 
\n\t# For the purposes of this program this loss in efficiency is acceptable.\n\tsortedAdjacentKeys = list(graph.adjacentKeys(thisKey)); sortedAdjacentKeys.sort()\n\tfor adjacentKey in sortedAdjacentKeys:\n\t\tadjacentNode = graph.vertexMap[adjacentKey]\n\t\tif adjacentNode.searchStatus == \"undiscovered\":\n\t\t\tadjacentNode.parent = thisNode\n\t\t\ttopological_sort_visit(graph, adjacentKey, topologicalKeyList)\n\n\t# All adjacent Nodes have been explored.\n\ttime += 1\n\tthisNode.finishTime = time\n\tthisNode.searchStatus = \"finished\"\n\ttopologicalKeyList.insert(0, thisKey)", "def dft_recursive(self, starting_vertex, cache = None):\n \"\"\"\n If this is the first repetition create a cache set. If the \n current vertex is not in the cache add it and print the \n vertex. For every edge the vertex has run another repetition.\n \"\"\"\n if not cache:\n cache = set()\n if starting_vertex not in cache:\n cache.add(starting_vertex)\n print(starting_vertex)\n for edge in self.get_neighbors(starting_vertex):\n if edge not in cache:\n self.dft_recursive(edge, cache)", "def DFS(self, nDepth, treenode, state):\n \n visited = []\n visited.insert(0, (state, treenode))\n \n for index in range(0, nDepth-1): \n actions = self.priorProb(state)\n treenode.expansion(actions)\n treenode.updateU_value(actions)\n treenode, action = treenode.selection() \n state = state.do_move(action).copy()\n visited.insert(0, (state, treenode)) \n \n for index in range(0, len(visited)-1): \n if(visited[index][1].isLeaf() == True):\n value = self.leafEvaluation(visited[index][0])\n else: \n value = visited[index][1].backUp(value)\n visited[-1][1].updateQ_value(value)\n visited[-1][1].updateVisits()\n return visited[-1][1]", "def in_order_traversal(self):\n root = self.root\n self.traverse = self.in_order_traversal_node(root)\n return self.traverse", "def digraph_walker_backwards(graph, element, call_back):\r\n call_back(graph, element)\r\n for predecessor in graph.predecessors(element):\r\n call_back(graph, predecessor)\r\n for predecessor in graph.predecessors(element):\r\n digraph_walker_backwards(graph, predecessor, call_back)", "def bfs(self, start_node, visit_func, distance_func = None):\n from collections import deque\n\n distances = dict()\n distances[start_node] = 0\n visited = set()\n qu = deque()\n qu.appendleft(start_node)\n while len(qu) != 0:\n node = qu.pop()\n if node in visited:\n continue\n visit_func(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n qu.appendleft(neighbor_node)\n if neighbor_node not in distances.keys():\n distances[neighbor_node] = distances[node] + 1\n if distance_func:\n distance_func(distances)", "def _dfs_postorder(node, visited):\n\t# print(\"_dfs_postorder\")\n\tif node.lo is not None:\n\t\tyield from _dfs_postorder(node.lo, visited)\n\tif node.hi is not None:\n\t\tyield from _dfs_postorder(node.hi, visited)\n\tif node not in visited:\n\t\tvisited.add(node)\n\t\tyield node", "def nested_object_traversal(obj: any, leaf_function: Callable, leaf_type: type):\n if isinstance(obj, (list, tuple)):\n result = [Role.nested_object_traversal(elem, leaf_function, leaf_type) for elem in obj]\n return type(obj)(result)\n elif isinstance(obj, dict):\n return {\n k: Role.nested_object_traversal(v, leaf_function, leaf_type)\n for k, v in sorted(obj.items())\n }\n elif isinstance(obj, leaf_type):\n return leaf_function(obj)\n else:\n return obj", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, 
pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()", "def _preorder_traverse_to_list_helper(self, node, depth):\n\t\t#visit node\n\t\tl = []\n\t\tif (node):\n\t\t\tl.append(node.value())\n\t\telse:\n\t\t\tl.append(None)\n\n\t\t#anon function for this thing\n\t\tfakechild = lambda:self._preorder_traverse_to_list_helper(None, depth + 1)\n\n\t\t#call on children\n\t\tif (node):\n\t\t\tif (node.lchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.lchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (lchild)\n\t\t\t\t\tl += fakechild()\n\t\t\tif (node.rchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.rchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (rchild)\n\t\t\t\t\tl += fakechild()\n\t\telse:\n\t\t\tif (depth < self._depth):\n\t\t\t\t#recurse with None for empty children (lchild) and (rchild)\n\t\t\t\t#l += fakechild() #need to call twice?\n\t\t\t\tl += fakechild()\n\t\treturn l", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n currentnode, 
direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))" ]
[ "0.69131184", "0.6532276", "0.63960713", "0.6223167", "0.61571103", "0.59844995", "0.59705156", "0.59194154", "0.59114933", "0.5801044", "0.5797877", "0.5770219", "0.5733677", "0.5723791", "0.5710354", "0.5705172", "0.5666923", "0.5657166", "0.5648695", "0.5614636", "0.5611956", "0.5606374", "0.56013817", "0.5598395", "0.55939597", "0.55873716", "0.5580017", "0.5577118", "0.5574694", "0.5493511", "0.547516", "0.5432841", "0.54279965", "0.54252326", "0.54227895", "0.54084027", "0.53715825", "0.5364562", "0.53545535", "0.5350613", "0.5348631", "0.5341483", "0.5323369", "0.5315198", "0.52971524", "0.5294052", "0.52881026", "0.5280087", "0.5276805", "0.5263173", "0.5249407", "0.5241385", "0.52411866", "0.5220054", "0.52096695", "0.52082974", "0.52026343", "0.51967835", "0.5184704", "0.51834065", "0.5181917", "0.5155338", "0.5148484", "0.5146426", "0.51439726", "0.514312", "0.5142998", "0.5131501", "0.5127356", "0.51267874", "0.51233083", "0.5104955", "0.5103475", "0.5099425", "0.50981355", "0.50967556", "0.5096226", "0.50961757", "0.5095527", "0.5092561", "0.5092561", "0.50889564", "0.5088435", "0.507859", "0.5056429", "0.5053731", "0.5045997", "0.5039978", "0.5034514", "0.5033186", "0.50217164", "0.50189376", "0.5009944", "0.50058514", "0.5000202", "0.49951535", "0.49909043", "0.4990781", "0.49845096", "0.49832982" ]
0.69286025
0
Perform BFS, starting at the specified node.
def breadth_first_traversal(self, start_node, visitor_function=None, max_depth=None):
        self._reset_traversal_state()

        if isinstance(start_node, str):
            start_node = self.nodes[start_node]

        if not isinstance(start_node, ProcessNode):
            raise TypeError('Expect start_node to either be a string or a ProcessNode. Got [{}] instead'.format(
                str(type(start_node))))

        start_node.discovery_time = 1

        queue = collections.deque()
        queue.appendleft(start_node)

        while len(queue) > 0:
            node = queue.pop()

            assert NodeColor.WHITE == node.color

            if node.predecessor is not None:
                node.discovery_time = node.predecessor.discovery_time + 1

            self._visit_enter(node, visitor_function)
            node.color = NodeColor.GRAY

            if max_depth is None or node.discovery_time + 1 < max_depth:
                for descendant in self.edges[node]:
                    if NodeColor.WHITE == descendant.color:
                        descendant.predecessor = node
                        queue.appendleft(descendant)

            node.finishing_time = self.time
            node.color = NodeColor.BLACK
            self._visit_exit(node, visitor_function)
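For orientation, a self-contained sketch of the same pattern on a plain adjacency dict (illustrative only; the function and variable names below are assumptions, not part of the class above): a deque used as a FIFO queue, a depth cap mirroring max_depth, and a visitor callback invoked when a node is dequeued.

import collections

def bfs_sketch(edges, start, visit=print, max_depth=None):
    # edges: dict mapping node -> iterable of neighbour nodes (hypothetical helper)
    depth = {start: 1}                     # mirrors discovery_time starting at 1
    queue = collections.deque([start])
    while queue:
        node = queue.pop()                 # FIFO: appendleft to enqueue, pop to dequeue
        visit(node)
        if max_depth is None or depth[node] + 1 < max_depth:
            for nxt in edges.get(node, ()):
                if nxt not in depth:       # undiscovered, i.e. still "white"
                    depth[nxt] = depth[node] + 1
                    queue.appendleft(nxt)

# example: visits 'a', 'b', 'c'; 'd' is beyond the depth cap
bfs_sketch({'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}, 'a', max_depth=3)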
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs(maze, current_node):\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.popleft()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n time.sleep(args.speed)", "def bfs(graph, start_node):\n start_node.distance = 0\n start.set_predecessor(None)\n queue = list()\n queue.append(start_node)\n while (len(queue) > 0):\n current_vertex = queue.pop()\n current_vertex.setState = \"visiting\"\n for vertex in current_vertex.links():\n if (vertex.getState == \"unvisited\"):\n vertex.setState == \"tobevisited\"\n vertex.set_predecessor(current_vertex)\n vertex.distance = current_vertex.distance + 1\n queue.append(vertex)\n current_vertex.setState = \"visited\"", "def bfs(self, startNode):\n queue = Queue()\n\n # Mark all the nodes as not visited\n visited = {}\n for node in self.getNodes():\n visited[node] = False\n\n queue.enqueue(startNode)\n\n while not queue.isEmpty():\n s = queue.dequeue()\n visited[s] = True\n print s,\n\n # enqueue all the adjacent vertices to s\n # if they've not already been visited\n\n for adjacentNode in self.getAdjacent(s):\n if visited[adjacentNode] is False:\n queue.enqueue(adjacentNode)\n visited[adjacentNode] = True", "def _BFS(self, start_vertex, visited, callback):\n queue = []\n queue.insert(0, start_vertex)\n visited.add(start_vertex)\n while queue:\n curr_vertex = queue.pop()\n callback(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n queue.insert(0, vertex)\n visited.add(vertex)", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def BFS(self, start_vertex):\n yield from self._search(start_vertex, kind='BFS')", "def bfs(self, start_node: int, flag: bool) :\n for n in self.dw_graph.get_all_v().values():\n n.visited = False\n queue = [self.dw_graph.nodes[start_node]]\n self.dw_graph.nodes[start_node].visited = True\n node_list = [start_node]\n while queue:\n current = queue.pop()\n if not flag:\n for e in self.dw_graph.all_out_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n else:\n for e in self.dw_graph.all_in_edges_of_node(current.node_id).keys():\n if not self.dw_graph.nodes[e].visited:\n self.dw_graph.nodes[e].visited = True\n queue.append(self.dw_graph.nodes[e])\n node_list.append(e)\n\n return node_list", "def bfs(self, node: \"BSTNode\") -> Iterable[\"BSTNode\"]:\n queue = [node]\n\n while queue:\n current = queue.pop()\n yield current\n if current.left:\n queue.insert(0, current.left)\n if current.right:\n queue.insert(0, current.right)", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def bfs(self, start_node, visit_func, distance_func = 
None):\n from collections import deque\n\n distances = dict()\n distances[start_node] = 0\n visited = set()\n qu = deque()\n qu.appendleft(start_node)\n while len(qu) != 0:\n node = qu.pop()\n if node in visited:\n continue\n visit_func(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n qu.appendleft(neighbor_node)\n if neighbor_node not in distances.keys():\n distances[neighbor_node] = distances[node] + 1\n if distance_func:\n distance_func(distances)", "def bfs(graph, start):\n queue = deque([start])\n graph.distance[start] = 0\n\n while queue: # not empty\n u = queue.popleft()\n for v in range(graph.size):\n if is_edge(graph, u, v) and graph.distance[v] is None:\n graph.distance[v] = graph.distance[u] + 1\n graph.parent[v] = u\n queue.append(v)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITHM FOR bFS \n Create a queue Q\n enqueue root node to Q\n while Q is not empty:\n dequeu an item v from Q\n mark the item v as visited \n for each node w that is directed from v:\n enqueue w to Q\n \n \n \"\"\"\n\n fringes = util.Queue()\n explored =[]\n fringes.push((problem.getStartState(),[]))\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n goal = currentNode\n pathToGoal = currDir\n #print \"final path is : \", pathToGoal\n\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.append(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n fringes.push((childNode[0],currDir+[childNode[1]]))\n\n\n return pathToGoal", "def bfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Queue()\n return GraphSearch(problem, 'bfs').search(fringe)", "def BFS(graph, start, end, toPrint=False):\n initPath = [start]\n pathQueue = [initPath]\n if toPrint:\n print('current BFS path:', printPath(pathQueue))\n while len(pathQueue) != 0:\n # get and remove oldest element in pathQueue\n tmpPath = pathQueue.pop(0)\n print('Current BFS path:', printPath(tmpPath))\n lastNode = tmpPath[-1]\n if lastNode == end:\n return tmpPath # Explore all paths with n hops \n # before exploring any path with >n hops\n for nextNode in graph.childrenOf(lastNode):\n if next not in tmpPath:\n newPath = tmpPath + [nextNode]\n pathQueue.append(newPath)\n return None", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #Queue to hold the node along with the path taken from the start node to reach that node\n queue = Queue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n print startnode\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n queue.push((startnode,[]))\n\n # Loop till the queue is empty\n while queue.isEmpty() is not True:\n # Pop the currentnode and the direction from the queue\n currentnode, direction = queue.pop()\n # Check if the currentnode is not in explorednode.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n print currentnode\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # If the successor(child) is not explored\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n queue.push((successor, direction + [action]))\n util.raiseNotDefined()", "def bfs(self, starting_vertex, destination_vertex):\n \"\"\" FIFO ir LILO\n Create a queue\n Enqueue PATH to starting Vertex\n Create a set top store visited vertices\n While the queue is NOT empty: e.g. > 0\n Dequeue the first PATH Vertex\n Get Vertex from END of PATH\n Check IF NOT visited:\n Mark as visited\n check if vertex is destination_vertex\n If TRUE, return path\n enqueue PATH to ALL of neighbors \n make COPY of current path\n add neighbor to path copy\n enqueue copy \n \"\"\"\n\n q = Queue() # Create a queue\n q.enqueue([starting_vertex]) # Enqueue starting at vertex into Queue (list)\n visited = set() # Create a set to store visited \n \n while q.size() > 0: # While the queue is NOT empty: \n path = q.dequeue() # Dequeue the first PATH Vertices\n v = path[-1] # Get Vertex from END of PATH\n\n if v not in visited: # Check IF NOT visited:\n visited.add(v) # Mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path, DONE\n\n for n in self.get_neighbors(v): # enqueue PATH to ALL of neighbors\n path_c = path [:] # make COPY of current path\n path_c.append(n) # add neighbor to path copy\n q.enqueue(path_c) # enqueue copy", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def bfs_traversal_queue(self):\n queue = ArrayQueue()\n if not self.root:\n print(\"Tree is empty\")\n return\n queue.enqueue(self.root)\n\n while (queue.length() > 0):\n node = queue.dequeue()\n print(node.data, end=\" \")\n if node.left:\n queue.enqueue(node.left)\n if node.right:\n queue.enqueue(node.right)", "def bfsPath(graph, start, end, toPrint=False):\n return BFS(graph, 
start, end, toPrint)", "def bfs(self, starting_vertex, destination_vertex):\n # create an empty queue and enqueue A-PATH-TO the starting vertex ID\n # create a Set to store the visited vertices\n # while the queue is not empty ..\n # dequeue the first PATH\n # grab the last vertex from the PATH\n # if that vertex has not been visited ..\n # check if its the target\n #if yes, return path\n #mark it as visited\n # add A PATH TO its neighbots to the back of the queue\n # copt the path\n # append the neighbor to the back\n \n \n # create an empty Queue \n queue = Queue()\n #push the starting vertex ID as list\n queue.enqueue([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n path = queue.dequeue()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n queue.enqueue(new_path)", "def BFS(self, start_vertex, verbose=True):\n if not self.contains(start_vertex):\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._BFS(vertex, visited, traversal.append)\n if verbose:\n print('BFS(Graph) =', traversal)\n return traversal", "def _bfs_forward(self, start_node):\n visited = {node: (False) for node in self.layer_names}\n queue = [start_node]\n visited[start_node] = True\n while queue:\n node = queue.pop(0)\n if node != start_node:\n input_nodes = self.g.predecessors(node)\n if logging.getLogger().level == logging.DEBUG:\n l = copy(input_nodes)\n None\n cur_layer = getattr(self, node)\n output_pre_layers = []\n output_complete = True\n for n in input_nodes:\n if n not in self.outputs.keys():\n if logging.getLogger().level == logging.DEBUG:\n None\n output_complete = False\n break\n else:\n output_pre_layers.append(self.outputs[n])\n if not output_complete:\n if logging.getLogger().level == logging.DEBUG:\n None\n queue.append(node)\n else:\n cur_output = cur_layer(*output_pre_layers)\n self.outputs[node] = cur_output\n for i in self.g.successors(node):\n if visited[i] == False:\n queue.append(i)\n visited[i] = True\n losses, loss_weights = self._get_losses()\n return [self.outputs[t] for t in self.output_tasks], losses, loss_weights", "def bfs(graph, start, goal):\n\n final = []\n agenda = [[start]]\n\n # Process node queue\n while agenda:\n path = agenda.pop(0)\n\n # Exit if a path is found which reaches the goal\n if path[-1] == goal:\n final = path\n break\n\n # Push the new paths onto the queue\n connected = graph.get_connected_nodes(path[-1])\n for node in connected:\n # Ignore previously visited nodes\n if node not in path:\n agenda.append(path + [node])\n\n # Return the final path or initial empty list\n return final", "def bfs(board, start_position, start_actions, predicate):\n queue = Queue()\n visited = set()\n start_node = PositionNode(None, start_position, None)\n visited.add(start_position)\n # start actions are actions that have not been pruned\n for action in start_actions:\n next_pos = start_node.next(action)\n visited.add(next_pos)\n node = PositionNode(start_node, next_pos, action)\n queue.put(node)\n\n while not 
queue.empty():\n node = queue.get()\n if predicate.test(board, node.position):\n return node\n for action in [Action.Up.value, Action.Down.value, Action.Left.value, Action.Right.value]:\n next_pos = node.next(action)\n if valid_agent_position(board, next_pos) and next_pos not in visited:\n queue.put(PositionNode(node, next_pos, action))\n visited.add(next_pos)\n return None # no goal node found", "def breadth_first_search(self, start_point=None):\n\n self.initialization_list_empty_nodes(self.labyrinth_statistics[\"number_of_nodes\"])\n \n #If start_point is None, we set it to the node where the agent is in the labyrinth\n if start_point is None:\n start_point = self.agent_node\n\n #Initial situation of the algorithm\n queue = [start_point]\n start_point.status = 1\n start_point.distance_from_start_point = 0\n \n #While the queue is not empty, we analyze the nodes in it to empty it step by step\n while(len(queue) > 0):\n node_to_analyze = queue[0]\n for node in node_to_analyze.connected_to:\n if node.status == 0:\n node.pere = node_to_analyze\n node.distance_from_start_point = queue[0].distance_from_start_point + 1\n node.status = 1\n queue.append(node)\n queue.pop(0)\n node_to_analyze.status = 2", "def bft(self, starting_vertex):\n \"\"\" FIFO is LILO\n Create a queue\n Enqueue starting Vertex\n Create a set top store visited\n \n While the queue is NOT empty: e.g. > 0\n Dequeue the first Vertex\n Check IF NOT visited:\n Mark as visited\n enqueue ALL neighbors found if not already in queue\n \"\"\"\n # FIFO \n q = Queue() # create a queue ( e.g. empty [] )\n q.enqueue(starting_vertex) # Enqeue starting at vertex\n visited = set() # Create a set to store visited\n\n while q.size() > 0: # While the queue is NOT empty:\n # while q: # ERROR: Will add None into v, breaks _get_neighbors\n v = q.dequeue() # dequeue the first vertex\n\n if v not in visited: # Check IF NOT visited: \n print(v)\n visited.add(v) # if NOT visited, add to visited set\n\n for n in self.get_neighbors(v): # loop through all neighbors of v \n # if n not in q.queue: # !!! OPTIMIZATION !!!\n # q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)\n\n q.enqueue(n) # enqueue ALL neighbors found (ex. 
add to end of queue)", "def bfs(self, starting_vertex, destination_vertex): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([starting_vertex]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def bft(self, starting_vertex):\n # make a queue\n q = Queue()", "def bft(self, starting_vertex):\n # create an empty queue and enqueue the starting vertex ID\n queue = Queue()\n queue.enqueue(starting_vertex)\n # create an emtpy Set to stoe the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n vert = queue.dequeue()\n # if that vertex has not been visited..\n if vert not in visited:\n # mark it as visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[vert]: # self.get_neighbors(vert)\n queue.enqueue(neighbor)", "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def bfs(node_list):\n\n global depth_list, length_row_fragment_indices, trsl_instance\n children = []\n sum_length_row_fragment_indices = 0\n probabilistic_average_entropy = sum(\n n.probabilistic_entropy for n in node_list\n )\n for node in node_list:\n sum_length_row_fragment_indices += node.len_data_fragment\n if node.rchild is not None:\n xi[node.predictor_variable_index] += 1\n try:\n sets_count[sets.index(node.set)] += 1\n except ValueError:\n sets.append(node.set)\n sets_count[len(sets) - 1] += 1\n children.append(node.rchild)\n else:\n leaf_nodes.append(node)\n if node.lchild is not None:\n children.append(node.lchild)\n length_fragment_row_indices_list.append(\n sum_length_row_fragment_indices / len(node_list)\n )\n depth_list.append(node_list[0].depth)\n\n tree_data.append(\n {\n 'avg_entropy': probabilistic_average_entropy,\n 'no_of_nodes': len(node_list)\n }\n )\n if len(children) is not 0:\n bfs(children)", "def bfs(g: nx.Graph, start_node: Any) -> list:\r\n\tx = [start_node]\r\n\tqueue = [start_node]\r\n\ttracks = {node: [] for node in g.nodes}\r\n\twhile queue:\r\n\t\telement = 
queue.pop(0)\r\n\t\ty = list(g.neighbors(element))\r\n\t\tfor node in y:\r\n\t\t\tif node not in x:\r\n\t\t\t\tx.append(node)\r\n\t\t\t\tqueue.append(node)\r\n\t\t\t\ttracks[node].extend((*tracks[element], element))\r\n\treturn x", "def bfs(self, start_word, end_word): # great if you know to result is somewhere close to the root/start\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue\n q.enqueue([start_word]) # set the starting_vertex with enqueue \n\n while q.size() > 0:\n path = q.dequeue() # dequeue and store first path\n v = path[-1] # store the vertex from the end of path \n\n if v == end_word: # if v is equal to the destination_vertex\n return path # return the path \n\n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors \n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n q.enqueue(path_copy) # enqueue the path copy to the queue ", "def breadth_first(self, start_node):\n \n # try:\n if start_node not in self._adjacency_list:\n raise KeyError('Nodes are not in the graph')\n\n q = Queue()\n q.enqueue(start_node)\n visited_nodes = {}\n visited_nodes[start_node] = True\n output = []\n\n while len(q):\n cur = q.dequeue()\n output.append(cur)\n neighbors = self._adjacency_list[cur]\n for n in neighbors:\n if n[0] not in visited_nodes:\n q.enqueue(n[0]) \n visited_nodes[n[0]] = True\n return output\n # except Exception as error:\n # return(f'{error}')", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # BFS is identical to DFS, save for the data structure used to store the frontier\n\n # Frontier stored in a Queue\n frontier = util.Queue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there])\n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. 
if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n\n frontier.push((coordinates, pathTaken + [direction]))\n\n util.raiseNotDefined()", "def bfs(self, starting_vertex, destination_vertex):\n # creating an empty list of visited vertices\n visited = []\n # creating a queue with the starting vertex in it\n queue = [[starting_vertex]]\n # while we have items in our queueueue\n while queue:\n # pop the first item in the queueueue\n path = queue.pop(0)\n # getting the last value in our path\n node = path[-1]\n # checking to see if it has been seen already or not\n if node not in visited:\n # checking the neighbors of our farthest node\n for n in self.vertices[node]:\n # creating a new path list and appending the nieghbors\n # to it and the queueueueue\n new_path = list(path)\n new_path.append(n)\n queue.append(new_path)\n # if the destination is in the new_path\n # we are done and return the new path\n if n == destination_vertex:\n return new_path\n # adding the node to the visited list\n visited.append(node)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Queue\n q = Queue()\n mapper = {} #child_point : (parent_point, direction_to_child)\n q.push(problem.getStartState())\n mapper[problem.getStartState()] = None #root\n\n while (not q.isEmpty()):\n point = q.pop()\n\n if (problem.isGoalState(point)):\n c = point\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n for child in problem.getSuccessors(point):\n if (child[0] not in mapper):\n q.push(child[0])\n mapper[child[0]] = (point, child[1])\n\n # util.raiseNotDefined()", "def bft(self, starting_vertex):\n # create an empty queueueueueueueueueue class\n to_visit = Queue()\n # create an empty set\n visited = set()\n # populating the queueueueueueue with our starting vertex\n to_visit.enqueue(starting_vertex)\n\n # while loop to run while the queueueueueue is not empty\n while to_visit.size() > 0:\n v = to_visit.dequeue()\n # checking to see if the dequeueueued vertex is in our set or not\n if v not in visited:\n # if it is then it gets printed out\n print(v)\n # it then gets added to the visited set\n visited.add(v)\n # now we are checking the neighbors of the vertex and adding them\n # to the queueueueueueue\n for n in self.vertices[v]:\n to_visit.enqueue(n)", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n borders = Queue()\r\n borders.push(problem.getStartState()) \r\n actionsTree = {problem.getStartState():(None,None)}#action,last state\r\n GoalState = None\r\n while not borders.isEmpty():\r\n currentState = borders.pop()\r\n if problem.isGoalState(currentState):\r\n GoalState = currentState\r\n break\r\n for successor in problem.getSuccessors(currentState):\r\n if successor[0] not in actionsTree:\r\n actionsTree[successor[0]]=(successor[1],currentState)\r\n borders.push(successor[0])\r\n if GoalState is None:\r\n #print 'bfs cannot find goal'\r\n return []\r\n return generatePathFromActionsTree(actionsTree,GoalState)\r\n util.raiseNotDefined()", "def bfs(graph, root, max_depth):\n ###TODO\n pass", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while 
True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n queue = util.Queue() # queue for searshing the graph\n visited = [] # keep track of visited nodes\n start =problem.getStartState() # The start node\n queue.push((start, [])) # the sart state and empty path list is pushed to the queue\n \n while queue:\n (vrtx, path) = queue.pop()\n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx) :\n queue.push((successor[0], path+[successor]))\n\n util.raiseNotDefined()", "def shortestPathBFS(start):\n if start is None:\n return None\n\n # keep track of nodes to be checked\n queue = [start]\n start.curr_dist = 0\n\n while queue:\n curr = queue.pop()\n for neighbor in curr.neighbors:\n next_distance = curr.curr_dist + curr.getDistance(neighbor)\n if neighbor.curr_dist == math.inf or neighbor.curr_dist > next_distance:\n neighbor.curr_dist = next_distance\n neighbor.previous = curr\n queue.insert(0, neighbor)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\" \n startState = problem.getStartState()\n visitedNodes = []\n fringe = util.Queue()\n cost = 0 \n if (problem.isGoalState(startState) == True ):\n return [] # No Specific Actions\n else :\n fringe.push((startState , [] , cost ))\n while ( fringe.isEmpty() == False ):\n currentState , actions , cost = fringe.pop()\n \"\"\" get the latest node in the Queue \"\"\"\n \n if ( problem.isGoalState(currentState) == True ): \n \"\"\" check if the node is our goal or not \"\"\"\n #print(\"Final Path : \" + str(actions))\n return actions\n else:\n if ( (currentState in visitedNodes) == False ): \n \"\"\" check if this node is alreay visited or needs to be extended ? 
\"\"\"\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n if(not node in visitedNodes):\n state , action , cost = node \n if ( not state in visitedNodes):\n fringe.push((state , actions + [action] , cost ))\n \n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #the logic is same as for DFS just that i made use of a Queue data structure\n #Here the queue acts as a FIFO queue\n neighbourNodes = util.Queue()\n moves = []\n neighbourNodes.push((problem.getStartState(),moves))\n seenNodes = set()\n\n while not neighbourNodes.isEmpty():\n poppedNodeState, poppedNodeAction= neighbourNodes.pop()\n if(poppedNodeState in seenNodes):\n continue\n if problem.isGoalState(poppedNodeState):\n return poppedNodeAction\n seenNodes.add(poppedNodeState)\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n if(state in seenNodes):\n continue\n neighbourNodes.push((state, poppedNodeAction+[action]))\n return moves\n #util.raiseNotDefined()", "def bft(self, starting_vertex):\n \n \"\"\"\n Plan:\n - Start at given index. Add that index to the Q.\n - While len(Q) is greater than 0:\n - Check if q[0] has children.\n - If so then make sure children have not been visited, then add those children to the Q.\n - If they have been visited, skip over the child and DO NOT add to Q # !! will result in infinite loop !!\n \"\"\"\n\n queue = Q()\n visited = []\n\n queue.add(starting_vertex)\n\n while len(queue):\n current = queue.first()\n children = self.vertices[current]\n \n if len(children) > 0:\n for child in children:\n if child not in visited:\n queue.add(child)\n else: continue\n\n print(current)\n visited.append(current)\n queue.pop()", "def bfs(graph, startnode):\n dist = {}\n\n # Initialize distances\n for node in graph:\n dist[node] = float('inf')\n dist[startnode] = 0\n\n # Initialize search queue\n queue = deque([startnode])\n\n # Loop until all connected nodes have been explored\n while queue:\n node = queue.popleft()\n for nbr in graph[node]:\n if dist[nbr] == float('inf'):\n dist[nbr] = dist[node] + 1\n queue.append(nbr)\n return dist", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range 
(len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def bfs(self, starting_vertex, destination_vertex):\n # First, we create an empty queue and enqueue the starting vertex as a list\n qq = Queue()\n qq.enqueue([starting_vertex])\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # We write a while loop that will run as long as the queue is not empty\n while qq.size() > 0:\n # We dequeue the first vertex and set (v) to it\n v = qq.dequeue()\n # print(\"This is v: \", v)\n # print(\"This is v[-1]: \", v[-1])\n\n # Next we check to see if the vertex we just dequeued has been visited already\n if v[-1] not in visited:\n # If it has not been visited, we check to see if it is the destination we have long been searching for\n if v[-1] == destination_vertex:\n # If it is, we return the list of nodes we followed to arrive at said destination\n return v\n\n # If it's not the node we are looking for, we mark it as visited\n visited.add(v[-1])\n\n # Then add all of its neighbors to the back of the queue\n\n # Lastly, we write a for loop that loops through the neighbors of the current vertex\n for next_vert in self.get_neighbors(v[-1]):\n # For each neighbor, we create a copy of the current path and append the neighbor, allowing us to create multiple paths forward depending on the number of neighbors a vertex has\n new_v = list(v)\n new_v.append(next_vert)\n\n # Then we enqueue the path to the next neighbor\n qq.enqueue(new_v)", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def bft(self, starting_vertex):\n # First we create an empty queue and enqueue the starting vertex\n qq = Queue()\n qq.enqueue(starting_vertex)\n\n # Then we create a set to store the vertices we visit\n visited = set()\n\n # Here we write a while loop that will run as long as the queue is not empty\n while qq.size() > 0:\n # Dequeue the first vertex\n # We dequeue the first vertex and set (v) to it\n v = qq.dequeue()\n\n # Next we check to see if that vertex has already been visited\n if v not in visited:\n # If if has not been visited, we print it and mark it as visited\n print(v)\n visited.add(v)\n\n # Then we add all of its neighbors to the back of the queue\n for next_vert in self.get_neighbors(v):\n qq.enqueue(next_vert)", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def bfs(gdict):\n q = deque()\n graph, vertices = create_graph(gdict, BFSVertex)\n\n v = input('Enter the start vertex or none for start with first vertex: ')\n print()\n if not v:\n v = vertices[0]\n\n try:\n v = graph[v]\n except KeyError:\n print('This vertex does not exist.')\n\n print(v)\n v.visit = 1\n q.append(v)\n while q:\n u = q.popleft()\n\n for a in u.adj:\n s = graph[a]\n if s.visit == 0:\n s.visit = 1\n s.distance = u.distance + 1\n s.father = u.name\n q.append(s)\n\n return 
graph", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # fringe priority queue\n fringe = util.PriorityQueue()\n fringe.push([problem.getStartState()],1) # fringe will have (priority, order, [s0,s1,..])\n\n # closed set\n closed = []\n\n i = 0\n while not fringe.isEmpty():\n\n # get highest priority path for expansion e.g. [s0,s2,s4]\n path_exp = fringe.pop()\n\n # take last node in path e.g. s4\n node_exp = path_exp[-1]\n\n # check goal state\n if problem.isGoalState(node_exp): # check if goal\n actions = actions_for_path(problem,path_exp)\n #import pdb; pdb.set_trace()\n return actions\n\n # add expanded node into closed set e.g. [s0,s1,s2]\n if node_exp not in closed:\n closed.append(node_exp)\n else:\n # if it's in the closed set, don't expand\n continue\n\n # get sucessors to expand fringe\n successors = problem.getSuccessors(node_exp)\n for successor in successors:\n # unpack states, actions\n ss,aa,_ = successor\n if ss not in closed:\n path = path_exp+[ss]\n # expand fringe by adding candidate paths, prioritize by len of path\n fringe.push(path,len(path))\n\n #i+=1\n if i==1000:\n import pdb; pdb.set_trace()\n\n util.raiseNotDefined()", "def bfs(self, start, end):\n\n queue = [start]\n parent = dict()\n\n # Initialize parent dictionary\n for v in iter(self._reachable): parent[v] = None\n parent[start] = start\n\n while len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) == end: break\n\n for v in self.get_reachables(x, y):\n if parent[v] is not None: \n # Vertex v already visited\n continue\n parent[v] = (x, y)\n queue.append(v)\n\n # Reconstruct path\n path = [end]\n vertex = end\n\n while parent[vertex] != vertex:\n if parent[vertex] is None: return []\n path.append(parent[vertex])\n vertex = parent[vertex]\n\n path.reverse()\n return path", "def bfs(self, start):\n output_list = []\n queue = Queue()\n queue.put(start)\n visited = set(start)\n while not queue.empty():\n current_node = queue.get()\n output_list.append(current_node)\n visited.add(current_node)\n for node in self.__graph[current_node].neighbours:\n if node.name not in visited:\n queue.put(node.name)\n return output_list", "def breadthFirstSearch(problem):\n\t\"*** YOUR CODE HERE ***\"\n\n\t# Create the queue, and visited array to keep track of visited nodes.\n\tdfsStack = util.Queue()\n\tvisited = []\n\t# Get the first state in the graph, push to the queue\n\tfirst = problem.getStartState()\n\tdfsStack.push([first, [], 0])\n\n\t# While the queue is not empty, pop the first node from the queue, and check if that state\n # is the goal state. If so, return the actions for that node. 
Otherwise, append that state\n # to the visited array, get its successors, and push them to the queue.\n\twhile not dfsStack.isEmpty():\n\t\tNewNode = dfsStack.pop()\n\t\tif((problem.isGoalState(NewNode[0]) == True)):\n\t\t\treturn NewNode[1]\n\t\tif(NewNode[0] not in visited):\n\t\t\tvisited.append(NewNode[0])\n\t\t\tfor NextNode in problem.getSuccessors(NewNode[0]):\n\t\t\t\tif NextNode[0] not in visited:\n\t\t\t\t\tdfsStack.push((NextNode[0], NewNode[1] + [NextNode[1]], NextNode[2]))", "def bfs(g,startnode):\n Q = deque('') # initialize Q to be empty queue\n \n inf = float(\"inf\") # define infinity\n result = {}\n # assign infinite length to every node\n for node in g:\n result[node] = inf\n result[startnode] = 0 # assign start node length = 0\n Q.append(startnode) # attach the start node to the queue\n \n while len(Q) > 0:\n j = Q.popleft()\n for neighbor in g[j]:\n if result[neighbor] == inf:\n result[neighbor] = result[j] + 1\n Q.append(neighbor)\n \n return result", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index 
{0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def breadthFirstSearch(problem):\n # Initialization\n startState = problem.getStartState()\n #print \"Start:\", startState\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n route = util.Stack()\n closed = set([startState])\n queue = util.Queue() # BFS use queue\n\n #print problem.getSuccessors(startState)\n \n for successor in problem.getSuccessors(startState):\n # Use list(old_list) to make a copy of current route\n queue.push((successor, list(route.list)))\n \n # Tree search\n while not queue.isEmpty():\n #print \"Queue: \", queue.list\n ((currentState, action, cost), route.list) = queue.pop()\n \n if currentState in closed:\n continue\n\n #print \"Go\", action\n #print \"In\", currentState\n route.push(action)\n #print \"Route\", route.list\n\n if problem.isGoalState(currentState): # Check for goal condition\n #print \">>Finished<<\", route.list\n #util.pause()\n return route.list # Return the route\n \n # Current state is not goal state\n closed.add(currentState)\n for successor in problem.getSuccessors(currentState):\n if successor[0] in closed:\n #print \"-Closed \", successor\n continue # this state is already expanded\n \n #print \"-Open \", successor\n # Use list(old_list) to make a copy of current route\n queue.push((successor, list(route.list)))", "def bfs(initial_state, dimension=3):\n\t\n\treturn search(initial_state, Frontier(Queue), dimension)", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (queue y set)\n openNodes = util.Queue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = 
Node(problem.getStartState(), '', 0, None)\n\n #Metemos el nodo en la cola\n openNodes.push(node)\n\n #Iteramos para cada nodo de la pila\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #Sacamos el nodo de arriba de la pila\n node = openNodes.pop()\n if problem.isGoalState(node.name):\n break\n else: #Expandimos los nodos sucesores del nodo n si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n #Metemos al sucesor en la cola\n openNodes.push(succNode)\n #Metemos el nodo n en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState\n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in BFS\n frontierQueue = util.Queue()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierQueue.push(frontierRoute)\n\n currentRoute = []\n\n #start BFS\n while not(frontierQueue.isEmpty()):\n currentStage = frontierQueue.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3] \n \n if problem.isGoalState(currentState): \n break\n \n if currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierQueue.push(frontierRoute)\n \n exploredStates.append(currentState)\n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def bfs(graph, start, end):\n if start not in graph:\n raise RuntimeError('Unknown start node: {}'.format(start))\n search_queue = deque()\n search_queue += graph[start]\n searched = [start]\n while search_queue:\n item = search_queue.popleft()\n if item not in searched:\n searched.append(item)\n if item == end:\n return searched\n search_queue += graph[item]\n return None", "def breadth_first_traversal(self, cur_node=None):\n if cur_node is None:\n cur_node = self.root\n if cur_node is None:\n return\n q = []\n q.append(cur_node)\n while len(q) > 0:\n cur_node = q.pop(0)\n yield cur_node.data\n if cur_node.left:\n q.append(cur_node.left)\n if cur_node.right:\n q.append(cur_node.right)", "def breadth_first_search(self, vertex):\n\n visited = [False] * self.V\n queue = list()\n # Appending the vertex to an empty queue\n queue.append(vertex)\n\n # Marking the Visiting Vertex as True\n visited[vertex] = True\n print(\"\\n\\nBreadth First Search: \", end=\" \")\n while queue:\n # Popping the First Element in queue\n s = queue.pop(0)\n print(s, end=\" \")\n\n # Visiting the adjacent vertices of queue\n # And Validating if the vertex is visited\n for i in self.adj_list[s]:\n if not visited[i]:\n queue.append(i)\n visited[i] = True", "def dfs(self, start_node, cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for 
neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n \n \n qu.push(node)\n lis.append(node)\n \n print qu.list\n while qu.isEmpty()!=True:\n node=qu.pop()\n pos=node['state']\n visited.append(pos)\n print visited\n if problem.isGoalState(pos):\n print \"found\"\n return getPath(problem,node)\n #break\n suc=problem.getSuccessors(pos)\n if suc ==None:\n continue \n \n print suc\n for step in suc:\n #if step not in dic :\n if step[0] not in visited:\n childnode={}\n childnode['parent']=pos\n childnode['direction']=step[1]\n childnode['state']=step[0]\n qu.push(childnode)\n lis.append(childnode)\n \n\n #util.raiseNotDefined()", "def bfs(maze):\n # TODO: Write your code here\n q = queue.Queue()\n q.put(maze.getStart())\n traversed = []\n path = []\n tracker = {maze.getStart(): None} #Tracker needs to contain tuples\n\n while q:\n curr_loc = q.get() \n\n if curr_loc not in traversed: #Add to traversed points list\n traversed.append(curr_loc)\n\n if maze.isObjective(curr_loc[0], curr_loc[1]): #Reached end of maze\n finished = curr_loc \n break\n\n nextpath = maze.getNeighbors(curr_loc[0], curr_loc[1]) #Search neighbor points\n for point in nextpath:\n if point not in traversed and maze.isValidMove(point[0], point[1]):\n q.put(point)\n tracker[point] = curr_loc #Update curr_loc\n\n while finished:\n path.insert(0, finished) \n finished = tracker[finished]\n\n return path", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def bfs(self, starting_vertex, destination_vertex):\n # create a empty queue, and enqueue a PATH to the starting vertex\n neighbors_to_visit = Queue()\n # queue.enqueue([starting_vertex])\n neighbors_to_visit.enqueue([starting_vertex])\n # create a set for visited vertices\n visited = set()\n # while the queue is not empty\n while neighbors_to_visit.size() > 0:\n # dequeue the first PATH\n current_path = neighbors_to_visit.dequeue()\n # grab the last vertex in the path\n current_vertex = current_path[-1]\n # check if its the target\n if current_vertex == destination_vertex:\n # Return the path \n return current_path\n else:\n # if it hasn't been visited\n if current_vertex not in visited:\n # mark it as visited\n visited.add(current_vertex)\n # make new versions of the current path, with each neighbor added to them\n edges = self.get_neighbors(current_vertex)\n for edge in edges:\n # duplicate the path\n path_copy = list(current_path)\n # add the neighbor\n path_copy.append(edge)\n # add 
the new path to the queue\n neighbors_to_visit.enqueue(path_copy)", "def breadth_first_search(problem):\n fringe = util.Queue()\n return general_search(problem, fringe)", "def breadth_first_list(graph, current=\"a\"):\n queue = []\n queue.append(current)\n while queue:\n current = queue.pop(0)\n print(current)\n for node in graph.get(current):\n queue.append(node)", "def BFS(graph, s, n):\n #Implement queue using list. list.pop() to dequeue and list.insert(0,x) to enqueue.\n visited = [False] * n\n queue = []\n \n queue.insert(0, s)\n visited[s] = True\n while(queue):\n v = queue.pop()\n print(v, end= \" \")\n \n for i in range(len(graph[v])):\n if (not visited[graph[v][i]]):\n queue.insert(0, graph[v][i] )\n visited[graph[v][i]] = True", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n BFS(adjList, s, n)", "def BFS(self,s,t,parent):\n #mark all vertices as not visited\n visited = [False]*(self.ROWS);\n # initialize a queue\n queue = []\n # add source to q and mark it visited\n queue.append(s)\n visited[s] = True\n #Breadth-first-search\n while queue:\n n = queue.pop(0)\n for index,val in enumerate(self.graph[n]):\n if visited[index] == False and val>0:\n queue.append(index)\n visited[index] = True\n parent[index] = n\n #return True if sink was visted\n if visited[t]:\n return True\n else:\n return False", "def bfs(self, vertex_s):\r\n nd_list = list(self.vertices())\r\n visited = dict((node, 0) for node in nd_list)\r\n\r\n nq = deque()\r\n pre_dict, dist = {}, {}\r\n nq.append(vertex_s)\r\n visited[vertex_s]=1\r\n dist[vertex_s] = 0\r\n\r\n loop_counts = 0\r\n while nq:\r\n s = nq.popleft()\r\n for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'\r\n loop_counts += 1\r\n \r\n #if not node in visited:\r\n if not visited[node]:\r\n nq.append(node) # let 'node' in queue\r\n pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'\r\n dist[node] = dist[s] + 1 # shortest path to 'root'\r\n visited[node]=1 # 'node' is visted\r\n #if node in visited and dist[node] == dist[s] + 1: # still within the shortest path\r\n if visited[node] and dist[node] == dist[s] + 1: # still within the shortest path\r\n if s not in pre_dict[node]: # if this path have NOT been recorded, let's do that now\r\n pre_dict[node].append(s) \r\n \r\n if visited[node] and dist[node] > dist[s] + 1: # the previous 'recorded' path is longer than our current path (via node 's'); let's update that path and distance\r\n pre_dict[node] = [s]\r\n dist[node] = dist[s] + 1\r\n #print(\" #loops: %d\" %loop_counts)\r\n #current_bfs[vertex_s] = pre_dict\r\n \r\n return pre_dict", "def DFS_RF(node, basket, stopnode):\n if len(node.subnodes) > 0:\n newnode = node.subnodes[-1]\n else:\n newnode = node.get_leftnode(moveup=True, stopnode=stopnode)\n\n return newnode", "def dfs(self, node):\n self.preOrderNumbers[node] = self.counter\n self.counter = self.counter + 1\n self.stackP.append(node)\n self.stackS.append(node)\n for neighbor_vertex in self.graph.edges[node]:\n if neighbor_vertex not in self.preOrderNumbers:\n self.dfs(neighbor_vertex)\n elif neighbor_vertex in self.notAssignedVertices:\n while self.preOrderNumbers[self.stackP[-1]] > 
self.preOrderNumbers[neighbor_vertex]:\n self.stackP.pop()\n\n if node == self.stackP[-1]:\n self.stackP.pop()\n component = []\n while node in self.stackS:\n vertex = self.stackS.pop()\n component.append(vertex)\n self.notAssignedVertices.remove(vertex)\n self.scComponents.append(component)", "def bfs(start, goal):\n queue = [(start, [start])]\n visited = set([start])\n while queue:\n (vertex, path) = queue.pop(0)\n if vertex == goal:\n return path\n\n for word in bank:\n count = 0\n for i, c in enumerate(vertex): # Count differences\n if c != word[i]:\n count += 1\n if count == 1: # Valid neighbor\n if word not in visited:\n visited.add(word)\n queue.append((word, path + [word]))\n\n return \"No path found :(\"", "def breadthFirstSearch(problem):\n\n frontier = util.Queue()\n # print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n # print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n # print 'Remove',repr(currNode.state)\n # print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n # print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored) and (succNode.state not in frontierSet):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. 
Entonces los nodos que estan en la lista\n necesariamente van a ser verificados antes de que se vuelva a insertar otro.\n \"\"\"\n frontier.push(succNode)\n # print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)", "def bfs(vertex, graph, distances, shortest_ways, queue=deque()):\n if vertex not in distances:\n distances[vertex] = 0\n shortest_ways[vertex] = vertex\n for neighbour in graph[vertex]:\n if neighbour not in distances:\n queue.append(neighbour)\n distances[neighbour] = distances[vertex] + 1\n shortest_ways[neighbour] = shortest_ways[vertex] + ' ' + vertex + neighbour\n while len(queue) > 0:\n vertex = queue.popleft()\n bfs(vertex, graph, distances, shortest_ways, queue)", "def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret", "def modified_bfs(start_id, node_dict, iteration_limit, depth_limit, reduction_factor):\n queue = [NodeWrapper(node_dict[start_id])]\n while queue:\n node_wrapper = queue.pop(0)\n node = node_wrapper.node\n for costumer in node.costumers:\n if costumer.name not in node_wrapper.visited_by:\n node_wrapper.visited_by[costumer.name] = 0\n visit_count = node_wrapper.visited_by[costumer.name]\n if visit_count < iteration_limit and node_wrapper.depth < depth_limit:\n if node.production_drop > node.out_edge_capacity_drop[costumer.name]:\n node.out_edge_capacity_drop[costumer.name] = node.production_drop\n costumer.in_edge_capacity_drop[node.name] = node.production_drop\n calculate_production_drop(costumer, reduction_factor)\n costumer_wrapper = NodeWrapper(costumer, node_wrapper.depth, node_wrapper.visited_by)\n if node.name not in costumer_wrapper.visited_by:\n costumer_wrapper.visited_by[node.name] = 0\n costumer_wrapper.visited_by[node.name] += 1\n costumer_wrapper.depth += 1\n queue.append(costumer_wrapper)", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def to_start(self, node):\n if node in self.graph:\n if node in self.keep_index_backward:\n for pred in 
self.keep_index_backward[node]:\n self.to_start(pred)\n\n if node in self.graph:\n self.start.append(node)\n self.graph.remove_node(node)\n\n if node in self.keep_index_forward:\n for succ in self.keep_index_forward[node]:\n self.to_start(succ)\n self.logger.debug('%s %s\\t(to_start: %s)', self.start, self.end, node)", "def bfs(self, initialSt, goalSt): # Breadth­First Search\n self.__reset_all_variables()\n\n start = time.perf_counter()\n\n frontier = deque() # deque will be treated as a queue\n frontier.append(initialSt)\n explored = set()\n frontier_U_explored = set() # for fasten up the lookup time\n\n max_frontier_size = 0\n max_ram_used = psutil.virtual_memory().used\n\n while len(frontier) != 0:\n currentState = frontier.popleft()\n explored.add(currentState)\n frontier_U_explored.add(currentState)\n\n if goalSt == currentState:\n end = time.perf_counter()\n self.__success(initialSt,\n currentState,\n len(explored)-1,\n len(frontier),\n max_frontier_size,\n frontier[-1].depth,\n end-start,\n max_ram_used,\n \"bfs\")\n return True\n\n for child in currentState.children():\n if child not in frontier_U_explored:\n frontier.append(child)\n\n max_frontier_size = len(frontier) if len(\n frontier) > max_frontier_size else max_frontier_size\n max_ram_used = psutil.virtual_memory().used if psutil.virtual_memory(\n ).used > max_ram_used else max_ram_used\n return False", "def bft(self, starting_vertex):\n # create a plan to visit queue and add starting_vertex to it\n plan_to_visit = Queue()\n plan_to_visit.enqueue(starting_vertex)\n # create a set for visited_vertices\n visited_vertices = set()\n # while the plan_to visit queue is not Empty:\n while plan_to_visit.size() > 0:\n # dequeue the first vertex on the queue\n current_vertex = plan_to_visit.dequeue()\n # if its not been visited\n if current_vertex not in visited_vertices:\n # print the vertex\n print(current_vertex)\n # mark it as visited, (add it to visited_verticles)\n visited_vertices.add(current_vertex)\n # add all unvisited neighbors to the queue\n for neighbor in self.get_neighbors(current_vertex):\n if neighbor not in visited_vertices:\n plan_to_visit.enqueue(neighbor)", "def bfs(self, root_node_id):\n try: \n status = dict.fromkeys(self._nodes.iterkeys(), 0)\n status[root_node_id] = 1 \n L = []\n q = Queue()\n q.enqueue(root_node_id) \n while not q.is_empty():\n curr_node_id = q.dequeue()\n status[curr_node_id] = -1\n L.append(self._nodes[curr_node_id])\n arcs_list = self._inc[curr_node_id]\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n if status[arc._head] is 0:\n status[arc._head] = 1\n q.enqueue(arc._head)\n record = record._next \n return L\n except KeyError:\n return []" ]
[ "0.712657", "0.7019322", "0.6977231", "0.6904166", "0.6873453", "0.6778046", "0.6764516", "0.67611027", "0.66524756", "0.65571046", "0.6479184", "0.64744705", "0.6444245", "0.6440059", "0.64398944", "0.64012533", "0.6394898", "0.6370323", "0.6339818", "0.63252306", "0.6316992", "0.63094336", "0.6294423", "0.6283701", "0.62831384", "0.6276797", "0.626941", "0.62513506", "0.62432253", "0.6210519", "0.6200652", "0.6186659", "0.6180449", "0.61650604", "0.6160895", "0.61582565", "0.61374503", "0.61316305", "0.61132455", "0.6108084", "0.6095351", "0.6093979", "0.60889375", "0.6079199", "0.60735977", "0.60674053", "0.60636973", "0.6063462", "0.6048149", "0.6046016", "0.6037571", "0.60328424", "0.60275435", "0.6021988", "0.60191256", "0.60077554", "0.59853214", "0.5968301", "0.59642655", "0.5960294", "0.59596914", "0.594717", "0.5942322", "0.5931264", "0.5927818", "0.59158015", "0.59142417", "0.5904908", "0.5890029", "0.58839923", "0.58839923", "0.58839923", "0.5883968", "0.5873605", "0.5873157", "0.5869669", "0.5869002", "0.58613515", "0.5854748", "0.58422166", "0.58414817", "0.5839675", "0.5837132", "0.58364826", "0.58152866", "0.57888037", "0.5786923", "0.5780218", "0.57755244", "0.575591", "0.5755158", "0.5752703", "0.57446456", "0.573819", "0.5729502", "0.5721925", "0.57171506", "0.5715944", "0.57039654", "0.570093" ]
0.59082425
67
This lengthy, irritating piece of code returns the slice idcs for subcube (i, j) in a decomposition of an original map of shape (2**HD_res[0], 2**HD_res[1]) into chunks of 2**LD_res pixels per side, together with buffers[0] and buffers[1] buffer pixels on each side, fixed by the periodicity condition of the HD map. Nothing very subtle for the map interior, but the edges and corners require a little bit of care.
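A quick worked example of that chunk bookkeeping, preceding the routine itself; the resolution values and the chunk index below are invented for illustration and are not taken from the original code:

# Illustrative values only, not part of the original routine.
HD_res, LD_res = (10, 10), (8, 8)        # HD map is (1024, 1024), chunks are (256, 256)
Nchks_0 = 2 ** (HD_res[0] - LD_res[0])   # 4 chunk rows
Nchks_1 = 2 ** (HD_res[1] - LD_res[1])   # 4 chunk columns, so 16 subcubes in total
N = 5                                    # flat chunk index
i, j = N // Nchks_1, N % Nchks_1         # -> (1, 1): second chunk row, second chunk column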
def get_slices_chk_N(N, LD_res, HD_res, buffers, inverse=False):
    assert len(LD_res) == 2 and len(HD_res) == 2
    if np.all(LD_res == HD_res):
        assert N == 0, N
        assert buffers == (0, 0), buffers
        sl0_LD = slice(0, 2 ** LD_res[0])  # Center of buffered cube
        sl1_LD = slice(0, 2 ** LD_res[1])
        sl0_HD = slice(0, 2 ** HD_res[0])  # Center of buffered cube
        sl1_HD = slice(0, 2 ** HD_res[1])
        ret_LD = [(sl0_LD, sl1_LD)]
        ret_HD = [(sl0_HD, sl1_HD)]
        return ret_LD, ret_HD

    assert np.all(LD_res < HD_res)
    assert len(buffers) == 2
    assert buffers[0] < 2 ** LD_res[0] and buffers[1] < 2 ** LD_res[1]
    N0 = 2 ** LD_res[0]  # shape of small cube, buffers excl.
    N1 = 2 ** LD_res[1]
    N0H = 2 ** HD_res[0]  # shape of large cube
    N1H = 2 ** HD_res[1]
    Nchks_0 = 2 ** (HD_res[0] - LD_res[0])
    Nchks_1 = 2 ** (HD_res[1] - LD_res[1])
    assert N < Nchks_1 * Nchks_0, N
    b0 = buffers[0]
    b1 = buffers[1]
    ret_LD = []
    ret_HD = []
    j = N % Nchks_1
    i = N // Nchks_1  # in 0, ..., Nchks_0 -1

    if inverse:  # We want the inverse mapping only
        sl0_LD = slice(b0, N0 + b0)  # Center of buffered cube
        sl1_LD = slice(b1, N1 + b1)
        sl0_HD = slice(i * N0, (i + 1) * N0)  # slices of HD cube
        sl1_HD = slice(j * N1, (j + 1) * N1)  # slices of HD cube
        ret_LD.append((sl0_LD, sl1_LD))
        ret_HD.append((sl0_HD, sl1_HD))
        return ret_LD, ret_HD

    if 0 < i < Nchks_0 - 1:  # i in the interior :
        sl0_LD = slice(0, N0 + 2 * b0)  # Slices of LD cube
        sl0_HD = slice(i * N0 - b0, (i + 1) * N0 + b0)  # slices of HD cube
        if 0 < j < Nchks_1 - 1:  # We are in the interior, no big deal
            sl1_LD = slice(0, N1 + 2 * b1)
            sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        elif j == 0:
            sl1_LD = slice(b1, N1 + 2 * b1)
            sl1_HD = slice(0, N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(0, b1)
            sl1_HD = slice(2 ** HD_res[1] - b1, 2 ** HD_res[1])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        else:
            assert j == Nchks_1 - 1
            sl1_LD = slice(0, N1 + b1)
            sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(N1 + b1, N1 + 2 * b1)
            sl1_HD = slice(0, b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD

    elif i == 0:
        # Bulk 0 slices
        sl0_LD = slice(b0, N0 + 2 * b0)
        sl0_HD = slice(0, N0 + b0)
        if j == 0:  # Upper left corner. Two tweaks.
            # Bulk :
            sl1_LD = slice(b1, N1 + 2 * b1)
            sl1_HD = slice(0, N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(b1, N1 + 2 * b1)
            sl1_HD = slice(0, N1 + b1)
            sl0_LD = slice(0, b0)
            sl0_HD = slice(N0H - b0, N0H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(b0, N0 + 2 * b0)
            sl0_HD = slice(0, N0 + b0)
            sl1_LD = slice(0, b1)
            sl1_HD = slice(N1H - b1, N1H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(0, b0)
            sl1_LD = slice(0, b1)
            sl0_HD = slice(N0H - b0, N0H)
            sl1_HD = slice(N1H - b1, N1H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        elif j == Nchks_1 - 1:  # upper right corner
            # Bulk :
            sl1_LD = slice(0, N1 + b1)
            sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(0, b0)
            sl0_HD = slice(N0H - b0, N0H)
            sl1_LD = slice(0, N1 + b1)
            sl1_HD = slice(N1H - N1 - b1, N1H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(N1 + b1, N1 + 2 * b1)
            sl1_HD = slice(0, b1)
            sl0_LD = slice(b0, N0 + 2 * b0)
            sl0_HD = slice(0, b0 + N0)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            # Last little square is missing :
            sl0_LD = slice(0, b0)
            sl0_HD = slice(N0H - b0, N0H)
            sl1_LD = slice(N1 + b1, N1 + 2 * b1)
            sl1_HD = slice(0, b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        else:
            assert 0 < j < Nchks_1 - 1
            sl1_LD = slice(0, N1 + 2 * b1)
            sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(0, b0)
            sl0_HD = slice(2 ** HD_res[0] - b0, 2 ** HD_res[0])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD

    elif i == Nchks_0 - 1:
        sl0_LD = slice(0, N0 + b0)
        sl0_HD = slice(2 ** HD_res[0] - N0 - b0, 2 ** HD_res[0])
        if j == 0:  # lower left corner. Two tweaks.
            # Bulk :
            sl1_LD = slice(b1, N1 + 2 * b1)
            sl1_HD = slice(0, N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(0, b1)
            sl1_HD = slice(2 ** HD_res[1] - b1, 2 ** HD_res[1])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(N0 + b0, N0 + 2 * b0)
            sl0_HD = slice(0, b0)
            sl1_LD = slice(b1, N1 + 2 * b1)
            sl1_HD = slice(0, N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(N0 + b0, N0 + 2 * b0)
            sl1_LD = slice(0, b1)
            sl0_HD = slice(0, b0)
            sl1_HD = slice(N1H - b1, N1H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        elif j == Nchks_1 - 1:  # Lower right corner
            # Bulk :
            sl1_LD = slice(0, N1 + b1)
            sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl1_LD = slice(N1 + b1, N1 + 2 * b1)
            sl1_HD = slice(0, b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(N0 + b0, N0 + 2 * b0)
            sl0_HD = slice(0, b0)
            sl1_LD = slice(0, N1 + b1)
            sl1_HD = slice(N1H - N1 - b1, N1H)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(N0 + b0, N0 + 2 * b0)
            sl1_LD = slice(N1 + b1, N1 + 2 * b1)
            sl0_HD = slice(0, b0)
            sl1_HD = slice(0, b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        else:
            assert 0 < j < Nchks_1 - 1
            sl1_LD = slice(0, N1 + 2 * b1)
            sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            sl0_LD = slice(N0 + b0, N0 + 2 * b0)
            sl0_HD = slice(0, b0)
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
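A minimal sketch of how the returned slice pairs could be consumed, shown here only for illustration: the array names (hd_map, out_map), the parameter values and the per-chunk processing step are assumptions, not part of the original routine. The forward call may return several (LD, HD) slice pairs for edge and corner chunks because of the periodic wrap-around; the inverse call returns a single pair that places the unbuffered core of a chunk back into the HD map.

import numpy as np

HD_res, LD_res, buffers = (10, 10), (8, 8), (8, 8)   # illustrative values only
hd_map = np.random.standard_normal((2 ** HD_res[0], 2 ** HD_res[1]))
out_map = np.zeros_like(hd_map)
buf_shape = (2 ** LD_res[0] + 2 * buffers[0], 2 ** LD_res[1] + 2 * buffers[1])
n_chunks = 2 ** (HD_res[0] - LD_res[0]) * 2 ** (HD_res[1] - LD_res[1])

for N in range(n_chunks):
    # Forward mapping: copy the buffered chunk out of the (periodic) HD map.
    chunk = np.empty(buf_shape)
    sLDs, sHDs = get_slices_chk_N(N, LD_res, HD_res, buffers)
    for sLD, sHD in zip(sLDs, sHDs):
        chunk[sLD] = hd_map[sHD]

    # ... per-chunk work on the buffered chunk would go here (placeholder) ...

    # Inverse mapping: write only the unbuffered core back to its HD location.
    sLDs, sHDs = get_slices_chk_N(N, LD_res, HD_res, buffers, inverse=True)
    out_map[sHDs[0]] = chunk[sLDs[0]]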
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _segments2slices(array_size, grid_segments, patch_segments):\n patch_slices = [slice(start, stop) for start, stop in patch_segments]\n array_slices = []\n\n for start, stop in grid_segments:\n segment_size = max(abs(start), abs(stop))\n k = int(ceil(float(segment_size) / array_size) + (-1 if start >= 0 else 0))\n cell_mirrored = k % 2\n \n step = 1\n if start < 0:\n start = k * array_size + start\n stop = k * array_size + stop\n else:\n start = start - k * array_size\n stop = stop - k * array_size\n\n if cell_mirrored:\n start = array_size - start - 1\n stop = array_size - stop - 1\n step = -1\n\n if stop < 0:\n stop = None\n\n array_slices.append(slice(start, stop, step))\n \n return array_slices, patch_slices", "def CC_2D(\n h5path_in,\n h5path_mask='',\n slicedim=0,\n usempi=False,\n h5path_out='',\n protective=False,\n ):\n\n # check output path\n if '.h5' in h5path_out:\n status, info = utils.h5_check(h5path_out, protective)\n print(info)\n if status == \"CANCELLED\":\n return\n\n # open data for reading\n h5file_mm, ds_mm, elsize, axlab = utils.h5_load(h5path_in)\n if h5path_mask:\n h5file_md, ds_md, _, _ = utils.h5_load(h5path_mask)\n\n # prepare mpi # TODO: could allow selection of slices/subset here\n mpi_info = utils.get_mpi_info(usempi)\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n\n # open data for writing\n h5file_out, ds_out = utils.h5_write(None, ds_mm.shape, 'uint32',\n h5path_out,\n element_size_um=elsize,\n axislabels=axlab,\n comm=mpi_info['comm'])\n\n # slicewise labeling\n maxlabel = 0\n for i in series:\n\n slcMM = utils.get_slice(ds_mm, i, slicedim, 'bool')\n if h5path_mask:\n slcMD = utils.get_slice(ds_md, i, slicedim, 'bool')\n labels, num = label(np.logical_and(~slcMM, slcMD), return_num=True)\n else:\n labels, num = label(~slcMM, return_num=True)\n print(\"found %d labels in slice %d\" % (num, i))\n\n if mpi_info['enabled']:\n # NOTE: assumed max number of labels in slice is 10000\n labels[~slcMM] += 10000 * i\n if i == n_slices - 1:\n maxlabel = np.amax(labels)\n else:\n labels[~slcMM] += maxlabel\n maxlabel += num\n\n if slicedim == 0:\n ds_out[i, :, :] = labels\n elif slicedim == 1:\n ds_out[:, i, :] = labels\n elif slicedim == 2:\n ds_out[:, :, i] = labels\n\n # save the maximum labelvalue in the dataset\n print(\"found %d labels\" % (maxlabel))\n if mpi_info['rank'] == mpi_info['size'] - 1:\n root = h5path_out.split('.h5')[0]\n fpath = root + '.npy'\n np.save(fpath, np.array([maxlabel]))\n\n # close and return\n try:\n h5file_mm.close()\n h5file_out.close()\n if h5path_mask:\n h5file_md.close()\n except (ValueError, AttributeError):\n return ds_out", "def core_slices(self, chunk):\n intersect_slices = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n intersect_slices.append(slice(s.start + olap, s.stop))\n elif s.stop == b.stop:\n intersect_slices.append(slice(s.start, s.stop - olap))\n else:\n intersect_slices.append(s)\n\n return tuple(self.remove_chunk_overlap(chunk, intersect_slices))", "def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from 
the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res", "def get_2D_slices(atlas: str):\n if atlas == \"avg\":\n AVGT, metaAVGT = nrr.read(\"nrrd/average_template_10.nrrd\")\n folder = \"avg\"\n elif atlas == \"nissl\":\n AVGT, metaAVGT = nrr.read(\"nrrd/ara_nissl_10.nrrd\")\n folder = \"nissl\"\n else:\n raise Exception(\"Wrong argument for var atlas\")\n\n degress = [i for i in range(-15, 16, 3)]\n\n for degree in tqdm(degress):\n rotated = rot(AVGT, angle=degree, mode=\"nearest\", order=0, reshape=True)\n print(degree)\n for plate_no in tqdm(range(300, 1300, 5)):\n save_img(rotated[plate_no], folder=folder, plate_no=plate_no, degree=degree)", "def getNewIntersections(self):\n sections = []\n sections2D = []\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n xis = xs / L\n R = self.XYZCoordinate.T\n if len(xis) != len(self.SkeletonPoints):\n raise Exception(\"Conflit between xis and SkeletonPoints.\", self.SKeletonPoints)\n for i in range(len(xis)):\n xi = xis[i]\n normalVec = self.returnTangentVectorAtXi(xi)\n originPoint = self.SkeletonPoints[i]\n \n # define transform manually\n T = np.zeros((4,4))\n T[:3,:3] = R[:,np.array([1,2,0])]; # 3rd axis is reduced by projection\n T[:3,3] = originPoint; \n T[3,3] = 1.0\n to_2D = (np.linalg.inv(T)).astype(float)\n \n try:\n slice = self.mesh.section(plane_origin=originPoint, plane_normal=normalVec)\n # 选取每个截面图中面积最大的子图,实现初步去噪\n if slice is not None:\n slice_2D, to_3D = slice.to_planar(to_2D=to_2D,check=False)\n slices_splited = slice_2D.split()\n sliceIndex = np.argmax([s.area for s in slices_splited])\n slice_2D = slices_splited[sliceIndex]\n if True: #slice_2D.area > 1e-1:\n sections2D.append(slice_2D)\n sections.append(slice_2D.to_3D(to_3D))\n except:\n pass\n \n self.Intersections2D = sections2D\n self.Intersections = sections", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, 
uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", 
fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / 
scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices", "def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot 
caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return", "def locate_slice_chunk(slice_start, slice_stop, height, overlap_metadata):\n if slice_stop < slice_start:\n raise ValueError(\n \"Stopping index must be larger than the starting index!!!\")\n g_nrow = overlap_metadata.shape[0] + 1\n side = overlap_metadata[0, 0, 1]\n overlap_list = overlap_metadata[:, 0, 0]\n if side == 1:\n list_slices = [(np.arange(i * height, i * height + height) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n else:\n list_slices = [\n (np.arange(i * height + height - 1, i * height - 1, -1) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n list_slices = np.asarray(list_slices)\n results = []\n for i, list1 in enumerate(list_slices):\n result1 = []\n if side == 1:\n for slice_idx in range(slice_start, slice_stop):\n pos = 
np.squeeze(np.where(list1 == slice_idx)[0])\n if pos.size == 1:\n fact = 1.0\n if i == 0:\n ver_overlap = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap:\n fact = dis1 / (ver_overlap - 1)\n elif i == (g_nrow - 1):\n ver_overlap = overlap_list[i - 1]\n if pos < ver_overlap:\n fact = pos / (ver_overlap - 1)\n else:\n ver_overlap1 = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap1:\n fact = dis1 / (ver_overlap1 - 1)\n if pos < ver_overlap1:\n fact = pos / (ver_overlap1 - 1)\n ver_overlap2 = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap2:\n fact = dis1 / (ver_overlap2 - 1)\n if pos < ver_overlap2:\n fact = pos / (ver_overlap2 - 1)\n result1.append([i, pos, fact])\n else:\n for slice_idx in range(slice_start, slice_stop):\n pos = np.squeeze(np.where(list1 == slice_idx)[0])\n if pos.size == 1:\n fact = 1.0\n if i == 0:\n ver_overlap = overlap_list[i]\n if pos < ver_overlap:\n fact = 1.0 * pos / (ver_overlap - 1)\n elif i == (g_nrow - 1):\n ver_overlap = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap:\n fact = 1.0 * dis1 / (ver_overlap - 1)\n else:\n ver_overlap1 = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap1:\n fact = 1.0 * dis1 / (ver_overlap1 - 1)\n if pos < ver_overlap1:\n fact = 1.0 * pos / (ver_overlap1 - 1)\n ver_overlap2 = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap2:\n fact = 1.0 * dis1 / (ver_overlap2 - 1)\n if pos < ver_overlap2:\n fact = 1.0 * pos / (ver_overlap2 - 1)\n result1.append([i, pos, fact])\n if len(result1) > 0:\n results.append(result1)\n return results", "def _build_slices(dataset, patch_shape, stride_shape):\n slices = []\n if dataset.ndim == 4:\n in_channels, i_z, i_y, i_x = dataset.shape\n else:\n i_z, i_y, i_x = dataset.shape\n\n k_z, k_y, k_x = patch_shape\n s_z, s_y, s_x = stride_shape\n z_steps = SliceBuilder._gen_indices(i_z, k_z, s_z)\n for z in z_steps:\n y_steps = SliceBuilder._gen_indices(i_y, k_y, s_y)\n for y in y_steps:\n x_steps = SliceBuilder._gen_indices(i_x, k_x, s_x)\n for x in x_steps:\n slice_idx = (\n slice(z, z + k_z),\n slice(y, y + k_y),\n slice(x, x + k_x)\n )\n if dataset.ndim == 4:\n slice_idx = (slice(0, in_channels),) + slice_idx\n slices.append(slice_idx)\n return slices", "def _calc_slices(X):\n\n n_rows = X.shape[0]\n slices = [n_rows // comm.size for _ in range(comm.size)]\n count = n_rows % comm.size\n for i in range(count):\n slices[i] += 1\n\n return np.array(slices, dtype=np.int64)", "def _get_slices(\n self,\n stride: int,\n patch_size: Tuple[int, int],\n img_size: Tuple[int, int],\n pad: int = None,\n ) -> Tuple[Dict[str, slice], int, int]:\n y_end, x_end = patch_size\n nrows, pady = self._get_margins(y_end, img_size[0], stride, pad=pad)\n ncols, padx = self._get_margins(x_end, img_size[1], stride, pad=pad)\n\n xyslices = {}\n for row in range(nrows):\n for col in range(ncols):\n y_start = row * stride\n y_end = y_start + patch_size[0]\n x_start = col * stride\n x_end = x_start + patch_size[1]\n xyslices[f\"y-{y_start}_x-{x_start}\"] = (\n slice(y_start, y_end),\n slice(x_start, x_end),\n )\n\n return xyslices, pady, padx", "def define_sections (data_shape, xbin=1, ybin=1, tel=None):\n\n ysize, xsize = data_shape\n ny = get_par(set_bb.ny,tel)\n nx = get_par(set_bb.nx,tel)\n dy = ysize // ny\n dx = xsize // nx\n\n ysize_chan = get_par(set_bb.ysize_chan,tel) // ybin\n xsize_chan = get_par(set_bb.xsize_chan,tel) // xbin\n ysize_os = (ysize-ny*ysize_chan) // ny\n xsize_os = 
(xsize-nx*xsize_chan) // nx\n\n # the sections below are defined such that e.g. chan_sec[0] refers\n # to all pixels of the first channel, where the channel indices\n # are currently defined to be located on the CCD as follows:\n #\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 8, 9, 10, 11, 12, 13, 14, 15]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n # [ 0, 1, 2, 3, 4, 5, 6, 7]\n\n # channel section slices including overscan; shape=(16,2)\n chan_sec = tuple([(slice(y,y+dy), slice(x,x+dx))\n for y in range(0,ysize,dy) for x in range(0,xsize,dx)])\n\n # channel data section slices; shape=(16,2)\n data_sec = tuple([(slice(y,y+ysize_chan), slice(x,x+xsize_chan))\n for y in range(0,ysize,dy+ysize_os) for x in range(0,xsize,dx)])\n\n # channel vertical overscan section slices; shape=(16,2)\n # cut off [ncut] pixels to avoid including pixels on the edge of the\n # overscan that are contaminated with flux from the image\n # and also discard last column as can have high value\n ncut = 5\n ncut_vert = max(ncut // xbin, 1)\n os_sec_vert = tuple([(slice(y,y+dy), slice(x+xsize_chan+ncut_vert,x+dx-1))\n for y in range(0,ysize,dy) for x in range(0,xsize,dx)])\n\n # channel horizontal overscan sections; shape=(16,2)\n # cut off [ncut] pixels to avoid including pixels on the edge of the\n # overscan that are contaminated with flux from the image\n ncut_hori = max(ncut // ybin, 1)\n ysize_os_cut = ysize_os - ncut_hori\n os_sec_hori = tuple([(slice(y,y+ysize_os_cut), slice(x,x+dx))\n for y in range(dy-ysize_os_cut,dy+ysize_os_cut,ysize_os_cut)\n for x in range(0,xsize,dx)])\n\n # channel reduced data section slices; shape=(16,2)\n data_sec_red = tuple([(slice(y,y+ysize_chan), slice(x,x+xsize_chan))\n for y in range(0,ysize-ny*ysize_os,ysize_chan)\n for x in range(0,xsize-nx*xsize_os,xsize_chan)])\n\n\n return chan_sec, data_sec, os_sec_hori, os_sec_vert, data_sec_red", "def make_cspad_pix_coordinate_arrays (sp, xc_um, yc_um, orient_deg, tilt_deg) : # All lists of [4,8] \n sp.make_maps_of_2x1_pix_coordinates()\n\n sp.x_pix_um = np.zeros((sp.quads,sp.sects,sp.rows,sp.cols), dtype=np.float32)\n sp.y_pix_um = np.zeros((sp.quads,sp.sects,sp.rows,sp.cols), dtype=np.float32)\n\n angle_deg = orient_deg + tilt_deg\n \n for quad in range(sp.quads) :\n for sect in range(sp.sects) :\n\n angle_rad = math.radians(angle_deg[quad][sect]) \n S,C = math.sin(angle_rad), math.cos(angle_rad)\n Xrot, Yrot = rotation(sp.x_map2x1, sp.y_map2x1, C, S)\n\n sp.x_pix_um[quad][sect][:] = Xrot + xc_um[quad][sect]\n sp.y_pix_um[quad][sect][:] = Yrot + yc_um[quad][sect]\n\n sp.x_pix_um -= sp.x_pix_um.min() + 5 # add offset in um to get rid of \"rounding\" strips...\n sp.y_pix_um -= sp.y_pix_um.min() + 5", "def _getitem2d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iz = index[1]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[1,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + 
self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[1] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[1,i])\n iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iz, slice):\n sss[1] = 0\n\n return resultglobal[tuple(sss)]", "def read_cine_protocol(series_dicom_header):\n assert len(series_dicom_header.StudyInstanceUID.unique()) == 1, 'Trying to read dicoms from multiple studies!'\n assert len(series_dicom_header.SpacingBetweenSlices.unique()) == 1\n\n SpacingBetweenSlices = list(series_dicom_header.SpacingBetweenSlices)[0]\n SliceLocations = series_dicom_header.SliceLocation.unique()\n number_of_slices = len(SliceLocations) \n\n phases_per_slice = [len(series_dicom_header[series_dicom_header.SliceLocation==SliceLocation].InstanceNumber) \n for SliceLocation in series_dicom_header.SliceLocation.unique()]\n number_of_phases = phases_per_slice[0]\n\n if len(np.unique(phases_per_slice)) != 1:\n warnings.warn('Number of phases is variable across slice locations! 
Could be real or error, check!.')\n return None\n \n print('Found cine study with (number_of_slices, number_of_phases)', number_of_slices, number_of_phases)\n pixel_array = pydicom.read_file(series_dicom_header.iloc[0].FileName).pixel_array\n \n sax_4D = np.zeros((pixel_array.shape +(number_of_slices, number_of_phases)), dtype=pixel_array.dtype)\n \n dicom_4D_paths = {}\n for SliceIndex, SliceLocation in enumerate(sorted(SliceLocations)):\n slice_header = series_dicom_header[series_dicom_header.SliceLocation==SliceLocation]\n dicom_4D_paths[SliceIndex] = []\n for InstanceIndex, InstanceNumber in enumerate(sorted(slice_header.InstanceNumber)):\n DicomFileName = slice_header[slice_header.InstanceNumber==InstanceNumber].FileName.item()\n dicom = pydicom.read_file(DicomFileName)\n sax_4D[:,:,SliceIndex,InstanceIndex] += dicom.pixel_array\n\n dicom_4D_paths[SliceIndex] += [DicomFileName]\n\n affine = read_affine(series_dicom_header.iloc[series_dicom_header.SliceLocation.argmin()])\n\n sax_4D = nib.Nifti1Image(sax_4D, affine=affine), \n sax_4D.SpacingBetweenSlices = SpacingBetweenSlices\n\n return sax_4D, dicom_4D_paths", "def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore", "def long_slice(image_data):\n\n\t# Process binary data and open.\n\tim = image_data.split('base64,')[1]\n\tim = base64.b64decode(im)\n\tim = io.BytesIO(im)\n\t\n\timg = Image.open(im)\n\twidth, height = img.size\n\tupper = 0\n\tleft = 0\n\t\n\t# Max height to fit pdf.\n\tmax_height_mm = 198\n\tmax_height = (max_height_mm * 96) / 25.4\n\n\tslice_size = max_height\n\n\tslices = int(math.ceil(height/slice_size))\n\tcount = 1\n\n\tfinal_slices = []\n\tfor slice in range(slices):\n\t\t# If no more slices needed, set the lower bound to bottom of image.\n\t\tif count == slices:\n\t\t\tlower = height\n\t\telse:\n\t\t\tlower = int(count * slice_size) \n\t\t \n\t\t# Set the bounding box. 
\n\t\tbbox = (left, upper, width, lower)\n\t\tworking_slice = img.crop(bbox)\n\n\t\t# Save png as bytes object.\n\t\tbyte_io = io.BytesIO()\n\t\tworking_slice.save(byte_io, 'png')\n\t\t\n\t\t# Convert bytes object to base64 string and save to list.\n\t\timg_str = base64.b64encode(byte_io.getvalue())\n\t\timg_str = 'data:image/png;base64,' + img_str.decode()\n\t\tfinal_slices.append(img_str)\n\n\t\tupper = upper + slice_size\n\t\tcount = count + 1\n\n\treturn final_slices", "def oscanSub(img):\n oscanL = img[:,10:50]\n oscanR = img[:,2110:2150]\n mdL=np.median(oscanL,axis=1)\n mdR=np.median(oscanR,axis=1)\n #rowL=np.arange(0,mdL.shape[0])\n #rowR=np.arange(0,mdR.shape[0])\n #(aL,bL,sda,sdb,se)=linefit(rowL,mdL)\n #(aR,bR,sda,sdb,se)=linefit(rowR,mdR)\n #oscanLfit=rowL*bL+aL\n #oscanRfit=rowR*bR+aR\n for i in range(1080):\n img[:,i] = img[:,i] - mdL #oscanLfit\n img[:,1080+i] = img[:,1080+i] - mdR #oscanRfit\n return img", "def dcm_to_npy(folder, start=1, stop=280, mid_slice=190, dim=120, energies=['40kVp', '80kVp'],\n load_directory=r'D:\\OneDrive - University of Victoria\\Research/CBCT/',\n save_directory=r'D:\\OneDrive - University of Victoria\\Research/CBCT/'):\n path = load_directory + folder + '/'\n save_path = save_directory + folder + '/'\n\n # Create the folder in the save_directory\n gof.create_folder(folder_name=folder, directory_path=save_directory)\n\n # Create the 'RawMatrices' folder\n gof.create_folder(folder_name='RawMatrices', directory_path=save_path)\n\n save_path = save_path + 'RawMatrices/'\n\n # Save each slice as .npy matrix\n for energy in energies:\n\n dirs3 = os.listdir(save_path)\n\n # Create the energy folder in the RawMatrices folder\n gof.create_folder(folder_name=energy, directory_path=save_path)\n\n save_path = save_path + energy + '/'\n\n # Sub file path\n subpath = energy + '/Mouse_Cropped.xst/'\n # Load the mid_slice view to find the edges of the phantom\n data = pyd.dcmread(path + subpath + 'volume0' + mid_slice + '.dcm')\n s6 = data.pixel_array\n\n # Center the image for cropping\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.imshow(s6)\n ax.set_title('Click the edges of the phantom in the order: top, bottom, left, right. 
'\n '\\n Left-click: add point, Right-click: remove point, Enter: stop collecting')\n\n # Array to hold the coordinates of the center of the ROI and its radius\n # Left-click to add point, right-click to remove point, press enter to stop collecting\n # First point\n coords = plt.ginput(n=-1, timeout=-1, show_clicks=True)\n coords = np.array(coords)\n coords = np.round(coords, decimals=0)\n top = coords[0]\n bottom = coords[1]\n left = coords[2]\n right = coords[3]\n # Round the center coordinates to index numbers\n x = int(round((right[0]+left[0])/2))\n y = int(round((bottom[1]+top[1])/2))\n\n for i in np.arange(start, stop):\n if i < 10:\n filename = 'volume000' + str(i) + '.dcm'\n savename = 'volume000' + str(i) + '.npy'\n elif i < 100 and i >= 10:\n filename = 'volume00' + str(i) + '.dcm'\n savename = 'volume00' + str(i) + '.npy'\n else:\n filename = 'volume0' + str(i) + '.dcm'\n savename = 'volume0' + str(i) + '.npy'\n\n # Crop image\n crop = dim/2\n data = pyd.dcmread(path+subpath+filename)\n matrix = data.pixel_array\n matrix = matrix[y-crop:y+crop, x-crop:x+crop]\n np.save(save_path+savename, matrix)", "def close_mask_in(im_slice_2d, side):\n new_slice = im_slice_2d.copy()\n \n x_no_0, y_no_0 = np.nonzero(im_slice_2d)\n if len(x_no_0) == 0: return new_slice, new_slice\n #breakpoint()\n x1 = x_no_0.min() \n x2 = x_no_0.max()\n if side == \"l\":\n x_mid = x2; x_aux1 = x_mid - 9 + 1; x_aux2 = x2 + 1\n elif side == \"r\":\n x_mid = x1; x_aux2 = x_mid + 9; x_aux1 = x1\n \n y_mid = y_no_0[np.where(x_no_0==x_mid)[0]].min()\n y_min = y_no_0.min()\n \n # inferior line\n new_slice[x1:x2+1, y_min] = 1\n # medial line\n new_slice[x_mid, y_min:y_mid+1] = 1\n new_slice = binary_fill_holes(new_slice)\n # in_short array:\n other_slice = new_slice.copy() \n other_slice[x_aux1:x_aux2, :] = 0\n \n return new_slice, other_slice", "def subdivideMesh(IKLE,MESHX,MESHY): \n # ~~> Singling out edges\n from matplotlib.tri import Triangulation\n edges = Triangulation(MESHX,MESHY,IKLE).get_cpp_triangulation().get_edges()\n \n # ~~> Memory allocation for new MESH\n IELEM = len(IKLE); IPOIN = len(MESHX); IEDGE = len(edges)\n JKLE = np.zeros((IELEM*4,3),dtype=np.int) # you subdivide every elements by 4\n MESHJ = np.zeros((IEDGE,2),dtype=np.int) # you add one point on every edges\n \n # ~~> Lookup tables for node numbering on common edges\n pa,pb = edges.T\n k1b,k1a = np.sort(np.take(IKLE,[0,1],axis=1)).T\n indx1 = np.searchsorted(pa,k1a)\n jndx1 = np.searchsorted(pa,k1a,side='right')\n k2b,k2a = np.sort(np.take(IKLE,[1,2],axis=1)).T\n indx2 = np.searchsorted(pa,k2a)\n jndx2 = np.searchsorted(pa,k2a,side='right')\n k3b,k3a = np.sort(np.take(IKLE,[2,0],axis=1)).T\n indx3 = np.searchsorted(pa,k3a)\n jndx3 = np.searchsorted(pa,k3a,side='right')\n \n # ~~> Building one triangle at a time /!\\ Please get this loop parallelised\n j = 0\n for i in range(IELEM):\n k1 = indx1[i]+np.searchsorted(pb[indx1[i]:jndx1[i]],k1b[i])\n k2 = indx2[i]+np.searchsorted(pb[indx2[i]:jndx2[i]],k2b[i])\n k3 = indx3[i]+np.searchsorted(pb[indx3[i]:jndx3[i]],k3b[i])\n # ~~> New connectivity JKLE\n JKLE[j] = [IKLE[i][0],IPOIN+k1,IPOIN+k3]\n JKLE[j+1] = [IKLE[i][1],IPOIN+k2,IPOIN+k1]\n JKLE[j+2] = [IKLE[i][2],IPOIN+k3,IPOIN+k2]\n JKLE[j+3] = [IPOIN+k1,IPOIN+k2,IPOIN+k3]\n # ~~> New interpolation references for values and coordinates\n MESHJ[k1] = [IKLE[i][0],IKLE[i][1]]\n MESHJ[k2] = [IKLE[i][1],IKLE[i][2]]\n MESHJ[k3] = [IKLE[i][2],IKLE[i][0]]\n j += 4\n\n # ~~> Reset IPOBO while you are at it\n MESHX = np.resize(MESHX,IPOIN+IEDGE)\n MESHY = 
np.resize(MESHY,IPOIN+IEDGE)\n MESHX[IPOIN:] = np.sum(MESHX[MESHJ],axis=1)/2.\n MESHY[IPOIN:] = np.sum(MESHY[MESHJ],axis=1)/2.\n neighbours = Triangulation(MESHX,MESHY,JKLE).get_cpp_triangulation().get_neighbors()\n JPOBO = np.zeros(IPOIN+IEDGE,np.int)\n for n in range(IELEM*4):\n s1,s2,s3 = neighbours[n]\n e1,e2,e3 = JKLE[n]\n if s1 < 0:\n JPOBO[e1] = e1+1\n JPOBO[e2] = e2+1\n if s2 < 0:\n JPOBO[e2] = e2+1\n JPOBO[e3] = e3+1\n if s3 < 0:\n JPOBO[e3] = e3+1\n JPOBO[e1] = e1+1\n\n return JKLE,MESHX,MESHY,JPOBO,MESHJ", "def test_get_second_slice(self):\n self.init()\n assert np.all(get_second_slice(self.i64_3) == self.i64_3[:,:,1])\n assert np.all(get_second_slice(self.fi64_3) == self.fi64_3[:,:,1])\n assert np.all(get_second_slice(self.f64_3) == self.f64_3[:,:,1])\n assert np.all(get_second_slice(self.ff64_3) == self.ff64_3[:,:,1])\n assert get_second_slice(self.i64_3).shape == (3,3)\n assert get_second_slice(self.fi64_3).shape == (3,3)\n assert get_second_slice(self.f64_3).shape == (3,3)\n assert get_second_slice(self.ff64_3).shape == (3,3)\n assert get_second_slice(self.i64_3).dtype == 'float64'\n assert get_second_slice(self.fi64_3).dtype == 'float64'\n assert get_second_slice(self.f64_3).dtype == 'float64'\n assert get_second_slice(self.ff64_3).dtype == 'float64'\n assert get_second_slice(self.i64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.fi64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.f64_3).flags['F_CONTIGUOUS'] == True\n assert get_second_slice(self.ff64_3).flags['F_CONTIGUOUS'] == True", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n 
closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index", "def subcubes_from_ds9(cube, region_file='../nro_maps/AllShells.reg', pad_factor=1., shape='exact'):\n from spectral_cube import SpectralCube\n import pyregion\n\n try:\n #If cube is a str filename, read a SpectralCube.\n cube = SpectralCube.read(cube)\n except ValueError:\n pass\n\n if shape == 'square':\n import astropy.units as u\n subcube_list = []\n region_list = pyregion.open(region_file)\n for region in region_list:\n half_width = region.coord_list[2] * pad_factor * u.deg\n ra_center = region.coord_list[0] * u.deg\n dec_center = region.coord_list[1] * u.deg\n ra_range = [ra_center - half_width, ra_center + half_width]\n dec_range = [dec_center - half_width, dec_center + half_width]\n #print(ra_range, dec_range)\n subcube_list.append(cube.subcube(ra_range[1], ra_range[0], dec_range[0], dec_range[1]))\n if shape == 'exact':\n region_list = pyregion.open(region_file)\n subcube_list = []\n for region in region_list:\n \n if pad_factor != 1.:\n new_string = '{};{}({},{},{}\")'.format(region.coord_format, region.name,\n region.coord_list[0], region.coord_list[1],\n region.coord_list[2]*3600.*pad_factor)\n region = pyregion.parse(new_string)[0]\n \n subcube_list.append(cube.subcube_from_ds9region(pyregion.ShapeList([region])))\n if len(subcube_list) == 1:\n return subcube_list[0]\n else:\n return subcube_list", "def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def core_slices(self, borders=None):\n if borders is None:\n borders = self.all_borders\n\n core_slices = list(self.slices)\n for border, direction in borders:\n core_slice = core_slices[border]\n if direction < 0:\n core_slice = slice(core_slice.start + self.overlap[border], core_slice.stop)\n else:\n core_slice = slice(core_slice.start, core_slice.stop - self.overlap[border])\n core_slices[border] = core_slice\n\n return tuple(core_slices)", "def 
decompose_hankel_2d(hankel,slice3d_shape,s, factors, rp):\n #orig dimensions\n n = slice3d_shape[2]\n m = slice3d_shape[1]//2**s\n # init return kspace slab\n slice3d_stage_s = np.zeros((rp['rcvrs'],m,n),dtype=DTYPE)\n p = rp['filter_size'][0]\n q = rp['filter_size'][1]\n #print('n , m, p, q : {}, {}, {}, {}'.format(n,m,p,q))\n #inner hankel dimension:\n ih_col = m-p+1\n ih_row = p\n (factor_inner, factor_outer) = factors\n # ------------decomposing outer hankel---------------------\n if rp['virtualcoilboost'] == False:\n receiverdim = int(rp['rcvrs'])\n elif rp['virtualcoilboost'] == True:\n receiverdim = int(rp['rcvrs']*2)\n\n for i in range(receiverdim):\n hankel_1rcvr = hankel[:,hankel.shape[1]//rp['rcvrs']*i:\\\n hankel.shape[1]//rp['rcvrs']*(1+i)]\n inner_hankel_arr = np.zeros((n,ih_col,ih_row),dtype=DTYPE)\n # how many occurrences of H_inner[j] are in outer hankel\n for j in range(0,q):\n # for each slab in outer hankel\n fill_arr = np.zeros((n,ih_col,ih_row),dtype=DTYPE) \n cols = [hankel_1rcvr[(m-p+1)*k:(m-p+1)*(k+1),p*j:p*(j+1)] \\\n for k in range(0,n-q+1)]\n cols = np.array(cols)\n fill_arr[j:n-q+j+1,:,:] = cols\n inner_hankel_arr = inner_hankel_arr + fill_arr\n # division by the multiples var to get the averages\n inner_hankel_avg_arr = np.divide(inner_hankel_arr,factor_outer[s])\n # avg: array of inner hankels\n # ----------------decomposing inner hankels------------\n for j in range(0,n):\n inner_hank_j = inner_hankel_avg_arr[j,:,:]\n hankel_j_arr = np.zeros(m,dtype=DTYPE)\n cols = [inner_hank_j[:,k] for k in range(inner_hank_j.shape[1])]\n cols = np.array(cols, dtype=DTYPE)\n for k in range(0,len(cols)):\n fill_arr = np.zeros(m,dtype=DTYPE)\n fill_arr[k:m-p+k+1] = cols[k]\n hankel_j_arr = hankel_j_arr + fill_arr\n hankel_j_arr_avg = np.divide(hankel_j_arr,factor_inner[s])\n slice3d_stage_s[i,:,j] = hankel_j_arr_avg\n return slice3d_stage_s", "def get_subpix_grid(self, g1d):\n\n rsp = interpolation_points(self.M)\n rspg1d = g1d[:, np.newaxis] + rsp[np.newaxis, :] * np.diff(g1d)[0]\n return rspg1d.ravel()", "def get2DSlice( self, slices: list ):\n assert(len(slices)==self._nDims-2)\n slices.extend([slice(self._nGlobalCoords[self._layout.dims_order[-2]]),\n slice(self._nGlobalCoords[self._layout.dims_order[-1]])])\n return self._f[tuple(slices)]", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the 
constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def subregionIntersection(highres,lowres,regions):\n \n hr = nb.load(highres)\n hr_data = hr.darrays[0].data\n \n lr = nb.load(lowres)\n \n lr_indices = regionIndices(lr,regions)\n \n hr_regions = list(set(hr_data[lr_indices]))\n \n return hr_regions", "def subamostragem(imagem, r):\n\n lx, ly = imagem.shape\n imagem_sub = np.zeros([int(lx/r), int(ly/r)])\n for i in np.arange(r-1, lx, r):\n for j in np.arange(r-1, ly, r):\n imagem_sub[int(i/r), int(j/r)] = imagem[i, j]\n\n return imagem_sub", "def slices_to_unit_indices(self, slices):\n # remove dimension for channel\n slices = slices[-len(self.chunk_shape):]\n return itertools.product(\n *[\n range(\n # set start 0 if slice begins in first overlap area to prevent negative index\n # otherwise take floor div for the number of strides from bound start - offset of overlap\n 0 if sl.start is None or sl.start < b.start + o else (sl.start - b.start - o) // s,\n # set end to chunks if slice ends in the last overlap area to prevent index > chunks\n # otherwise take ceil div for the number of strides from start (no offset needed)\n chunks if sl.stop is None or sl.stop >= b.stop - o else math.ceil((sl.stop - b.start) / s)\n ) for b, s, chunks, sl, o in zip(self.bounds, self.strides, self.num_chunks, slices, self.overlap)\n ]\n )", "def mapping_matrix_from_sub_mask_1d_index_to_pixelization_1d_index(\n sub_mask_1d_index_to_pixelization_1d_index,\n pixels,\n total_mask_pixels,\n sub_mask_1d_index_to_mask_1d_index,\n sub_fraction,\n):\n\n mapping_matrix = np.zeros((total_mask_pixels, pixels))\n\n for sub_mask_1d_index in range(sub_mask_1d_index_to_mask_1d_index.shape[0]):\n mapping_matrix[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index],\n sub_mask_1d_index_to_pixelization_1d_index[sub_mask_1d_index],\n ] += sub_fraction\n\n return mapping_matrix", "def kd_domain_split(counts_all, ndomains, log=null_log):\n\n split_fac = 1.35 * (float(ndomains)/np.cumprod(counts_all.shape)[-1])**(1.0/3.0)\n print('split factor', split_fac, file=log)\n # First translate the box so 0,0,0 in best posn to minimise communication\n total_shifts = []\n for axis in range(3):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts_all.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n count_ax = counts_all.sum(axis=sum_axes, 
dtype=np.int64)\n # amount communicated per plane\n comm = count_ax + np.roll(count_ax, 1)\n\n total_shifts.append(np.argmin(comm))\n\n\n for axis, r in enumerate(total_shifts):\n counts_all = np.roll(counts_all, shift=-r, axis=axis)\n\n print('Best shifts', total_shifts, file=log)\n\n\n # pad\n counts_pad = np.empty(tuple(v+2 for v in counts_all.shape), dtype=counts_all.dtype)\n counts_pad[1:-1,1:-1,1:-1] = counts_all\n counts_pad[1:-1,1:-1,0] = counts_pad[1:-1,1:-1, -2]\n counts_pad[1:-1,1:-1,-1] = counts_pad[1:-1,1:-1,1]\n counts_pad[1:-1,0] = counts_pad[1:-1, -2]\n counts_pad[1:-1,-1] = counts_pad[1:-1, 1]\n counts_pad[0] = counts_pad[-2]\n counts_pad[-1] = counts_pad[1]\n\n\n domain_segments = []\n\n doms_tosplit = [((0,0,0), counts_pad, ndomains)]\n\n while len(doms_tosplit):\n dom_topleft, counts, ndom = doms_tosplit.pop(0)\n\n if ndom==1:\n # done\n dom_shape = tuple(v-2 for v in counts.shape)\n domain_segments.append((dom_topleft, dom_shape, counts.sum(dtype=np.uint64)))\n continue\n\n # Bisect this domain \n axis, split_idx, n_L = bisect_anyaxis(counts, ndom, split_fac)\n\n n_R = ndom-n_L\n\n if axis==0:\n counts_L, counts_R = counts[:split_idx+2], counts[split_idx:]\n elif axis==1:\n counts_L, counts_R = counts[:,:split_idx+2], counts[:,split_idx:] \n elif axis==2:\n counts_L, counts_R = counts[:,:,:split_idx+2], counts[:,:,split_idx:]\n else:\n raise Exception('3d only, aaargh.')\n\n # add left and right domains\n doms_tosplit.append((dom_topleft, counts_L, n_L))\n\n # top left of right domain\n dom_R_topleft = list(dom_topleft)\n dom_R_topleft[axis] += split_idx\n dom_R_topleft = tuple(dom_R_topleft)\n\n doms_tosplit.append((dom_R_topleft, counts_R, n_R))\n\n\n # sort domains biggest->smallest\n domain_segments = sorted(domain_segments, key=lambda ijk_shape_pts:-ijk_shape_pts[2])\n\n doms = np.empty(counts_all.shape, dtype=np.int16)\n\n for d,(ijk, shape, tot_pts) in enumerate(domain_segments):\n segment = tuple(slice(i,i+size) for i,size in zip(ijk, shape))\n doms[segment] = d+1\n real_pts = counts_all[segment].sum(dtype=np.int64)\n# print('domain', d, 'shape', shape, '{:,} pts, {:,} total'.format(real_pts, tot_pts), file=log)\n\n # Undo the total shifts\n for axis, r in enumerate(total_shifts):\n doms = np.roll(doms, shift=r, axis=axis)\n \n return doms", "def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n perimeterVals,\n perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the 
minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame", "def generate_map(nrows, ncols, nrooms, max_col_size, max_row_size):\n arr = np.zeros((nrows, ncols), dtype=np.int8)\n\n for i in range(nrooms):\n rand_row_start = np.random.randint(nrows)\n rand_col_start = np.random.randint(ncols)\n\n rand_row_size = np.random.randint(max_row_size / 2, max_row_size)\n rand_col_size = np.random.randint(max_col_size / 2, max_col_size)\n\n arr[rand_row_start:rand_row_start + rand_row_size, rand_col_start:rand_col_start + rand_col_size] = 1\n\n labels = measure.label(arr)\n regions = measure.regionprops(labels)\n\n centroids = list()\n for region in regions:\n centroids.append(region.centroid)\n\n num_centroids = len(centroids)\n\n # get distances between every pair of centroids\n dists = scipy.spatial.distance.cdist(centroids, centroids)\n\n # get a distance that is greater than all current distances\n max_dist = np.max(dists) + 1\n\n # make sure upper triangle is at least max_dist so that when picking closest\n # pairs, we won't choose a diagonal element or a duplicate connection\n dists = dists + np.triu(np.ones((num_centroids, num_centroids))) * max_dist\n\n for i in range(num_centroids - 1):\n min_dist_idx = np.argmin(dists)\n min_dist_idx = np.unravel_index(min_dist_idx, dists.shape)\n\n # create a hallway between regionprops\n centroid1 = np.array(centroids[min_dist_idx[0]], dtype=np.int)\n centroid2 = np.array(centroids[min_dist_idx[1]], dtype=np.int)\n\n [row_centroid_1, row_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[0])\n [col_centroid_1, col_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[1])\n\n arr[row_centroid_1[0]:row_centroid_2[0] + 1, row_centroid_1[1]] = 1\n arr[row_centroid_2[0], col_centroid_1[1]:col_centroid_2[1] + 1] = 1\n\n dists[:, min_dist_idx[1]] += max_dist\n\n return arr", "def planeSliceGFig3(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n # print(bound)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n # print(upxvecs)\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n cbar.set_label('G', fontsize=18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT 
gain\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'red', label = r\"$2^{nd}$ order GO gain\")\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax1.set_ylabel('G', fontsize = 18)\n # ax1.set_title(\"Slice Gain\")\n ax1.tick_params(labelsize = 14)\n ax1.grid()\n ax1.legend(loc = 1, fontsize = 14)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n grid.tight_layout(fig, pad = 1.5)\n plt.show()\n return", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts 
above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def split_combined_polys(polys, poly_lens, polys_per_mask):\n mask_polys_list = []\n for img_id in range(len(polys)):\n polys_single = polys[img_id]\n polys_lens_single = poly_lens[img_id].tolist()\n polys_per_mask_single = polys_per_mask[img_id].tolist()\n\n split_polys = mmcv.slice_list(polys_single, polys_lens_single)\n mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)\n mask_polys_list.append(mask_polys)\n return mask_polys_list", "def slices(left, right, key, how='inner'):\n threshold = (((psutil.virtual_memory().available / 1024) / 1024) / 1024) // PartitionRunner.MAX_THRESHOLD\n slices = 1\n memory = 0\n while True:\n left_slice = left[:len(left.index) // slices]\n right_slice = right[:len(right.index) // slices]\n rows = PartitionRunner.merge_size(left_slice, right_slice, 'chromosome', how=how)\n cols = len(left.columns) + len(right.columns) - (len(key) if isinstance(key, list) else 1)\n memory = ((((rows * cols * np.dtype(np.float64).itemsize) / 1024) / 1024) / 1024)\n if memory < threshold:\n break\n slices += 1\n\n return slices", "def add_subdivision(self):\n temp_sub_vertices = []\n for plane in (self.subdivision_list):\n current_mids = []\n mid_m_01 = Vec3d(0, 0, 0, 0)\n mid_m_12 = Vec3d(0, 0, 0, 0)\n mid_m_20 = Vec3d(0, 0, 0, 0)\n\n mid_m_01.x = (plane[0].x + plane[1].x) / 2\n mid_m_01.y = (plane[0].y + plane[1].y) / 2\n mid_m_01.z = (plane[0].z + plane[1].z) / 2\n mid_m_01.w = plane[0].w\n\n mid_m_12.x = (plane[1].x + plane[2].x) / 2\n mid_m_12.y = (plane[1].y + plane[2].y) / 2\n mid_m_12.z = (plane[1].z + plane[2].z) / 2\n mid_m_12.w = plane[1].w\n\n mid_m_20.x = (plane[2].x + plane[0].x) / 2\n mid_m_20.y = (plane[2].y + plane[0].y) / 2\n mid_m_20.z = (plane[2].z + plane[0].z) / 2\n mid_m_20.w = plane[2].w\n\n current_mids = [mid_m_01, mid_m_12, mid_m_20]\n temp_sub_vertices.append(current_mids)\n\n for index in range(len(current_mids)):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = plane[index].x\n v0.y = plane[index].y\n v0.z = plane[index].z\n\n v1.x = current_mids[index].x\n v1.y = current_mids[index].y\n v1.z = current_mids[index].z\n\n v2.x = current_mids[index - 1].x\n v2.y = current_mids[index - 1].y\n v2.z = current_mids[index - 1].z\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices", "def calculate_contours_fit(L_x, L_y, e, leaflet, ts, Plots, side):\n \n n = np.load(input_dir + 'directors_'+leaflet+'_tail_'+ str(ts) + '.npy') \n\n pos = np.load(input_dir + 'coordinates_'+leaflet+'_tail_' + str(ts) + '.npy') \n\n resid = np.load(input_dir + 'residues_'+leaflet+'_tail_' + str(ts) + '.npy')\n box = np.load(input_dir + 'box' + str(ts) + '.npy')\n\n \n chl = np.load(input_dir + 'cholesterol_'+leaflet+'_tail_' + str(ts) + '.npy')\n dlipc = np.load(input_dir + 'dlipc_'+leaflet+'_tail_' + str(ts) + '.npy') \n dspc = np.load(input_dir + 
'dspc_'+leaflet+'_tail_' + str(ts) + '.npy')\n ssm = np.load(input_dir + 'ssm_'+leaflet+'_tail_' + str(ts) + '.npy')\n \n #n= np.ones(len(pos))\n \"\"\" END: read the input data \"\"\"\n\n\n field = order_vector_field(L_x, L_y, pos, n, e, box)\n\n c = pd.DataFrame(data=field).mean(axis=0).rolling(50, center=True, min_periods=1).mean() #50\n c.dropna(inplace=True)\n middle = 0.5*(np.max(c) + np.min(c)) \n #middle = 0.025\n contours = measure.find_contours(field, middle) # Marching Cubes algorith\n #save contours\n fac_x = box[0] / L_x #to get the right dimensions (range_x)\n fac_y = box[1] / L_y # (range_y)\n \n contours_x = []\n contours_y = []\n contours_x_y = []\n \n contours_all = []\n for m, contour in enumerate(contours):\n contours_x.append((contour[:, 1] * fac_x))\n contours_y.append((contour[:, 0] * fac_y))\n \n \n contours_x_y = np.column_stack((contours_x[m], contours_y[m]))\n contours_all.append(contours_x_y)\n np.save(output_contours + 'contours_'+leaflet+'.' + str(ts) + '.npy', contours_all)\n \n\n#===================================================\n#To assign resids to the different phases\n phase_belonging = np.zeros((len(pos)))\n ordered =[]\n disordered = []\n for i in np.arange(len(pos)):\n \n def apply_pbc(pos, box):\n if pos >= box:\n pos -= box\n if pos < 0:\n pos += box\n return pos\n \n idx_x = int(apply_pbc(pos[i,0], box[0]) / fac_x - 1.e-5) #the - 1.e-5 is because accuracy issue in the /\n idx_y = int(apply_pbc(pos[i,1], box[1]) / fac_y - 1.e-5) #this - 1.e-5 is because accuracy issue in the /\n #print(idx_x, idx_y)\n order= field[idx_y, idx_x]\n if (order > middle):\n ordered.append(order)\n order = 1 #ordered lipids\n \n else :\n disordered.append(order)\n order =0 #disordered lipids\n phase_belonging[i] = order\n \n\n resid_phases = np.column_stack((resid[:,0], phase_belonging))\n np.save(output_dir + 'resid_phases'+leaflet+'.'+ str(j) + '.npy', resid_phases)\n\n if Plots == True:\n plt.figure(figsize=(15,10)) \n \n contours_sorted = sorted(contours, key=len, reverse=True)\n \n for i in range(2):\n plt.plot(contours_sorted[i][:,1]* fac_x+0.5*fac_x, contours_sorted[i][:,0]* fac_y+0.5*fac_y, linewidth=3, color='#0000FF' ) ##00CC00\n \n #for m, contour in enumerate(contours_sorted):\n # print(contour[:,0])\n # for contour in contours: \n \n # plt.plot((contour[:, 1] * fac_x+0.5*fac_x),\n # (contour[:, 0] * fac_y+0.5*fac_y),\n # linewidth=4, color='#00CC00')\n \n plt.imshow(field, interpolation='nearest', \n cmap=plt.cm.gray_r,\n extent=[0, box[0], 0, box[1]], origin='lower', alpha=0.7) \n \n plt.axis('off')\n plot_scatter_order_field(pos, resid, dlipc, dspc, chl,ssm, n , box, ts, side) #phase_belonging.reshape(-1,1)\n plt.savefig(output_dir + 'contours-'+ leaflet + str(ts) + '.png', dpi=300) \n plt.close() \n \n return resid_phases #, ordered, disordered ", "def get_slices_indices(nii, axis, count):\n assert axis >= 0 and axis < 3, \"Invalid axis\"\n # roll the nifti so the dimension of interest is at the frontend\n nii = np.rollaxis(nii, axis)\n count = count + 2\n valid_slices = np.where([r.any() for r in nii])[0]\n step = int(np.ceil(len(valid_slices) / float(count)))\n slices = [valid_slices[idx] for idx in range(0, len(valid_slices), step)]\n slices = slices[1:-1]\n return(slices)", "def subimage_generator(image, patch_block_size, numberxy, numberz):\n width = np.shape(image)[1]\n height = np.shape(image)[2]\n imagez = np.shape(image)[0]\n block_width = np.array(patch_block_size)[1]\n block_height = np.array(patch_block_size)[2]\n blockz = 
np.array(patch_block_size)[0]\n\n stridewidth = (width - block_width) // (numberxy - 1)\n strideheight = (height - block_height) // (numberxy - 1)\n stridez = (imagez - blockz) // numberz\n # step 1:if image size of z is smaller than blockz,return zeros samples\n if imagez < blockz:\n nb_sub_images = numberxy * numberxy * 1\n hr_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n return hr_samples\n # step 2:if stridez is bigger 1,return numberxy * numberxy * numberz samples\n if stridez >= 1:\n nb_sub_images = numberxy * numberxy * numberz\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for z in range(0, numberz * stridez, stridez):\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[z:z + blockz, x:x + block_width, y:y + block_height]\n indx += 1\n\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n return hr_samples\n\n # step3: if stridez==imagez,return numberxy * numberxy * 1 samples,one is [0:blockz,:,:]\n if imagez == blockz:\n nb_sub_images = numberxy * numberxy * 1\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[:, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n print(indx)\n print(nb_sub_images)\n return hr_samples\n # step4: if stridez==0,return numberxy * numberxy * 2 samples,one is [0:blockz,:,:],two is [-blockz-1:-1,:,:]\n if stridez == 0:\n nb_sub_images = numberxy * numberxy * 2\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[0:blockz, x:x + block_width, y:y + block_height]\n indx += 1\n hr_samples[indx, :, :, :] = image[-blockz - 1:-1, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n return hr_samples", "def plot_pcs_slice_sub(self,data_in,large_slice,plot_slice,\n indiv=0,color_array=None,sz=8):\n fig = plt.figure(figsize=(sz,6))\n gs = GridSpec(sz+len(self.states_list),1)\n feature_ax = plt.subplot(gs[:sz,:])\n stateseq_ax = plt.subplot(gs[sz+1])\n\n if color_array is None:\n color_array = self._get_colors()\n\n r_plot_slice = list(map(lambda x: large_slice[0] + x, plot_slice))\n z, perm = relabel_model_z(self,index=indiv)\n z = z[r_plot_slice]\n stateseq_norep, durations = rle(z)\n\n max_ = ceil(data_in.max()-data_in.min()) +1\n data_in=data_in[:,plot_slice]\n ttime = np.arange(data_in.shape[1])\n for ii in range(0,data_in.shape[0]):\n feature_ax.plot(ttime,data_in[ii,:] + ii*max_,'k')\n\n feature_ax.set_xlim((0,len(plot_slice)))\n feature_ax.set_ylim((data_in.min()-1,data_in.shape[0]*max_-1))\n feature_ax.set_yticks([])\n feature_ax.set_xticks([])\n\n stateseq_ax.imshow(z[:,np.newaxis].T,aspect='auto',\n cmap=ListedColormap(color_array),vmin=0,vmax=len(perm))\n stateseq_ax.set_yticks([])\n stateseq_ax.set_xticks([])\n\n for ii, pos in enumerate(durations.cumsum()):\n if durations[ii] >=1:\n feature_ax.axvline(pos,\n color=color_array[stateseq_norep[ii]],\n linestyle=':')\n 
return", "def locate_slice(slice_idx, height, overlap_metadata):\n g_nrow = overlap_metadata.shape[0] + 1\n side = overlap_metadata[0, 0, 1]\n overlap_list = overlap_metadata[:, 0, 0]\n if side == 1:\n list_slices = [(np.arange(i * height, i * height + height) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n else:\n list_slices = [\n (np.arange(i * height + height - 1, i * height - 1, -1) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n list_slices = np.asarray(list_slices)\n results = []\n for i, list1 in enumerate(list_slices):\n pos = np.squeeze(np.where(list1 == slice_idx)[0])\n if pos.size == 1:\n results.append([i, pos, 1.0])\n if len(results) == 2:\n if side == 1:\n results[0][2] = (1.0 * list_slices[results[0][0]][\n -1] - slice_idx) / (overlap_list[results[0][0]] - 1.0)\n results[1][2] = (slice_idx - 1.0 * list_slices[results[1][0]][\n 0]) / (overlap_list[results[0][0]] - 1.0)\n else:\n results[0][2] = (- slice_idx + 1.0 * list_slices[results[0][0]][\n 0]) / (overlap_list[results[0][0]] - 1.0)\n results[1][2] = (-1.0 * list_slices[results[1][0]][\n -1] + slice_idx) / (overlap_list[results[0][0]] - 1.0)\n return results", "def test_getitem_slice(self):\n random.seed(12345)\n\n nside_coverage = 32\n nside_map = 128\n\n full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN\n full_map[0: 5000] = random.random(size=5000)\n\n sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage)\n\n # Test in-range, overlap, out-of-range\n testing.assert_array_almost_equal(sparse_map[100: 500], full_map[100: 500])\n testing.assert_array_almost_equal(sparse_map[4500: 5500], full_map[4500: 5500])\n testing.assert_array_almost_equal(sparse_map[5500: 5600], full_map[5500: 5600])\n\n # Test stepped\n testing.assert_array_almost_equal(sparse_map[100: 500: 2], full_map[100: 500: 2])\n testing.assert_array_almost_equal(sparse_map[4500: 5500: 2], full_map[4500: 5500: 2])\n testing.assert_array_almost_equal(sparse_map[5500: 5600: 2], full_map[5500: 5600: 2])\n\n # Test all\n testing.assert_array_almost_equal(sparse_map[:], full_map[:])", "def get_sample(self, n, side_len):\n self.side_len = side_len\n side_len_scaled = self.side_len * self.resolution_scaler\n\n print(self.side_len, side_len_scaled, self.resolution_scaler)\n tiles = []\n center_coords = []\n\n # get random starting positions \n rand_rows = np.random.randint(self.src.shape[0]-side_len_scaled , size=(n,))\n rand_cols = np.random.randint(self.src.shape[1]-side_len_scaled, size=(n,))\n\n for row_idx, col_idx in zip(rand_rows, rand_cols):\n # get clip\n clip = self.get_window(row_idx, col_idx, side_len_scaled)\n\n # handle non-square clips\n if clip.shape[1] != side_len_scaled or clip.shape[2] != side_len_scaled:\n pad = np.full((3, side_len_scaled, side_len_scaled), self.pad_val)\n pad[:, 0:clip.shape[1], 0:clip.shape[2]] = clip\n clip = pad.copy()\n\n if self.resolution_scaler != 1:\n # note that we use the unscaled side length to ensure numpy arrays\n # have the same dimensionality\n clip = resize(clip, (3, self.side_len, self.side_len))\n \n print(clip.shape)\n\n tiles.append(clip)\n\n # get center lat/lon\n lat, lon = self.get_latlon_point(row_idx + side_len_scaled // 2, col_idx + side_len_scaled // 2)\n center_coords.append([lat, lon])\n\n return tiles, center_coords", "def beam_slices(map_file, fee_map, nside):\n\n t_name, r_name, _, _ = Path(map_file).stem.split(\"_\")\n\n pointings = [\"0\", \"2\", \"4\"]\n\n maps = []\n\n # load data from map .npz file\n tile_map = 
np.load(map_file, allow_pickle=True)\n fee_m = np.load(fee_map, allow_pickle=True)\n\n for p in pointings:\n\n tile = tile_map[p]\n\n if \"XX\" in t_name:\n fee = fee_m[p][0]\n else:\n fee = fee_m[p][1]\n\n # rotate maps so slices can be taken\n fee_r = rotate_map(nside, angle=-np.pi / 4, healpix_array=fee)\n tile_r = rotate_map(nside, angle=-np.pi / 4, healpix_array=tile)\n\n # fee_r[PB_0] = np.nan\n # tile_r[PB_0] = np.nan\n\n # slice the tile and fee maps along NS, EW\n # zenith angle thresh of 70 to determine fit gain factor\n NS_f, EW_f = healpix_cardinal_slices(nside, fee_r, 70)\n NS_t, EW_t = map_slices(nside, tile_r, 70)\n\n gain_NS = chisq_fit_gain(data=NS_t[0], model=NS_f[0])\n gain_EW = chisq_fit_gain(data=EW_t[0], model=EW_f[0])\n\n # slice the tile and fee maps along NS, EW.\n # the above gain factor is applied to full beam slices\n NS_fee, EW_fee = healpix_cardinal_slices(nside, fee_r, 85)\n NS_tile, EW_tile = map_slices(nside, tile_r, 85)\n\n # Scale the data so that it best fits the beam slice\n NS_tile_med = NS_tile[0] - gain_NS[0]\n EW_tile_med = EW_tile[0] - gain_EW[0]\n\n # delta powers\n del_NS = NS_tile_med - NS_fee[0]\n del_EW = EW_tile_med - EW_fee[0]\n\n # 3rd order poly fits for residuals\n fit_NS = poly_fit(NS_tile[2], del_NS, NS_tile[0], 3)\n fit_EW = poly_fit(EW_tile[2], del_EW, EW_tile[0], 3)\n\n maps.append(\n [\n [NS_tile, NS_fee, NS_tile_med, del_NS, fit_NS],\n [EW_tile, EW_fee, EW_tile_med, del_EW, fit_EW],\n ]\n )\n\n return maps", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 
and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def __init__(self, nside=128, lonCol ='fieldRA',\n latCol='fieldDec', latLonDeg=True, verbose=True, badval=hp.UNSEEN,\n useCache=True, leafsize=100, radius=1.75,\n useCamera=False, rotSkyPosColName='rotSkyPos',\n mjdColName='observationStartMJD', chipNames='all'):\n super(HealpixSlicer, self).__init__(verbose=verbose,\n lonCol=lonCol, latCol=latCol,\n badval=badval, radius=radius, leafsize=leafsize,\n useCamera=useCamera, rotSkyPosColName=rotSkyPosColName,\n mjdColName=mjdColName, chipNames=chipNames, latLonDeg=latLonDeg)\n # Valid values of nside are powers of 2.\n # nside=64 gives about 1 deg resolution\n # nside=256 gives about 13' resolution (~1 CCD)\n # nside=1024 gives about 3' resolution\n # Check validity of nside:\n if not(hp.isnsideok(nside)):\n raise ValueError('Valid values of nside are powers of 2.')\n self.nside = int(nside)\n self.pixArea = hp.nside2pixarea(self.nside)\n self.nslice = hp.nside2npix(self.nside)\n self.spatialExtent = [0, self.nslice-1]\n self.shape = self.nslice\n if self.verbose:\n print('Healpix slicer using NSIDE=%d, ' % (self.nside) + \\\n 'approximate resolution %f arcminutes' % (hp.nside2resol(self.nside, arcmin=True)))\n # Set variables so slicer can be re-constructed\n self.slicer_init = {'nside': nside, 'lonCol': lonCol, 'latCol': latCol,\n 'radius': radius}\n self.useCache = useCache\n if useCache:\n # useCache set the size of the cache for the memoize function in sliceMetric.\n binRes = hp.nside2resol(nside) # Pixel size in radians\n # Set the cache size to be ~2x the circumference\n self.cacheSize = int(np.round(4.*np.pi/binRes))\n # Set up slicePoint metadata.\n self.slicePoints['nside'] = nside\n self.slicePoints['sid'] = np.arange(self.nslice)\n self.slicePoints['ra'], self.slicePoints['dec'] = self._pix2radec(self.slicePoints['sid'])\n # Set the default plotting functions.\n self.plotFuncs = [HealpixSkyMap, HealpixHistogram, HealpixPowerSpectrum]", "def makeSubapMap():\n a=numpy.zeros((sum(nsub),),numpy.int32)\n subFlag=subapFlag.copy()\n for i in range(NNGSCAM+NLGSOCAM+NBOBCAT):#ngs 1-3, truth, lgs, lofs, hofs\n tmp=subFlag[nsub[:i].sum():nsub[:i+1].sum()]\n tmp.shape=nsuby[i],nsubx[i]\n if i==NNGSCAM+NLGSOCAM:#lofs\n tmp[:]=sfNoObs*(i+1)\n elif i==1+NNGSCAM+NLGSOCAM:#hofs\n tmp[:]=sf14NoObs*(i+1)\n elif i==NNGSCAM:#lgs\n for j in range(4):\n jj=6-j\n tmp[j*2]=individualSubapFlag[jj]*(i+1)\n if j!=3:\n tmp[j*2+1]=individualSubapFlag[j]*(i+1)\n #jj=7-j\n #if jj<7:\n # tmp[j*2-1]=individualSubapFlag[jj]*(i+1)\n #tmp[j*2]=individualSubapFlag[j]*(i+1)\n else:\n tmp[:]=individualSubapFlag*(i+1)\n return subFlag", "def instance_submasks(gti):\n rc_locs = np.where(gti > 0)\n 
grouped_cc_rcs = util.group_items(\n np.ascontiguousarray(np.vstack(rc_locs).T),\n gti[rc_locs], axis=0\n )\n\n def bounding_box(rcs):\n rc1 = rcs.min(axis=0)\n rc2 = rcs.max(axis=0)\n return rc1, rc2\n\n for label, rcs in grouped_cc_rcs.items():\n rc1, rc2 = bounding_box(rcs)\n r_slice = slice(rc1[0], rc2[0] + 1)\n c_slice = slice(rc1[1], rc2[1] + 1)\n rc_sl = (r_slice, c_slice)\n subimg = gti[rc_sl]\n submask = (subimg == label).astype(np.uint8)\n\n rc_off = rc1\n yield label, submask, rc_off, rc_sl", "def contours_to_patient_coord_sys_and_points_to_skull_axial_axis(datasets, series_arr):\n mean_points_real = [0, 0, 0] # to storage points on the skull axis line (healthy slices)\n contours_list = [None] * series_arr.shape[2] # list of all contours of all slices\n contours_mean_point_list = [None] * series_arr.shape[2] # list of all mean points of contours of interest\n rotation_info_list = [] # to storage rotation info found by the icp\n # Converts all contours for patient coordinate space based on DICOM tag information\n for i in range(series_arr.shape[2]):\n img = series_arr[:, :, i]\n # Collecting image information\n img_orient_pat = [float(x) for x in list(datasets[i].ImageOrientationPatient)]\n img_position_pat = [float(x) for x in list(datasets[i].ImagePositionPatient)]\n pixel_spacing = [float(x) for x in list(datasets[i].PixelSpacing)]\n iop1 = np.array(img_orient_pat[0:3])\n iop2 = np.array(img_orient_pat[3:6])\n # Finding contours\n [cw, pma] = select_contours(img)\n # Setting which one is the internal / external contour (internal=[0], external=[1]) when needed\n if len(pma) == 2:\n contour_0_len = len(cw[0])\n contour_1_len = len(cw[1])\n if contour_0_len >= contour_1_len:\n cw[0], cw[1] = cw[1], cw[0]\n cw_real = copy.copy(cw)\n # Coordinate system conversion for all contours\n for contour in cw_real:\n for k in range(len(contour)):\n contour[k] = img_position_pat \\\n + iop1 * pixel_spacing[1] * contour[k][0] \\\n + iop2 * pixel_spacing[0] * contour[k][1]\n contours_list[i] = cw_real\n # Collecting points to skull axial axis and lateral symmetry calculation\n if len(pma) == 2: # healthy skull slice has outside and inside contours (pixel_mean_array has 2 points)\n # uses the mean point of the external contour (contours are approx. concentric)\n pixel_mean_real = img_position_pat \\\n + iop1 * pixel_spacing[1] * pma[1][0] \\\n + iop2 * pixel_spacing[0] * pma[1][1]\n contours_mean_point_list[i] = pixel_mean_real\n mean_points_real = np.vstack([mean_points_real, pixel_mean_real])\n # Lateral symmetry\n # external_contour_mirrored = mirror_contour_point(cw_real[1][:, 0:2], pixel_mean_real[0:2])\n # T = icp_wrap(cw_real[1][:, 0:2], external_contour_mirrored, debug=True)\n # rotation_info_list.append(T)\n\n return contours_list", "def check_pieces_3_variables_2d(self, generator, slice_tuple , concatenate_size):\n\n # Unpack the start and stop for each dimension from the slices\n\n # we need to keep track of the indices in the vertical dimension only, since the\n # concatenated blocks returned by the hdf_array_iterator are always going to be\n # (whole) valid matrices...\n # i.e. it will never return, [[1,2,3,4],[5,6,7,8],[9,10]], since that is not a matrix\n # instead it will chop it off the the form that is a valid matrix:\n # [[1,2,3,4],[5,6,7,8]]\n\n # therefore, we will also update the concatenate_size so that it is perfectly divisible by the\n # the number of entries in teh x dimension\n\n\n # calculate the vertical_start, vertical_stop indices in the y dimension... 
thats the only dimension we need to keep\n # track of because of the above mentioned reason\n\n print (concatenate_size)\n\n num_entries_x = slice_tuple[1].stop - slice_tuple[1].start\n\n vertical_start, vertical_end = (slice_tuple[0].start, slice_tuple[0].stop)\n vertical_stop = min(vertical_start + concatenate_size / num_entries_x, vertical_end)\n\n\n # since the hdf_array_iterator only provides complete valid matrices, we update\n # the concatenate_size being actually used\n\n concatenate_size = concatenate_size - concatenate_size % num_entries_x\n\n print (\"vertical_start: %s, vertical_stop: %s, concatenate_size: %s\" % (vertical_start,vertical_stop, concatenate_size))\n print (\"vertical_end: %s\" % vertical_end)\n print (\"num_entries_x: %s\" % num_entries_x)\n\n\n while vertical_stop < vertical_end:\n\n out = generator.next()\n\n y_stop = min(vertical_stop, vertical_end)\n\n truth1 = out['temperature']['values'] == self.t_result[vertical_start: y_stop , : min(concatenate_size, num_entries_x)]\n truth2 = out['salinity']['values'] == self.s_result[vertical_start: y_stop, : min(concatenate_size, num_entries_x)]\n truth3 = out['pressure']['values'] == self.p_result[vertical_start: y_stop, : min(concatenate_size, num_entries_x)]\n\n print (\"checking here!\")\n print (\"%s==%s\" % (out['temperature']['values'], self.t_result[vertical_start:vertical_stop, : min(concatenate_size, num_entries_x)]))\n self.assertTrue(truth1.all())\n self.assertTrue(truth2.all())\n self.assertTrue(truth3.all())\n\n vertical_start = vertical_stop\n vertical_stop += concatenate_size / num_entries_x", "def iterate_over_celestial_slices(array_in, array_out, wcs):\n\n # First put lng/lat as first two dimensions in WCS/last two in Numpy\n if wcs.wcs.lng == 0 and wcs.wcs.lat == 1:\n array_in_view = array_in\n array_out_view = array_out\n elif wcs.wcs.lng == 1 and wcs.wcs.lat == 0:\n array_in_view = array_in.swapaxes(-1, -2)\n array_out_view = array_out.swapaxes(-1, -2)\n else:\n array_in_view = array_in.swapaxes(-2, -1 - wcs.wcs.lat).swapaxes(-1, -1 - wcs.wcs.lng)\n array_out_view = array_out.swapaxes(-2, -1 - wcs.wcs.lat).swapaxes(-1, -1 - wcs.wcs.lng)\n\n # Flatten remaining dimensions to make it easier to loop over\n from operator import mul\n\n nx_in = array_in_view.shape[-1]\n ny_in = array_in_view.shape[-2]\n n_remaining_in = reduce(mul, array_in_view.shape, 1) // nx_in // ny_in\n\n nx_out = array_out_view.shape[-1]\n ny_out = array_out_view.shape[-2]\n n_remaining_out = reduce(mul, array_out_view.shape, 1) // nx_out // ny_out\n\n if n_remaining_in != n_remaining_out:\n raise ValueError(\"Number of non-celestial elements should match\")\n\n array_in_view = array_in_view.reshape(n_remaining_in, ny_in, nx_in)\n array_out_view = array_out_view.reshape(n_remaining_out, ny_out, nx_out)\n\n for slice_index in range(n_remaining_in):\n yield array_in_view[slice_index], array_out_view[slice_index]", "def matPart(mat, rs, re, cs, ce):\n return [[matGet(mat,x,y) for y in range(cs,ce)] \\\n for x in range(rs,re)]", "def cluster_contours(device, img, roi_objects, nrow=1, ncol=1, debug=None):\n\n device += 1\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n else:\n iy, ix, = np.shape(img)\n\n # get the break groups\n\n if nrow == 1:\n rbreaks = [0, iy]\n else:\n rstep = np.rint(iy / nrow)\n rstep1 = np.int(rstep)\n rbreaks = range(0, iy, rstep1)\n if ncol == 1:\n cbreaks = [0, ix]\n else:\n cstep = np.rint(ix / ncol)\n cstep1 = np.int(cstep)\n cbreaks = range(0, ix, cstep1)\n\n # categorize what bin the center 
of mass of each contour\n\n def digitize(a, step):\n if isinstance(step, int) == True:\n i = step\n else:\n i = len(step)\n for x in range(0, i):\n if x == 0:\n if a >= 0 and a < step[x + 1]:\n return x + 1\n elif a >= step[x - 1] and a < step[x]:\n return x\n elif a > step[x - 1] and a > np.max(step):\n return i\n\n dtype = [('cx', int), ('cy', int), ('rowbin', int), ('colbin', int), ('index', int)]\n coord = []\n for i in range(0, len(roi_objects)):\n m = cv2.moments(roi_objects[i])\n if m['m00'] == 0:\n pass\n else:\n cx = int(m['m10'] / m['m00'])\n cy = int(m['m01'] / m['m00'])\n # colbin = np.digitize(cx, cbreaks)\n # rowbin = np.digitize(cy, rbreaks)\n colbin = digitize(cx, cbreaks)\n rowbin = digitize(cy, rbreaks)\n a = (cx, cy, colbin, rowbin, i)\n coord.append(a)\n coord1 = np.array(coord, dtype=dtype)\n coord2 = np.sort(coord1, order=('colbin', 'rowbin'))\n\n # get the list of unique coordinates and group the contours with the same bin coordinates\n\n groups = []\n for i, y in enumerate(coord2):\n col = y[3]\n row = y[2]\n location = str(row) + ',' + str(col)\n groups.append(location)\n\n unigroup = np.unique(groups)\n coordgroups = []\n\n for i, y in enumerate(unigroup):\n col = int(y[0])\n row = int(y[2])\n for a, b in enumerate(coord2):\n if b[2] == col and b[3] == row:\n grp = i\n contour = b[4]\n coordgroups.append((grp, contour))\n else:\n pass\n\n coordlist = [[y[1] for y in coordgroups if y[0] == x] for x in range(0, (len(unigroup)))]\n\n contours = roi_objects\n grouped_contour_indexes = coordlist\n\n # Debug image is rainbow printed contours\n\n if debug == 'print':\n if len(np.shape(img)) == 3:\n img_copy = np.copy(img)\n else:\n iy, ix = np.shape(img)\n img_copy = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n rand_color = color_palette(len(coordlist))\n for i, x in enumerate(coordlist):\n for a in x:\n cv2.drawContours(img_copy, roi_objects, a, rand_color[i], -1, lineType=8)\n print_image(img_copy, (str(device) + '_clusters.png'))\n\n elif debug == 'plot':\n if len(np.shape(img)) == 3:\n img_copy = np.copy(img)\n else:\n iy, ix = np.shape(img)\n img_copy = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n rand_color = color_palette(len(coordlist))\n for i, x in enumerate(coordlist):\n for a in x:\n cv2.drawContours(img_copy, roi_objects, a, rand_color[i], -1, lineType=8)\n plot_image(img_copy)\n\n return device, grouped_contour_indexes, contours", "def diagonals_in_hd():\n number_of_pairs = 100000\n angles_for_d = {}\n for d in (10, 100, 1000):\n number_of_corners = 2 ** d - 1\n first_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n second_corner = [random.randint(0, number_of_corners) for _ in range(0, number_of_pairs)]\n\n dummy_d = [d for _ in range(0, number_of_pairs)]\n angles = []\n with cf.ProcessPoolExecutor() as executor:\n results = executor.map(find_angle, first_corner, second_corner, dummy_d)\n for result in results:\n angles.append(result)\n ser = pd.Series(angles)\n print(f\"Angles between diagonals for {d} dimensions\")\n print(ser.describe())\n angles_for_d[d] = ser\n\n plot_pmfs_for_ds(angles_for_d)", "def base_idx_neighbor_idx_simplices(n_base, n_neighbors=5, n_dim=2):\n combinations = np.array(list(itertools.combinations(np.arange(1,\n n_neighbors),\n n_dim-1))).astype(int)\n base_indices = np.repeat(np.arange(n_base), len(combinations))\n all_simplices = np.vstack([base_indices,\n np.tile(combinations, (n_base, 1)).T]).T\n #print('simplices', os.getpid(), len(all_simplices), flush=True)\n return all_simplices", "def 
_project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass", "def getBigRectangles(recognisedFacesCenters, recognisedFacesPercentages, recognisedFacesCenterSizes):\n recognisedBigFacesCenters = []\n recognisedBigFacesCentersSizes = []\n recognisedBigFacesPercentages = []\n\n \"\"\"Putting the higest probability frame in the final array by default\"\"\"\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n\n recognisedBigFacesCenters.append(recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n\n \"\"\"Purging initial arrays of the values we just put in the final arrays\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n\n for i in range(len(recognisedFacesCenters)):\n maxvalueCenters = max(recognisedFacesPercentages)\n maxposCenters = recognisedFacesPercentages.index(maxvalueCenters)\n test = getTowCornersOfRectangle(\n recognisedFacesCenters[maxposCenters], recognisedFacesCenterSizes[maxposCenters], recognisedBigFacesCenters, recognisedBigFacesCentersSizes)\n \"\"\"If the area are not overlapping then add the tested frame into the final arrays\"\"\"\n if(test == 1):\n recognisedBigFacesCenters.append(\n recognisedFacesCenters[maxposCenters])\n recognisedBigFacesCentersSizes.append(\n recognisedFacesCenterSizes[maxposCenters])\n recognisedBigFacesPercentages.append(\n recognisedFacesPercentages[maxposCenters])\n \"\"\"Purging initial arrays of the tested values\"\"\"\n recognisedFacesCenters.pop(maxposCenters)\n recognisedFacesPercentages.pop(maxposCenters)\n recognisedFacesCenterSizes.pop(maxposCenters)\n return [recognisedBigFacesCenters, recognisedBigFacesCentersSizes, recognisedBigFacesPercentages]", "def CalculateSliceEdges(self, maxZSize):\n # always round up as there's no harm done in passing slightly too much data\n _A1_SEARCH_RADIUS = int(math.sqrt(self._a1Config.SPIRAL_CONFIG.MAX_NBRS_TO_SEARCH / 3.14) + 1)\n _DESPECKLE_SEARCH_RADIUS = 
int(math.sqrt(self._despeckleConfig.SPIRAL_CONFIG.MAX_NBRS_TO_SEARCH / 3.14) + 1)\n _TOTAL_DESIRED_MARGIN = _A1_SEARCH_RADIUS + _DESPECKLE_SEARCH_RADIUS\n\n reqDataL = max(0, self.xLims[0] - _TOTAL_DESIRED_MARGIN)\n reqDataR = min(self.xLims[1] + _TOTAL_DESIRED_MARGIN, self.InputRasterProps.width)\n reqDataT = max(0, self.yLims[0] - _TOTAL_DESIRED_MARGIN)\n reqDataB = min(self.yLims[1] + _TOTAL_DESIRED_MARGIN, self.InputRasterProps.height)\n reqDataHeight = reqDataB - reqDataT\n reqDataWidth = reqDataR - reqDataL\n\n # get an estimated maximum width for the slices based on the available memory and the number of years (vertical\n # size of the stack) and height (y dimension) of the data (not slicing in Y dimension)\n readShapeZYX = (maxZSize, reqDataHeight, reqDataWidth)\n sliceXSize = self.__CalculateSliceSize(readShapeZYX=readShapeZYX)\n\n # how many slices are we going to need\n totalFillWidth = self.xLims[1] - self.xLims[0]\n nChunks = int((totalFillWidth // sliceXSize) + 1)\n\n # generate the \"chunk\" boundaries that will represent the data processed in one thread's job, in terms of the\n # pixel coordinates of the source data files.\n # the actual slice boundaries for filling and writing out\n chunkEdges = np.linspace(self.xLims[0], self.xLims[1], nChunks + 1).astype(np.int32)\n leftRealEdges = chunkEdges[:-1]\n rightRealEdges = chunkEdges[1:]\n # the boundaries of these slices plus the margin of extra data for A1, but respecting the fact we can't\n # go beyond the edge of the source data\n left_A1_edges = np.clip((chunkEdges - _A1_SEARCH_RADIUS)[:-1],\n 0, np.inf).astype(np.int32)\n right_A1_edges = np.clip((chunkEdges + _A1_SEARCH_RADIUS)[1:],\n -np.inf, self.InputRasterProps.width).astype(np.int32)\n # the boundary of these slices plus the margin of extra data for A1 and despeckle, but respecting the fact\n # we can't go beyond the edge of the source data\n left_Despeckle_edges = np.clip((chunkEdges - _TOTAL_DESIRED_MARGIN)[:-1],\n 0, np.inf).astype(np.int32)\n right_Despeckle_edges = np.clip((chunkEdges + _TOTAL_DESIRED_MARGIN)[1:],\n -np.inf, self.InputRasterProps.width).astype(np.int32)\n\n # the left and right task boundaries are _SEARCH_RADIUS bigger than the data that will be searched\n # within them so that all pixels can have neighbours, if possible (not at global edge)\n x_offsets_overlapping = zip(left_Despeckle_edges, left_A1_edges, leftRealEdges,\n rightRealEdges, right_A1_edges, right_Despeckle_edges)\n\n # can we pad the top and bottom? (not if we are doing a global run as y-slicing isn't implemented)\n topA1Edge = np.clip(self.yLims[0] - _A1_SEARCH_RADIUS, 0, np.inf).astype(np.int32)\n bottomA1Edge = np.clip(self.yLims[1] + _A1_SEARCH_RADIUS,\n -np.inf, self.InputRasterProps.height).astype(np.int32)\n topDespeckleEdge = np.clip(self.yLims[0] - _TOTAL_DESIRED_MARGIN, 0, np.inf).astype(np.int32)\n bottomDespeckleEdge = np.clip(self.yLims[1] + _TOTAL_DESIRED_MARGIN,\n -np.inf, self.InputRasterProps.height).astype(np.int32)\n\n # create a list of the slices needed for each calendar day, i.e. all the x slices and for each of these,\n # all the y slices. 
Each dimension and slice coords are recorded as a 6-tuple which is a member of a list.\n # As there is only 1 y slice for now, these are a single-item list.\n # NB in the notebook version we also added the days themselves to the cartesian product for an overall\n # tasklist, here we are not bothering with that part\n taskListForEachDay = list(itertools.product(\n x_offsets_overlapping,\n [(topDespeckleEdge, topA1Edge, self.yLims[0], self.yLims[1], bottomA1Edge, bottomDespeckleEdge)]))\n return taskListForEachDay", "def test_partition_to_list_of_ids():\n\n flock_size = 3 # and num_cc = 4\n\n # measurement 0 in phase 0 (output of the flock at the time step 0)\n meas_0 = [1, 0, 0, 0,\n 0, 0, 1, 0,\n 0, 0, 0, 1]\n\n # measurement 1 in phase 0 (flock output at time step 1)\n meas_1 = [0, 0, 1, 0,\n 0, 1, 0, 0,\n 1, 0, 0, 0]\n\n # measurement 0 in phase 1 (flock output at time step 0 at phase 1)\n meas_p_0 = [0, 0, 0, 1,\n 1, 0, 0, 0,\n 1, 0, 0, 0]\n\n # measurement 1 in phase 1\n meas_p_1 = [0, 1, 0, 0,\n 1, 0, 0, 0,\n 0, 1, 0, 0]\n\n phase0 = [\n torch.Tensor(meas_0).view(flock_size, -1),\n torch.Tensor(meas_1).view(flock_size, -1)]\n\n phase1 = [\n torch.Tensor(meas_p_0).view(flock_size, -1),\n torch.Tensor(meas_p_1).view(flock_size, -1)]\n\n # measurements in the format collected by the measurement_manager.parse_to_...\n measurements = [phase0, phase1]\n\n # test the helper method\n partitioned = _partition_tensor_to_ids(phase0[0], flock_size)\n assert partitioned == [0, 2, 3]\n\n # test the final method, outer dimension corresponds to experts\n result = partition_to_list_of_ids(measurements, 3)\n\n # expected measurement for each expert\n e0 = [[0, 2], [3, 1]] # [[phase_0_measurement_0_id, phase_0_measurement_1_id],[phase_1..., ..]]\n e1 = [[2, 1], [0, 0]]\n e2 = [[3, 0], [0, 1]]\n\n # dimension for expert is the outer one\n assert result[0] == e0\n assert result[1] == e1\n assert result[2] == e2", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)", "def read_information(h, J, pivot):\r\n if pivot == h.shape[0]:\r\n return [], [], []\r\n curr_idx = pivot\r\n # mark represents starting point of each segment\r\n mark = curr_idx\r\n # list of mark for each segment\r\n cut_index = [mark]\r\n # list of list of int values of each segment\r\n segment_length = []\r\n int_values = []\r\n # incremental count of segments\r\n nb_segment = 0\r\n\r\n h_shape = h.shape[0]\r\n binary_dict = {}\r\n while curr_idx < h_shape:\r\n # init the mark_th segement in int_values\r\n int_value = h[mark]\r\n this = int_value\r\n # mv current index to next()\r\n curr_idx += 1\r\n while curr_idx < h_shape:\r\n exp = curr_idx-mark\r\n if not exp in binary_dict.keys():\r\n binary_dict[exp] = 2**exp\r\n this = [(i + binary_dict[exp]) if j == 1 else i for i, j in zip(this, h[curr_idx])]\r\n if len(set(this)) <= J and curr_idx < h_shape:\r\n int_value = this\r\n curr_idx += 1\r\n else:\r\n break\r\n\r\n int_values.append(int_value)\r\n mark = curr_idx\r\n cut_index.append(mark)\r\n nb_segment += 1\r\n\r\n transitions = []\r\n for i in range(len(int_values)-1):\r\n transitions.append(dict(Counter([(i, j) for i, j in zip(int_values[i], int_values[i+1])])))\r\n return [dict(Counter(i)) for i in int_values], transitions, 
cut_index[:-1]", "def createkSpaceMask (im_size, num_half_lines):\n \n if im_size[0] < 32 or im_size[1] < 32:\n sys.exit(\"side < 32\")\n\n if num_half_lines < 20:\n sys.exit(\"numOfLines < 10\")\n \n center = np.floor((im_size + 2) / 2)\n half_diagonal = np.linalg.norm(im_size) / 2\n step_length = 0.5\n num_steps = int(np.round(half_diagonal / step_length + 1))\n sampling_mask = np.zeros(im_size, float)\n\n for lineNum in range(num_half_lines):\n theta = 2 * np.pi * lineNum / num_half_lines\n direction = np.array([np.cos(theta), np.sin(theta)])\n for stepNum in range(num_steps):\n location = np.round(center + direction * stepNum * step_length).astype(int)\n if (location[0] >= 0) and (location[0] < im_size[0]) and (location[1] >= 0) and (location[1] < im_size[1]):\n sampling_mask[location[0], location[1]] = 1\n\n # take the center of kspace to the corners\n sampling_mask = np.fft.fftshift(sampling_mask)\n\n return sampling_mask", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n 
PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def calc(s,tnx,i0s,ies):\n\n # round down\n tile0 = s.start // tnx\n # round up\n tilee = -(-s.stop // tnx)\n\n tiles = []\n srcslices = []\n tgtslices = []\n for tile in range(tile0,tilee):\n ii0 = max(0, -((s.start - i0s[tile]) // s.step))\n iie = -((s.start - min(s.stop,ies[tile])) // s.step)\n if iie > ii0:\n tiles.append(tile)\n myi0 = s.start + ii0*s.step - i0s[tile]\n myie = s.start + iie*s.step - i0s[tile]\n srcslices.append(slice(myi0,myie,s.step))\n tgtslices.append(slice(ii0,iie))\n\n return tiles, srcslices, tgtslices", "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# 
Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def get_sub_image_coords(coords, region_size, x_parts, y_parts):\n sub_image_coords = []\n if coords and region_size:\n left_start = coords[0] - region_size / 2\n top_start = coords[1] + region_size / 2\n sub_image_size_x = region_size / x_parts\n sub_image_size_y = region_size / y_parts\n for ix in range(x_parts):\n for iy in range(y_parts):\n sub_image_coords.append(\n (\n left_start + sub_image_size_x / 2 + (ix * sub_image_size_x),\n top_start - sub_image_size_y / 2 - (iy * sub_image_size_y),\n )\n )\n return sub_image_coords", "def clip_raster_with_multiple_polygons(src_raster, src_poly, partitions=10, return_raster=False, no_data_value=None, seed=None):\n # init resource\n assert (len(src_poly) // partitions) < np.iinfo(np.int32).max, \"Please increase partitions in order the gdal type overflow issue.\"\n df_poly_for_rasterize = src_poly.copy()\n partitions = len(src_poly) if len(src_poly) < partitions else partitions \n df_poly_for_rasterize.loc[:, 'id'] = range(len(df_poly_for_rasterize))\n parts = __split_idxs_partitions(df_poly_for_rasterize['id'].values, partitions=partitions, seed=seed)\n if no_data_value is None:\n no_data_value = 0 if src_raster.no_data_value is None else src_raster.no_data_value\n\n # rasterize by its id and clipping\n clipped_imgs = []\n for ps_idx, ps in enumerate(parts): # deal with one part of poly in shp per loop: 1. rasterize => 2. find each poly in the shp\n # 1. rasterize: rasterize only df_plot['id'].isin(ps) (only id in the splitted shp)\n df_poly_for_rasterize_ps = pd.concat([df_poly_for_rasterize[df_poly_for_rasterize['id'] == p].copy() for p in ps])\n df_poly_for_rasterize_ps.loc[:, 'id_ps'] = np.array(range(len(df_poly_for_rasterize_ps)), dtype=np.int32)\n raster_poly_part = rasterize_layer(df_poly_for_rasterize_ps, src_raster.rows, src_raster.cols, src_raster.geo_transform, use_attribute='id_ps', all_touched=True, no_data_value=-1)\n \n for id_p in range(len(df_poly_for_rasterize_ps)):\n # 2. 
find each the location (in the raster) of each poly in the shp \n coords = df_poly_for_rasterize_ps[df_poly_for_rasterize_ps['id_ps'] == id_p].total_bounds.reshape(2,2)\n npidxs = CRS.coords_to_npidxs(coords, src_raster.geo_transform)\n row_idxs_st, row_idxs_end, col_idxs_st, col_idxs_end = np.min(npidxs[:, 0]), np.max(npidxs[:, 0])+1, np.min(npidxs[:, 1]), np.max(npidxs[:, 1])+1\n clipped_img = src_raster.data[row_idxs_st:row_idxs_end, col_idxs_st:col_idxs_end].copy()\n ploy_mask = raster_poly_part.data[row_idxs_st:row_idxs_end, col_idxs_st:col_idxs_end, 0] == id_p\n if np.sum(ploy_mask) > 0:\n # generate clipped image\n clipped_img[~ploy_mask] = no_data_value\n if return_raster:\n gt = np.array(src_raster.geo_transform)\n gt[[0, 3]] = CRS.npidxs_to_coords([(row_idxs_st, col_idxs_st)], src_raster.geo_transform)[0]\n clipped_img = tgp.Raster(clipped_img, tuple(gt), src_raster.projection, src_raster.gdaldtype, no_data_value, src_raster.metadata)\n clipped_imgs.append(clipped_img)\n else:\n clipped_imgs.append(None)\n \n # na_percentage = np.sum([c is None for c in clipped_imgs[-len(df_poly_for_rasterize_ps):]]) / len(df_poly_for_rasterize_ps)\n # if na_percentage != 0 : \n # print(ps_idx, na_percentage)\n \n clipped_imgs = [clipped_imgs[i] for i in np.argsort(np.hstack(parts))]\n return clipped_imgs", "def make_slices(data, win_size):\n rows = data.shape[0] - win_size[0] + 1\n cols = data.shape[1] - win_size[1] + 1\n slices = []\n for i in range(win_size[0]):\n for j in range(win_size[1]):\n slices.append(data[i:rows+i, j:cols+j])\n return slices", "def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! 
The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")", "def compose_hankel_2d(slice3d,rp):\n slice3d = np.array(slice3d, dtype=DTYPE)\n # annihilating filter size\n p = rp['filter_size'][0]\n q = rp['filter_size'][1]\n # hankel m n\n m = slice3d.shape[1]\n n = slice3d.shape[2]\n \n if rp['virtualcoilboost'] == False:\n receiverdim = int(rp['rcvrs'])\n elif rp['virtualcoilboost'] == True:\n receiverdim = int(rp['rcvrs']*2)\n \n for rcvr in range(receiverdim):\n\n slice2d = slice3d[rcvr,...]\n\n #make inner hankel list\n for j in range(0,n):\n # iterate in outer hankel elements\n for k in range(0,p):\n # iterate inner hankel columns\n col = np.expand_dims(slice2d[k:m-p+k+1,j],axis=1)\n if k == 0:\n cols = col\n else:\n cols = np.concatenate([cols,col],axis=1)\n if j == 0:\n hankel = np.expand_dims(cols,axis=0)\n else:\n hankel = np.concatenate(\\\n [hankel, np.expand_dims(cols,axis=0)], axis=0)\n\n # make outer hankel\n for i in range(q):\n #col = cp.hstack([hankel[i:n-q+i,:,:]])\n col = np.vstack([hankel[k,:,:] for k in range(i,n-q+i+1)])\n if i == 0:\n cols 
= col\n else:\n cols = np.concatenate([cols,col], axis=1)\n # concatenating along the receivers\n if rcvr == 0:\n hankel_full = cols\n else:\n hankel_full = np.concatenate([hankel_full, cols], axis=1)\n\n return hankel_full", "def Compare2dMasksFrom3dMasks(OrigSegRoi, NewSegRoi, OrigDicomDir, NewDicomDir): \n \n from DicomTools import GetDicomSOPuids\n from SegTools import GetPFFGStoSliceInds\n import matplotlib.pyplot as plt\n \n # Get the DICOM SOP UIDs:\n OrigSOPuids = GetDicomSOPuids(DicomDir=OrigDicomDir)\n NewSOPuids = GetDicomSOPuids(DicomDir=NewDicomDir)\n \n # Get the Per-frameFunctionalGroupsSequence-to-slice indices:\n OrigPFFGStoSliceInds = GetPFFGStoSliceInds(OrigSegRoi, OrigSOPuids)\n NewPFFGStoSliceInds = GetPFFGStoSliceInds(NewSegRoi, NewSOPuids)\n \n # Combined indices from OrigPFFGStoSliceInds and NewPFFGStoSliceInds:\n AllInds = OrigPFFGStoSliceInds + NewPFFGStoSliceInds\n \n # Remove duplicates:\n AllInds = list(set(AllInds))\n \n \n # Get the 3D SEG masks:\n Orig3dMask = OrigSegRoi.pixel_array\n New3dMask = NewSegRoi.pixel_array\n \n OrigShape = Orig3dMask.shape\n NewShape = New3dMask.shape\n \n print(f'Segments exist in OrigSegRoi on slices {OrigPFFGStoSliceInds}')\n print(f'Shape of Orig3dMask = {OrigShape}')\n print(f'\\nSegments exist in NewSegRoi on slices {NewPFFGStoSliceInds}')\n print(f'Shape of New3dMask = {NewShape}\\n')\n \n # Initialise the 3D cropped SEG masks:\n Orig3dMaskCropped = []\n New3dMaskCropped = []\n \n for i in range(OrigShape[0]):\n cropped = CropNonZerosIn2dMask(Orig3dMask[i])\n \n Orig3dMaskCropped.append(cropped)\n \n for i in range(NewShape[0]):\n cropped = CropNonZerosIn2dMask(New3dMask[i])\n \n New3dMaskCropped.append(cropped)\n \n \n Nrows = len(AllInds)\n \n Ncols = 2\n \n n = 1 # initialised sub-plot number\n \n fig, ax = plt.subplots(Nrows, Ncols, figsize=(5*Ncols, 6*Nrows))\n \n for i in range(len(AllInds)):\n SliceNum = AllInds[i]\n \n # Does slice SliceNum have a segment in OrigSegRoi or NewSegRoi?\n if SliceNum in OrigPFFGStoSliceInds:\n OrigFrameNum = OrigPFFGStoSliceInds.index(SliceNum)\n \n ax = plt.subplot(Nrows, Ncols, n, aspect='equal')\n ax.imshow(Orig3dMaskCropped[OrigFrameNum])\n ax.set_xlabel('Pixels'); ax.set_ylabel('Pixels')\n ax.set_title(f'Orig slice {SliceNum}')\n \n n += 1 # increment sub-plot number\n \n if SliceNum in NewPFFGStoSliceInds:\n NewFrameNum = NewPFFGStoSliceInds.index(SliceNum)\n \n ax = plt.subplot(Nrows, Ncols, n, aspect='equal')\n ax.imshow(New3dMaskCropped[NewFrameNum])\n ax.set_xlabel('Pixels'); ax.set_ylabel('Pixels')\n ax.set_title(f'New slice {SliceNum}')\n \n n += 1 # increment sub-plot number\n \n return", "def grid_spherical_decomposed(x, y, z, data, x_i, y_i, z_i, horz_res, missing_value=-32767):\n\n r_map = np.sqrt(x**2.0 + y**2.0) # cartesian radius from map (x,y) center\n az_map = np.arctan2(y,x) #azimuth in the cartesian system. 
might vary along a ray due to map projection curvature\n vcp = np.fromiter((np.median(az_map[:, i_az, :]) for i_az in range(az_map.shape[1])), np.float32)\n print x.shape\n \n r_i = np.arange(r_map.min(), r_map.max(), horz_res) # cartesian radius from map(x,y) center\n\n # also need to griddata the x, y, z geographic coordinates.\n # decomposed geometry in radar polar coordinates is a not a\n # geophysical coordinate system (it's really a tangent plane\n # coord sys without beam refraction effects), so really there \n # are two xyz systems in play here.\n\n # unless, if by using z and R = np.sqrt(x**2.0 + y**2.0), we remain in a cylinderical \n # system referenced to the map projection in use. I think this is true.\n\n # Interpolate from spherical to cylindrical.\n # Cylindrical system is a different\n # range coordinate than the radar range coordinate.\n az_idx = 1\n cyl_grid_shape = (r_i.shape[0], x.shape[az_idx], z_i.shape[0])\n cyl_grid = np.empty(cyl_grid_shape)\n \n for az_id in range(cyl_grid_shape[az_idx]):\n progress(az_id, cyl_grid_shape[az_idx], 'Gridding along azimuths')\n rhi_r = r_map[:, az_id, :]\n # rhi_y = y[:, az_id, :]\n # R_i = rhir = np.sqrt(x[:, az_id, :]**2.0 + y[:, az_id, :]**2.0)\n rhi_z = z[:, az_id, :]\n rhi_data = data[:, az_id, :]\n \n # input and output coordinates need to be taken from the same coordinate system\n cyl_grid[:, az_id, :] = griddata(rhi_r.flatten(), rhi_z.flatten(), rhi_data.flatten(), r_i, z_i).T\n print \"\\r\" + 'Gridding along azimuths ... done'\n # cyl_grid is r, az, z instead of r, az, el\n \n # get mesh of coordinates for all interpolated radii r_i and along the azimuth\n # since constant radar azimuth might have curvature induced by the map projection\n # it's tricky to do this.\n\n # steps:\n # Do new transform from r,az radar system to map system using r=r_i to get x,y\n # or \n # Just do naive assumption that azimuths are straight and accept the error (used this one)\n \n # interpolate from cylindrical to cartesian.\n grid = np.empty((len(x_i), len(y_i), len(z_i)), dtype=np.float32)\n for z_id in range(z_i.shape[0]):\n progress(z_id, z_i.shape[0], 'Gridding at constant altitude')\n cappi_x = r_i[:, None]*np.cos(vcp[None, :])\n cappi_y = r_i[:, None]*np.sin(vcp[None, :])\n cappi_data = cyl_grid[:,:,z_id]\n \n # input and output coordinates need to be taken from the same coordinate system\n grid_2d = griddata(cappi_x.flatten(), cappi_y.flatten(), cappi_data.flatten(), x_i, y_i).T\n grid[:, :, z_id] = grid_2d\n print \"\\r\" + 'Gridding at constant altitude ... 
done'\n \n grid[np.isnan(grid)] = missing_value\n \n return grid", "def constroi(self):\n\n #---------------------------------------------------------\n # nnos = numero de nos\n # ngl = numero de graus de liberdade\n # ID entra como matriz de zeros e uns e sai com uma matriz\n # contendo os indices dos graus de liberdade e\n # zeros nas restriçoes\n # con = contem a conectividade de cada elemento\n # --------------------------------------------------------\n nnos = len(self.ID)\n ngl = 0.0\n\n for i in range(nnos):\n for j in range(2):\n n = self.ID[i][j]\n if n >= 1:\n self.ID[i][j]=0.0\n else:\n ngl = ngl + 1\n self.ID[i][j] = ngl\n\n self.LD = np.zeros((self.nelm,4))\n for i in range(self.nelm):\n for j in range(2):\n self.LD[i][j] = self.ID[self.conect[i][0]][j]\n self.LD[i][j+2] = self.ID[self.conect[i][1]][j]\n return np.transpose(self.LD)", "def get_ptsByCntByRoi(rts, sopuids, p2c=False):\n \n # Get the ContourSequence-to-slice indices by ROI:\n c2sIndsByRoi, c2sInds = get_c2sIndsByRoi(rts, sopuids)\n \n numRois = len(c2sIndsByRoi)\n \n if p2c:\n print('\\n\\n', '-'*120)\n print('Running of get_ptsByCntByRoi():')\n print('\\n\\n', '-'*120)\n #print(f' c2sIndsByRoi = {c2sIndsByRoi}')\n print_indsByRoi(c2sIndsByRoi)\n print(f'numRois = {numRois}')\n \n ptsByCntByRoi = []\n \n for r in range(numRois):\n # Number of contours in this ROI:\n numCnts = len(c2sIndsByRoi[r])\n \n # Get a list of the ContourSequences in this ROI:\n #sequences = deepcopy(rts.ROIContourSequence[r].ContourSequence)\n sequences = list(rts.ROIContourSequence[r].ContourSequence)\n \n # Get a list of all ReferencedSOPInstanceUIDs from ContourSequences:\n #RefSopUids = [sequence.ContourImageSequence[0]\\\n # .ReferencedSOPInstanceUID for sequence in ContourSequences]\n \n if False:#p2c:\n print(f' r = {r}')\n print(f' numCnts = {numCnts}')\n print(f' number of Contour Sequences = {len(sequences)}')\n \n ptsByCnt = []\n \n # Loop through each contour sequence:\n for c in range(len(sequences)):\n # Get the indices of all matching RefSopUids:\n #ind = RefSopUids.index(SopUid) # this will only find the first \n # match, and there may be more than one!\n #inds = [i for i, e in enumerate(RefSopUids) if e==SopUid]\n \n # Iterate for each index in inds:\n #for ind in inds:\n \n #ContourSequence = deepcopy(ContourSequences[c])\n \n #ContourData = [float(item) for item in ContourSequence.ContourData]\n \n #cntData = deepcopy(sequence.ContourData)\n cntData = list(sequences[c].ContourData)\n \n pts = cntdata_to_pts(cntData)\n \n ptsByCnt.append(pts)\n \n if False:#p2c:\n print(f' c = {c}')\n print(f' len(pts) = {len(pts)}')\n \n ptsByCntByRoi.append(ptsByCnt)\n \n cntdataByCntByRoi = ptsByCntByRoi_to_cntdataByCntByRoi(\n ptsByCntByRoi\n )\n \n if p2c:\n #R = len(ptsByCntByRoi)\n #print(f'\\n len(ptsByCntByRoi) = {R}')\n #for r in range(R):\n # print(f' len(ptsByCntByRoi[{r}]) = {len(ptsByCntByRoi[r])}')\n print_ptsByCntByRoi(ptsByCntByRoi)\n print('-'*120)\n\n return ptsByCntByRoi, cntdataByCntByRoi, c2sIndsByRoi, c2sInds", "def get_slice(P1, P2, name):\n \n centre_dist = distance_3D(P1, P2)\n plot_img = np.zeros((ceil(centre_dist / 2. + 1), centre_dist + 2 ))\n Xrange = np.arange(-centre_dist / 4., centre_dist / 4. + 1)\n \n # time goes along the vector between P1 and P2\n # since it might be at an angle, I can't loop in 1\n # pixel increments - this will miss certain slices. 
Therefore,\n # I need to loop through by 1/cosA, where A is angle between\n # the xy plane and vector P1->P2\n sampling = sample_rate(P1, P2)\n \n for time in np.linspace(0, centre_dist + 1,\n centre_dist * sampling):\n # Go up along the line\n new_pt = vector_3D(P1, P2, time)\n old_pt = vector_3D(P1, P2, time - centre_dist / 2. * sampling)\n\n if time == 0:\n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n # Check if the previous slice is the same as the next\n # don't load it again if it is - save computation time\n if int(round(new_pt[2], 0)) != int(round(old_pt[2], 0)):\n \n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n \n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n else:\n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n\n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n \n return plot_img", "def id2ind(ngrid,nspecies,filename):\n ntot=ngrid[0]*ngrid[1]*ngrid[2]*nspecies\n np_icell=np.empty((3,ntot),dtype=np.intc)\n np_ispecies=np.empty(ntot,dtype=np.intc)\n icell=np_icell\n ispecies=np_ispecies\n \n f=StringIO.StringIO()\n\n for ii in xrange(ntot):\n tmp,ispecies[ii]=divmod(ii,nspecies)\n tmp,icell[0,ii]=divmod(tmp,ngrid[0])\n icell[2,ii],icell[1,ii]=divmod(tmp,ngrid[1])\n jj=(icell[0,ii]+1)*(icell[1,ii]+1)*(icell[2,ii]+1)\n f.write(\"{:>6d} {:>6d} {:>6d}\\n\".format(ii+1,(ii+nspecies)//nspecies,ispecies[ii]))\n ffinal=open(filename,\"w\")\n ffinal.write(f.getvalue())\n f.close()\n ffinal.close()", "def getChunks():", "def test_4d_two_index_time():\n fmask = \"common_data/4d_pipe/time_2index/test%02d%03d.fid\"\n dic,data = ng.pipe.read_lowmem(fmask)\n\n fname = \"common_data/4d_pipe/time_2index/test02006.fid\"\n sdic,sdata = ng.pipe.read(fname)\n\n assert data.shape == (8, 12, 16, 1400)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2,3].real,2) == -395.11\n assert round(data[0,1,2,3].imag,2) == 52.72\n assert round(data[5,9,11,987].real,2) == -35.09\n assert round(data[5,9,11,987].imag,2) == 33.07\n\n # check the slice\n assert sdata.shape == (16, 1400)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == 75.93\n assert round(sdata[1,2].imag,2) == 5.55\n assert round(sdata[7,800].real,2) == -8.93\n assert round(sdata[7,800].imag,2) == -10.24\n\n # slice/data matching\n assert_array_equal(data[1,5],sdata)\n\n lowmem_write_readback_4D(dic,data)", "def get_DIV2k_data(pLow, pFull, bs: int, sz: int):\n suffixes = {\"dataset/DIV2K_train_LR_x8\": \"x8\",\n \"dataset/DIV2K_train_LR_difficult\": \"x4d\",\n \"dataset/DIV2K_train_LR_mild\": \"x4m\"}\n lowResSuffix = suffixes[str(pLow)]\n src = ImageImageList.from_folder(pLow, presort=True).split_by_idxs(\n train_idx=list(range(0, 800)), valid_idx=list(range(800, 900)))\n\n data = (src.label_from_func(\n lambda x: pFull/(x.name).replace(lowResSuffix, '')\n ).transform(\n get_transforms(\n max_rotate=30,\n max_zoom=3.,\n max_lighting=.4,\n max_warp=.4,\n p_affine=.85\n ),\n size=sz,\n tfm_y=True,\n ).databunch(bs=bs, num_workers=8, no_check=True)\n .normalize(imagenet_stats, do_y=True))\n data.c = 3\n return data", "def get_data_subsets(t0, t1):\n\n # Iridium data:\n irid = iridium[(iridium.time >= t0) & (iridium.time <= t1)]\n irid_B = 
np.vstack((irid.B_e.values, irid.B_n.values, irid.B_r.values))\n irid_coords = np.vstack((irid.lon.values, irid.lat.values, irid.r.values))\n\n # SuperMAG data:\n smag = supermag.loc[t0:t1, :]\n smag_B = np.vstack((smag.Be.values, smag.Bn.values, smag.Bu.values))\n smag_coords = np.vstack((smag.lon.values, smag.lat.values))\n\n # SuperDARN data:\n sd = superdarn.loc[t0:t1, :]\n vlos = sd['vlos'].values\n sd_coords = np.vstack((sd['glon'].values, sd['glat'].values))\n los = np.vstack((sd['le'].values, sd['ln'].values))\n\n\n # Make the data objects. The scale keyword determines a weight for the dataset. Increase it to reduce weight\n iridium_data = lompe.Data(irid_B * 1e-9, irid_coords, datatype = 'space_mag_fac', scale = 200e-9)\n supermag_data = lompe.Data(smag_B * 1e-9, smag_coords, datatype = 'ground_mag' , scale = 100e-9)\n superdarn_data = lompe.Data(vlos , sd_coords , LOS = los, datatype = 'convection' , scale = 500 )\n\n return(iridium_data, supermag_data, superdarn_data)", "def test_CCI_SM_v033_025Img_img_reading_1D_combined():\n\n filename = os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"combined\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-20160101000000-fv03.3.nc\")\n\n parameter = ['sm']\n img_c = CCI_SM_025Img(filename=filename, parameter=parameter, subgrid=None,\n array_1D=True)\n\n image_c = img_c.read()\n\n ref_lat = image_c.lat[1440 * (719-273) + 693]\n ref_lon = image_c.lon[1440 * 273 + 693]\n\n assert ref_lon == -6.625\n assert ref_lat == 21.625\n\n assert sorted(image_c.data.keys()) == sorted(parameter)\n\n ref_sm = image_c.data['sm'][1440 * (719-273) + 693]\n nptest.assert_almost_equal(ref_sm, 0.142998, 5)\n\n ###### land grid\n land_grid = CCILandGrid()\n img_c = CCI_SM_025Img(filename=filename, parameter=parameter, subgrid=land_grid,\n array_1D=True)\n\n assert img_c.grid.find_nearest_gpi(-6.625, 21.625) == (642933, 0)\n\n image_c = img_c.read()\n\n sm = image_c.data['sm'][164759]\n lat = image_c.lat[164759]\n lon = image_c.lon[164759]\n\n assert ref_lat == lat\n assert ref_lon == lon\n nptest.assert_almost_equal(ref_sm, sm, 5)", "def cfdGetBoundaryElementsSubArrayForBoundaryPatch(self):\r\n\r\n for iBPatch, theBCInfo in self.cfdBoundaryPatchesArray.items():\r\n \r\n startBElement=self.numberOfElements+self.cfdBoundaryPatchesArray[iBPatch]['startFaceIndex']-self.numberOfInteriorFaces\r\n endBElement=startBElement+self.cfdBoundaryPatchesArray[iBPatch]['numberOfBFaces']\r\n \r\n self.cfdBoundaryPatchesArray[iBPatch]['iBElements']=list(range(int(startBElement),int(endBElement)))", "def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart", "def build_mosaic(self, side_len):\n self.side_len = side_len\n tiles = []\n center_coords = []\n\n row_idx = 0\n clip_num = 0\n\n for i in range(int(self.src.shape[0] / side_len)+1):\n col_idx = 0\n for j in range(int(self.src.shape[1] / side_len)+1):\n\n clip_num += 1\n # get clip\n clip = self.get_window(row_idx, col_idx, side_len)\n\n # handle non-square clips\n if clip.shape[1] != side_len or clip.shape[2] != side_len:\n pad = np.full((3, side_len, side_len), self.pad_val)\n pad[:, 0:clip.shape[1], 0:clip.shape[2]] = clip\n clip = pad.copy()\n\n tiles.append(clip)\n\n # get center lat/lon\n lat, lon = self.get_latlon_point(row_idx + side_len // 
2, col_idx + side_len // 2)\n\n center_coords.append([lat, lon])\n\n # increment counters\n col_idx += side_len\n row_idx += side_len\n\n return tiles, center_coords", "def cmap_idl4():\n r=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 15 22 30 37 45 52 60 67 75 82 90 97 105 112 120 125 130 135 140 145 150 155 160 165 170 175 180 185 190 195 200 200 201 201 202 202 203 203 204 204 205 205 206 206 207 207 208 208 209 209 210 210 211 211 212 212 213 213 214 214 215 215 216 216 217 217 218 218 219 219 220 220 221 221 222 222 223 223 224 224 225 225 226 226 227 227 228 228 229 229 230 230 231 231 232 232 233 233 234 234 235 235 236 236 237 237 238 238 239 239 240 240 241 241 242 242 243 243 244 244 245 245 246 246 247 247 248 248 249 249 250 250 251 251 252 252 253 253 254 254 255 255\"\n g=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 6 9 12 15 18 21 25 28 31 34 37 40 43 46 50 53 56 59 62 65 68 71 75 78 81 84 87 90 93 96 100 103 106 109 112 115 118 121 125 128 131 134 137 140 143 146 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 150 149 148 148 147 146 146 145 145 144 143 143 142 141 141 140 140 137 135 132 130 127 125 122 120 117 115 112 110 107 105 102 100 93 87 81 75 68 62 56 50 43 37 31 25 18 12 6 0 2 4 6 9 11 13 16 18 20 23 25 27 29 32 34 36 39 41 43 46 48 50 53 55 57 59 62 64 66 69 71 73 76 78 80 83 85 87 89 92 94 96 99 101 103 106 108 110 113 115 117 119 122 124 126 129 131 133 136 138 140 142 145 147 149 152 154 156 159 161 163 166 168 170 172 175 177 179 182 184 186 189 191 193 196 198 200 202 205 207 209 212 214 216 219 221 223 226 228 230 232 235 237 239 242 244 246 249 251 253 255\"\n b=\"0 2 4 6 8 10 12 14 16 18 20 22 25 27 29 31 33 35 37 39 41 43 45 47 50 52 54 56 58 60 62 64 66 68 70 72 75 77 79 81 83 85 87 89 91 93 95 97 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 100 96 93 90 87 84 81 78 75 71 68 65 62 59 56 53 50 46 43 40 37 34 31 28 25 21 18 15 12 9 6 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\"\n rm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(r.split())]\n gm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(g.split())]\n bm = [tuple([i/255,]+[int(x)/255,]*2) for i,x in enumerate(b.split())]\n cdict = {'red':rm, 'green':gm, 'blue':bm}\n cmap = plt.matplotlib.colors.LinearSegmentedColormap('idl4',cdict,256)\n return cmap", "def get_segmented_pcds_from_image(pcd_file_path, training_weights_checkpoint_dir): \n pcd_file_name = pcd_file_path[pcd_file_path.rfind('/')+1:-4]\n dict_of_scenes1 = {}\n temp_np_array_dict = {}\n\n dataloader_obj = DataLoaderOCID()\n\n # dictionary creation step\n temp_pcd = o3d.io.read_point_cloud(pcd_file_path, remove_nan_points=False)\n temp_np_array_dict['rgb'] = dataloader_obj.get_rgb_from_pcd(temp_pcd)\n temp_xyz_array = np.asarray(temp_pcd.points)\n temp_xyz_array = temp_xyz_array.reshape((temp_np_array_dict['rgb'].shape[0], temp_np_array_dict['rgb'].shape[1], 3))\n temp_np_array_dict['xyz'] = dataloader_obj.get_xyz_from_pcd(temp_pcd)\n temp_np_array_dict['depth'] = 
temp_np_array_dict['xyz'][:,:,2]\n dict_of_scenes1[pcd_file_name] = temp_np_array_dict\n temp_np_array_dict['label'] = np.zeros_like(temp_np_array_dict['xyz'][:,:,0])\n\n rgb_imgs1, xyz_imgs1, seg_masks1, label_imgs1, fg_masks1, file_names1 = get_segmented_masks_from_rgbd(dict_of_scenes1, training_weights_checkpoint_dir)\n visualize_predicted_and_true_segment_masks(rgb_imgs1, xyz_imgs1, seg_masks1, label_imgs1)\n\n segmented_pcds = get_segmented_point_clouds(seg_masks1[0], temp_np_array_dict['depth'])\n return segmented_pcds", "def sliceMesh(polyline,IKLE,MESHX,MESHY,tree=None):\n from matplotlib.tri import Triangulation\n xys = []\n douplets = []\n # ~~> Calculate the minimum mesh resolution\n dxy = math.sqrt(min(np.square(np.sum(np.fabs(MESHX[IKLE]-MESHX[np.roll(IKLE,1)]),axis=1)/3.0) + \\\n np.square(np.sum(np.fabs(MESHY[IKLE]-MESHY[np.roll(IKLE,1)]),axis=1)/3.0)))\n accuracy = np.power(10.0, -8+np.floor(np.log10(dxy)))\n\n xyo = np.array(polyline[0])\n for i in range(len(polyline)-1):\n xyi = np.array(polyline[i+1])\n dio = math.sqrt(sum(np.square(xyo-xyi)))\n\n # ~~> Resample the line to that minimum mesh resolution\n rsmpline = np.dstack((np.linspace(xyo[0],xyi[0],num=int(dio/dxy)),np.linspace(xyo[1],xyi[1],num=int(dio/dxy))))[0]\n nbpoints = len(rsmpline)\n nbneighs = min( 8,len(IKLE) )\n # ~~> Filter closest 8 elements (please create a good mesh) as a halo around the polyline\n halo = np.zeros((nbpoints,nbneighs),dtype=np.int)\n for i in range(nbpoints):\n d,e = tree.query(rsmpline[i],nbneighs)\n halo[i] = e\n halo = np.unique(halo)\n\n # ~~> Get the intersecting halo (on a smaller mesh connectivity)\n edges = Triangulation(MESHX,MESHY,IKLE[halo]).get_cpp_triangulation().get_edges()\n\n # ~~> Last filter, all nodes that are on the polyline\n olah = []\n nodes = np.unique(edges)\n for node in nodes: # TODO(jcp): replace by numpy calcs\n if getDistancePointToLine((MESHX[node],MESHY[node]),xyo,xyi) < accuracy: olah.append(node)\n ijsect = zip(olah,olah)\n xysect = [(MESHX[i],MESHY[i]) for i in olah]\n lmsect = [ (1.0,0.0) for i in range(len(ijsect)) ]\n mask = np.zeros((len(edges),2),dtype=bool)\n for i in olah:\n mask = np.logical_or( edges == i , mask )\n edges = np.compress(np.logical_not(np.any(mask,axis=1)),edges,axis=0)\n\n # ~~> Intersection with remaining edges\n for edge in edges:\n xyj = getSegmentIntersection( (MESHX[edge[0]],MESHY[edge[0]]),(MESHX[edge[1]],MESHY[edge[1]]),xyo,xyi )\n if xyj != []:\n ijsect.append(edge) # nodes from the mesh\n xysect.append(tuple(xyj[0])) # intersection (xo,yo)\n lmsect.append((xyj[1],1.0-xyj[1])) # weight along each each\n\n # ~~> Final sorting along keys x and y\n xysect = np.array(xysect, dtype=[('x', '<f4'), ('y', '<f4')])\n xysort = np.argsort(xysect, order=('x','y'))\n\n # ~~> Move on to next point\n for i in xysort:\n xys.append( xysect[i] )\n douplets.append( (ijsect[i],lmsect[i]) )\n xyo = xyi\n\n return xys,douplets", "def split_iters(iter_ranges, n_threads = None):\n\n\n if n_threads is None:\n n_threads = cpu_count()\n \n counts = [safediv(r[1] - r[0], r[2]) for r in iter_ranges]\n # largest_dim = np.max(counts)\n total_count = float(np.sum(counts))\n split_factors = [ (c / total_count) ** 2 for c in counts ]\n if len(counts) > 2:\n # kludgy heuristic\n # if you're reading across multiple dimensions\n # assume there might be reuse of data read in \n # and try to split up work so it fits into cache \n expected_bytes = 8 \n for dim in counts:\n expected_bytes *= dim\n expected_kb = expected_bytes / 1024\n l2_cache_size = 8192\n 
n_pieces = max(n_threads, expected_kb / l2_cache_size)\n else: \n n_pieces = 2*n_threads \n \n # initialize work_items with an empty single range \n work_items = [[]]\n for (dim_idx,dim_count) in enumerate(counts):\n\n dim_start, _, dim_step = iter_ranges[dim_idx]\n n_dim_pieces = int(math.ceil(split_factors[dim_idx] * n_pieces))\n dim_factor = float(dim_count) / n_dim_pieces\n \n old_work_items = [p for p in work_items]\n work_items = []\n for i in xrange(n_dim_pieces):\n # copy all the var ranges, after which we'll modifying \n # the biggest dimension \n\n start = dim_start + int(math.floor(dim_step * dim_factor * i))\n stop = dim_start + int(math.floor(dim_step * dim_factor * (i+1)))\n \n dim_work_item = (start,stop,dim_step)\n for old_work_item in old_work_items:\n new_work_item = [r for r in old_work_item]\n new_work_item.append(dim_work_item) \n work_items.append(new_work_item)\n\n return work_items", "def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f 
= y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def get_rois4plotting(params, sub_id = None, pysub = 'hcp_999999', use_atlas = True, atlas_pth = '', space = 'fsLR_den-170k'): \n\n if use_atlas:\n # Get Glasser atlas\n atlas_df, atlas_array = create_glasser_df(atlas_pth)\n \n if sub_id:\n \n # if single id provided, put in list\n if isinstance(sub_id, str) or isinstance(sub_id, int):\n sub_id = [sub_id]\n\n sub_id_list = ['sub-{sj}'.format(sj = str(pp).zfill(3)) if 'sub-' not in str(pp) else str(pp) for pp in sub_id]\n\n ## start empty dictionaries \n ROIs = {}\n color_codes = {}\n roi_verts = {}\n\n # loop over participant list\n for pp in sub_id_list:\n\n print('Getting ROIs for participants %s'%pp)\n\n if use_atlas:\n print('Using Glasser ROIs')\n # ROI names\n ROIs[pp] = list(params['plotting']['ROIs']['glasser_atlas'].keys())\n\n # colors\n color_codes[pp] = {key: params['plotting']['ROIs']['glasser_atlas'][key]['color'] for key in ROIs[pp]}\n\n # get vertices for ROI\n roi_verts[pp] = {}\n for _,key in enumerate(ROIs[pp]):\n print(key)\n roi_verts[pp][key] = np.hstack((np.where(atlas_array == ind)[0] for ind in atlas_df[atlas_df['ROI'].isin(params['plotting']['ROIs']['glasser_atlas'][key]['ROI'])]['index'].values))\n\n else:\n ## check if dict or str\n if isinstance(pysub, dict):\n pysub_pp = pysub[pp]\n else:\n pysub_pp = pysub\n\n # set ROI names\n ROIs[pp] = params['plotting']['ROIs'][space]\n\n # dictionary with one specific color per group - similar to fig3 colors\n color_codes[pp] = {key: params['plotting']['ROI_pal'][key] for key in ROIs[pp]}\n\n # get vertices for ROI\n roi_verts[pp] = {}\n for _,val in enumerate(ROIs[pp]):\n print(val)\n roi_verts[pp][val] = cortex.get_roi_verts(pysub_pp,val)[val]\n \n else:\n raise NameError('No subject ID provided')\n \n return ROIs, roi_verts, color_codes" ]
[ "0.60255563", "0.5919896", "0.58811724", "0.5863269", "0.585582", "0.58138037", "0.57283646", "0.56544244", "0.5653726", "0.56497926", "0.5631293", "0.55874455", "0.5561426", "0.5546333", "0.55354935", "0.55143887", "0.5499627", "0.54738224", "0.5452872", "0.54172575", "0.54149383", "0.53868145", "0.53789157", "0.5355338", "0.5341189", "0.53384954", "0.5338029", "0.5336821", "0.53366953", "0.5329026", "0.53280586", "0.5327341", "0.5325644", "0.53133357", "0.52916044", "0.52830064", "0.5265453", "0.5265397", "0.5263072", "0.52462894", "0.52453905", "0.5233233", "0.52323014", "0.52191335", "0.52137077", "0.5213234", "0.52110255", "0.5209145", "0.52039766", "0.5169014", "0.51623344", "0.5155303", "0.51536626", "0.5152846", "0.5148532", "0.5145279", "0.5133123", "0.5132907", "0.5132715", "0.5125976", "0.5122974", "0.5120039", "0.5119765", "0.5105356", "0.5103192", "0.5091539", "0.5084962", "0.5084139", "0.5082736", "0.508188", "0.50779736", "0.50714517", "0.5069483", "0.5063663", "0.5056069", "0.5056005", "0.50530946", "0.50452834", "0.50448984", "0.5042799", "0.50360715", "0.50335884", "0.50313", "0.5029873", "0.50182265", "0.5018117", "0.50168073", "0.50086", "0.50035226", "0.50032544", "0.50009036", "0.49920228", "0.49916708", "0.49915898", "0.49865103", "0.4980164", "0.4977658", "0.49776182", "0.4977065", "0.49734145" ]
0.6117562
0
Function that raise exception of the public method area
def area(self): raise Exception("area() is not implemented")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\r\n raise self", "def __call__(self):\r\n raise self", "def throw(self):\n pass", "def exception(self, *args, **kwargs):", "def unexpectedException(self):", "def raise_(err):\n raise err", "def exception(self):\n raise Exception(\"Exception test\")", "def raise_error(Err):\n raise Err()", "def throw_method(type, value=None, traceback=None): # pylint: disable=redefined-builtin\n raise type, value, traceback", "def unexpected_error(self, exception):", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def exception(self, e):\n pass", "def rescue(self, instance):\n pass", "def exception(self, *args, **kwargs):\n return super(Blueprint, self).exception(*args, **kwargs)", "def user_exception(self, frame, exc_info):\n pass", "def get_exception():\n raise Exception(\"example\")", "def raise_fail(*args, **kwargs):\n raise Exception(\"oops\")", "def _raise_performing_request_error(self, *args, **kwargs):", "def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. Execution stops.\")", "def raise_exception(method):\n\n def raise_ex(*args, **kw):\n try:\n # Try running the method\n result = method(*args, **kw)\n # raise the exception and print the stack trace\n except Exception as error:\n l.error(\n \"Some error occured in the function {}. With Error: {}\".format(\n method.__name__, error\n )\n ) # this is optional but sometime the raise does not work\n raise # this raises the error with stack backtrack\n return result\n\n return raise_ex # here the memberfunction timed will be called\n\n # This function works as a decorator to raise errors if python interpretor does not do that automatically", "def assertion_failed(self, func, exception):", "def assertion_errored(self, func, exception):", "def throw(self, type, value=None, traceback=None):\n pass", "def error(self, *args, **kwargs):", "def fatal(self, *args, **kwargs):", "def error(self):\n ...", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def __call__(self,action=None):\n raise NYI", "def throw(error: Exception) -> None:\n raise error", "def py_raise(*xs):\n raise NotImplemented", "def sample_exception_function(self, a, b):\r\n raise Exception(\"An error has occurred.\")", "def WrappedException(self) -> object:", "def raise_exc(self, exctype):\n\t\t_async_raise(self._get_my_tid(), exctype)", "def exception_handler(self, exception):\n pass", "def 报错(自身, func):\n 自身.错误处理 = func\n return func", "def _raise_ex(fn):\n\n def _decorated(*args, **kwargs):\n v = fn(*args, **kwargs)\n if isinstance(v, Exception): raise v\n return v\n\n return _decorated", "def on_exception(self):\n pass", "def callback_exception(*args, **kwargs):\n raise DemoCallbackException()", "def _raise_http_error(self, *args, **kwargs):", "def does_not_raise(self, function, *args, **kwargs):\n try:\n return function(*args, **kwargs)\n except Exception as e:\n self.log_error(\"{} did raise {}: {}\".format(\n function.__name__,\n type(e).__name__, e\n ), None)", "def _check_exc(self):\n if self._exc is not None:\n raise self._exc", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)", "def _reraise(*args, **keys):\n return True", "def __call__(self, function: 
BaseException):\n self._add_attr(function)\n return function", "def bad(self):\n raise NotImplementedError", "def bad(self):\n raise NotImplementedError", "def raise_error(cls, *args):\n raise cls(cls.message)", "def error(self):\n pass", "def exsafe(func):\n error_msg_template=\"{{}} executing function '{}':\".format(func.__name__)\n @func_utils.getargsfrom(func,hide_outer_obj=True) # PyQt slots don't work well with bound methods\n def safe_func(*args, **kwargs):\n with exint(error_msg_template=error_msg_template):\n return func(*args,**kwargs)\n return safe_func", "def fail(self, msg=None):\n raise Exception, msg", "def __throw_exception(self, text):\n raise Exception(text)", "def test_call(self):\r\n with self.assertRaises(NotImplementedError):\r\n self.est1(1)", "def __call__(self, *args, **kwargs):\n if isinstance(self._exp, type) and issubclass(self._exp, Exception):\n with pytest.raises(self._exp):\n self._f(*args, **kwargs)\n else:\n assert self._exp == self._f(*args, **kwargs)", "def raise_500():\n raise ValueError('Foo!')", "def test_call_with_exception(self):\n eclipse_name='p_func'\n def called_from_eclipse(arguments):\n a=a +1 \n return SUCCEED\n addPythonFunction(eclipse_name,called_from_eclipse)\n my_var=Var()\n Compound('call_python_function',Atom(eclipse_name),[1,my_var]).post_goal()\n with self.assertRaises(UnboundLocalError) as exp:\n resume()", "def handle_execution_exception(self, ex):\n raise(ex)", "def __nonzero__ ( self ) :\n raise AbstractMethodException( self , \"__nonzero__\" )", "def unprotected_method():\n return {\"message\": \"Anyone access this function\"}", "def raise_exception(request):\n raise Exception(\"Let's test error handling\")", "def test_exception(self) -> None:\n raise Exception(self.text)", "def create_exception(self, msg: str):", "def WrapNonExceptionThrows(self) -> bool:", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def test_area_methodwithargthrowerror(self):\n s3 = Square(3, 1, 3)\n with self.assertRaises(TypeError) as e:\n s3.area(9)\n self.assertEqual(str(e.exception),\n \"area() takes 1 positional argument but 2 were given\")", "def _Raise(self, t):\n self.RaiseError(t, \"Exception raising not supported\")", "def testRaisesException(self):\n\t\tx = BaseAction('x')\n\t\tx.throws = Exception()\n\t\tself.failUnlessRaises(Exception, x.playback)", "def _exception_dispatcher(self, e):\n # TODO Currently not doing anything\n raise e", "def test_method_error(self, example_staypoints):\n method = \"unknown\"\n error_msg = f\"method '{method}' is unknown. 
Supported values are ['dbscan'].\"\n with pytest.raises(AttributeError) as e:\n example_staypoints.as_staypoints.generate_locations(method=\"unknown\")\n assert error_msg == str(e.value)", "def test_area_no_args(self):\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)", "def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None", "def patch_raise_exception() -> None:\n raise TrestleError('Forced raising of an errors')", "def traceback(self):", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def throwException(self):\n raise se.cygni.texasholdem.game.exception.NoRoomSpecifiedException(message)", "def __init__(self):\n raise Exception('TODO IMPLEMENT ME !')", "def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)", "def raise_exc(self, exctype):\n _async_raise(self._get_my_tid(), exctype)", "def testBrokenMethod(self):\n h = self.getHandler()\n rq = '''{\"jsonrpc\":\"2.0\", \"method\":\"brokenMethod\", \"id\":\"1\"}'''\n messages, dummy = h.parse_body(rq)\n msg = messages[0]\n h.handle_message(msg)\n self.assertEqual(str(msg.error), \"Error executing service method\")\n self.assertEqual(\n repr(msg.error), 'InternalError(\"Error executing service method\")')\n self.assertTrue(isinstance(msg.error, InternalError))", "def invalid(self):\n pass", "def error(self, message=None, show_help=True):", "def _on_exception(self, exception):\n pass", "def whenException(self, channel, call):", "def testExceptionRaisedByFunctions(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g.h(3, 4)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\tself.failUnlessRaises(Exception, x.g.h, 3, 4)", "def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. 
Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback", "def rethrow(self):\n if self._exception:\n raise self._exception", "def error(msg):\n\n raise Exception(msg)", "def __call__(self):\n raise NotImplementedError()", "def __call__(self):\r\n raise NotImplementedError('override me')", "def error(self, msg, *args, **kwargs):\n pass", "def indicate_error(self):\n pass", "def die(self, msg=None):\r\n raise Exception(msg)", "def test_class_errored(self, cls, exception):", "def e(msg):\n raise Exception(repr(msg))", "def test_should_not_be_callable(self):\n with self.assertRaises(NotImplementedError):\n BaseValidator()(\"any-value\")", "def ERR(self):", "def test_call_parent_get_raise_exception(self):\r\n twitter = client.TwitterClient(mock_logger)\r\n with pytest.raises(NotImplementedError) as e:\r\n twitter.get(30)\r\n assert str(e.value) == \"Child class should implement this\"", "def _broken_ep(ep, exc, *args, **kwargs):\n import logging\n logger = logging.getLogger('yatsm')\n logger.critical('Trying to import \"{0.name}\" algorithm entry point '\n 'raised a {1}'.format(ep, exc))\n raise exc", "def test_unavailable(self):\n feature_guard = _make_requires(False, \"Error text\")\n\n @feature_guard\n def inner(): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n with pytest.raises(NotImplementedError) as e:\n inner()\n\n assert \"Error text\" in str(e.value)", "def __exit__(self, _exc_type, _exc_val, _exc_tb):\n raise NotImplementedError", "def on_failure(self, exc: BaseException) -> None:" ]
[ "0.7519949", "0.7519949", "0.7479147", "0.7351615", "0.72099924", "0.69999564", "0.69876474", "0.69554514", "0.66960365", "0.6632302", "0.6583972", "0.6538573", "0.65098816", "0.65000296", "0.6460797", "0.64351404", "0.64302117", "0.64289075", "0.6414966", "0.6404366", "0.63970834", "0.6373459", "0.63111615", "0.6301588", "0.6251948", "0.6245026", "0.6236364", "0.62296915", "0.6193575", "0.61888754", "0.61781096", "0.61770105", "0.617496", "0.6150274", "0.6145215", "0.61379373", "0.61365336", "0.61109465", "0.6075298", "0.6071282", "0.6066042", "0.60610574", "0.6052629", "0.6032594", "0.6024068", "0.6023236", "0.6023236", "0.60127676", "0.60126346", "0.5997205", "0.5987654", "0.5977881", "0.5958727", "0.595124", "0.59426695", "0.59362644", "0.5936056", "0.59342676", "0.59083825", "0.58978224", "0.5874116", "0.5860902", "0.5857582", "0.5855343", "0.58494914", "0.58133966", "0.58112025", "0.5810941", "0.5809309", "0.58092594", "0.5809158", "0.5790886", "0.57648295", "0.5763456", "0.57540256", "0.5752359", "0.57477564", "0.57477564", "0.5745962", "0.5740291", "0.5737704", "0.57350034", "0.57304907", "0.57229924", "0.57185084", "0.57166886", "0.57064295", "0.5697148", "0.5690072", "0.56898177", "0.5688025", "0.5685586", "0.56768405", "0.56679845", "0.56528944", "0.5641365", "0.5639595", "0.5638526", "0.56327873", "0.5624499", "0.5621009" ]
0.0
-1
Function that applies an integer validator to a name and value
def integer_validator(self, name, value): self.name = name self.value = value if type(value) is not int: raise TypeError("{} must be an integer".format(name)) if value <= 0: raise ValueError("{} must be greater than 0".format(name)) self.value = value self.name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(name + \" must be an integer\")\n elif value <= 0 and name not in (\"x\", \"y\"):\n raise ValueError(name + \" must be > 0\")\n elif value < 0 and name in (\"x\", \"y\"):\n raise ValueError(name + \" must be >= 0\")", "def integer_validator(self, name, value):\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "def integer_validator(self, name, value):\n if type(value) is not int:\n raise(TypeError(\"{} must be an integer\".format(name)))\n if value <= 0:\n raise(ValueError(\"{} must be greater than 0\".format(name)))", "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "def integer_validator(self, name, value):\n if not issubclass(int, type(value)) or not isinstance(value, int):\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def validate_int(self, name, number):\n if type(number) is not int:\n raise TypeError(\"{} must be an integer\".format(name))", "def _validate_input_integer(display_name, value):\n\n if isinstance(value, int) is False:\n raise ValueError(display_name + \" must be integer.\")", "def checkint(name, val, mn=None, mx=None):\n try:\n\tif val[0:2] == '0x' or val[0:2] == '0X':\n\t x = string.atoi(val, 16)\n\telif val[0:0] == '0':\n\t x = string.atoi(val, 8)\n\telse:\n\t # allow commas as long as they are properly spaced\n\t x = string.split(val, \",\")\n\t if len(x) > 1:\n\t\tfor e in x[1:]:\n\t\t if len(e) != 3:\n\t\t\traise ValidationError, \\\n\t\t\t '%s is not a valid integer' % val\n\t\tif len(x[0]) < 1 or len(x[0]) > 3:\n\t\t raise ValidationError, \\\n\t\t\t '%s is not a valid integer' % val\n\t\tval = re.sub(\",\", \"\", val)\n\t x = string.atoi(val)\n\tif ((mn is not None and x < mn) or\n\t (mx is not None and x > mx)):\n\t\traise ValidationError, \\\n\t\t 'parameter \"%s\", value \"%s\" is out of range' % \\\n\t\t (name, val)\n\treturn\n except ValueError:\n\traise ValidationError, '%s is not a valid integer' % val", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def __integer(value, name=\"\", internal=False):\n if value is None:\n __ex(\"The %s is missing.\" % name, internal)\n if value == \"\":\n __ex(\"The %s must not be empty.\" % name, internal)\n try:\n value = int(value)\n except ValueError:\n __ex(\"The %s must be an integer.\" % name, internal)\n return int(value)", "def _validateInt(dErrors, sName, sValue, iMin = 0, iMax = 0x7ffffffe, aoNilValues = tuple([-1, None, ''])):\n (sValue, sError) = ModelDataBase.validateInt(sValue, iMin, iMax, aoNilValues, fAllowNull = True);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def intvalue(value, name=\"\", positive=True, zero=False, negative=False):\n value = __integer(value, \"%s value\" % name, False)\n if not positive:\n if value > 0:\n __ex(\"The %s value must not be 
positive.\" % name, False)\n if not zero:\n if value == 0:\n __ex(\"The %s value must not be zero.\" % name, False)\n if not negative:\n if value < 0:\n __ex(\"The %s value must not be negative.\" % name, False)", "def data_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if name == \"height\" or name == \"width\":\n if value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))\n else:\n if value < 0:\n raise ValueError(\"{} must be >= 0\".format(name))", "def _int_validator(arg):\n if arg is None or type(arg) != int:\n raise ValueError('Incorrect value: input should be an int')", "def check_for_int(check):", "def check_int_in(possible_values: List[int]) -> Validator[int]:\n\n def validator(var_name: str, val: object) -> int:\n n = check_int(var_name, val)\n if n not in possible_values:\n raise ValidationError(_(\"Invalid {var_name}\").format(var_name=var_name))\n return n\n\n return validator", "def _validateIntNN(dErrors, sName, sValue, iMin = 0, iMax = 0x7ffffffe, aoNilValues = tuple([-1, None, ''])):\n (sValue, sError) = ModelDataBase.validateInt(sValue, iMin, iMax, aoNilValues, fAllowNull = False);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def clean_value(self, value):\n try:\n return int(value)\n except ValueError:\n raise ValidationError('\"%s\" is not an integer' % value)", "def validate_input(**kwargs):\n for name, value in kwargs.items():\n if name == \"x\" or name == \"y\":\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n elif value < 0:\n raise ValueError(\"{} must be >= 0\".format(name))\n else:\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n elif value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))", "def is_valid_input(value):\n if value is None:\n return None\n\n try:\n value = int(value)\n except ValueError:\n return None\n\n if 1 <= value <= 5:\n return value\n else:\n return None", "def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n if not isinstance(description, str):\n raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n\n if not isinstance(inputvalue, (int, long)):\n raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))\n\n if not isinstance(minvalue, (int, long, type(None))):\n raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))\n\n if not isinstance(maxvalue, (int, long, type(None))):\n raise TypeError('The maxvalue must be an integer or None. 
Given: {0!r}'.format(maxvalue))\n\n _checkNumerical(inputvalue, minvalue, maxvalue, description)", "def validate_input_type(value):\n try:\n int(value)\n except ValueError:\n raise NotIntegerError()", "def check_list_value(self, name, list_int):\n while True:\n numb = input(f\"-- {name} : \"\n f\"Entrez une de ces valeurs : {list_int} : \")\n try:\n check = int(numb)\n if check in list_int or check == 99:\n break\n except ValueError:\n pass\n return check", "def validate_positive_integer(\n value: Any, none_allowed: bool, display_name: str\n) -> None:\n if none_allowed and value is None:\n return\n\n if not isinstance(value, int):\n raise TypeError(f\"{display_name} must be a positive integer\")\n if value <= 0:\n raise ValueError(f\"{display_name} must be a positive integer\")", "def check_value(self, value):", "def validate(self, name, values):\r\n \r\n pass", "def gen_input_check(self, n):\r\n assert (\r\n isinstance(n, numbers.Number) and float(n).is_integer()\r\n ), \"Input must be an integer value.\"\r\n assert n >= 0, \"Input must be nonnegative\"\r\n\r\n return int(n)", "def __verify_integer_field(cls, plugin_instance, field_name, field_value):\n\n if not isinstance(field_value, int):\n raise BadPluginError(\n class_name=type(plugin_instance).__name__, field_name=field_name\n )", "def _validate_db_int(**kwargs):\n max_int = (2 ** 31) - 1\n\n for param_key, param_value in kwargs.items():\n if param_value and param_value > max_int:\n msg = _(\"'%(param)s' value out of range, \"\n \"must not exceed %(max)d.\") % {\"param\": param_key,\n \"max\": max_int}\n raise exception.Invalid(msg)", "def numeric_input(input_value: str) -> int:\n try:\n input_value = int(input_value)\n except ValueError:\n pass\n if not isinstance(input_value, int):\n return ArgumentTypeError(\"Please specify number\")\n if input_value < 1 or input_value > 4:\n return ArgumentTypeError(\"Value should be in range from 1 to 4\")\n return input_value", "def validate(cls, value):\n if isinstance(value, (int, UUID)):\n value = {\"pk\": value}\n return cls(value)", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def validate_puzzle_param(self, name):\n is_puzzle_parameter_valid = False\n while is_puzzle_parameter_valid is False:\n puzzle_parameter = self.ask_user_input(\"Enter a valid '\" + name + \"'\")\n if not puzzle_parameter.isdigit():\n print(\"Not a number, please try again\")\n elif 1 <= int(puzzle_parameter) <= 9:\n is_puzzle_parameter_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"Number is out of the valid range (1 to 9), please try again\")\n return is_puzzle_parameter_valid", "def validateInt(sValue, iMin = 0, iMax = 0x7ffffffe, aoNilValues = tuple([-1, None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n if fAllowNull:\n return (None if sValue is None else aoNilValues[0], None);\n return (sValue, 'Mandatory.');\n\n try:\n if utils.isString(sValue):\n iValue = int(sValue, 0);\n else:\n iValue = int(sValue);\n except:\n return (sValue, 'Not an integer');\n\n if iValue in aoNilValues:\n return (aoNilValues[0], None if fAllowNull else 'Mandatory.');\n\n if iValue < iMin:\n return (iValue, 'Value too small (min %d)' % (iMin,));\n elif iValue > iMax:\n return (iValue, 'Value too high (max %d)' % (iMax,));\n return (iValue, None);", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n 
self._checkBounds(val)", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def _validate_scalar(obj):\n if not isinstance(obj, int):\n raise TypeError(\"scalar must be an integer\")", "def check_argument_int_greater_than_one(value: str) -> int:\n ivalue = int(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(\"{} is an invalid positive int value\".format(value))\n return ivalue", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def validate(val, num1=0, num2=float('inf')):\n val = int(val)\n if not num1 <= val < num2:\n raise ArgumentTypeError(\"Value out of range: {}. \"\n \"Should be between {} and {}.\".format(val, num1, num2 - 1))\n return val", "def _check_value(self, value, name, check_function):\n if check_function is not None:\n is_good = check_function(value) #May raise an exception\n assert is_good in [0,1,True,False]\n if not is_good:\n raise ValueError(\"Invalid parameter value %r for parameter %s\" \\\n % (value, name))", "def get_int(num, name='Number'):\n try:\n num = int(num)\n except ValueError:\n raise ValueError('{} must be an integer'.format(name))\n\n return num", "def validate(self, instance, value):", "def validate(self, instance, value):", "def test_task_with_one_int_validation_parameter_validate_data(number, expected_value):\r\n\r\n assert algo.TaskWithOneIntValidationParameter.validate_data(number) == expected_value", "def value_error(number):\n try:\n nbr = int(number)\n except ValueError:\n print(\"You can't sum letters, please write a number\")\n verification = False\n else:\n verification = True\n return verification", "def _validate_integer(self, action_result, parameter, key, allow_zero=False):\n\n if parameter is not None:\n try:\n if not float(parameter).is_integer():\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_VALID_INT_MSG.format(param=key)), None\n\n parameter = int(parameter)\n except:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_VALID_INT_MSG.format(param=key)), None\n\n if parameter < 0:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_NON_NEG_INT_MSG.format(param=key)), None\n if not allow_zero and parameter == 0:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_NON_NEG_NON_ZERO_INT_MSG.format(param=key)), None\n\n return phantom.APP_SUCCESS, parameter", "def checkInt(value):\n if int(value) == value:\n return int(value)\n else:\n return value", "def _validate_impl(self, value, name):\n raise NotImplementedError()", "def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()", "def _validate(self, instance, value):", "def strict_integer_validator(cls, v: Any) -> int:\n if int(v) != Decimal(v):\n raise ValueError\n return int(v)", "def input_to_int(value):\n \n if value == \"1\" or value == \"2\" or value == \"3\" or value == \"4\" or value == \"5\" or value == \"6\":\n\n value = int(value)\n\n return value\n else:\n\n print(\"Your input was invalid. 
Please choose from one of the options next time.\")\n\n return False", "def test_min_int_string(self):\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n # provide error on type mismatch\n document = {'code': 'vijf'}\n val.validate(document)\n self.assertEqual(val.errors,\n {'code': [\"value 'vijf' is not numeric\"]},\n msg=\"alert on datatype mismatch for min \"\n \"evaluation fails\")", "def _validate_param(name, value):\n\n # First things first -- check that we have a legal parameter name.\n try:\n validator = _legal_params[name]\n except KeyError:\n raise ViewVCException(\"An illegal parameter name was provided.\", \"400 Bad Request\")\n\n # Is there a validator? Is it a regex or a function? Validate if\n # we can, returning without incident on valid input.\n if validator is None:\n return\n elif hasattr(validator, \"match\"):\n if validator.match(value):\n return\n else:\n if validator(value):\n return\n\n # If we get here, the input value isn't valid.\n raise ViewVCException(\n 'An illegal value was provided for the \"%s\" parameter.' % (name), \"400 Bad Request\"\n )", "def test_creation_int():\n value = 1\n\n num_a = param.Integer(value=value)\n assert num_a.value == value", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def validate_integer(self, p_str):\n # p_str is str\n if re.search(r\"^[1-9]\\d*$\", p_str) or p_str == \"\":\n return True\n self.frame.bell() # alert wrong input\n return False", "def test_int_value_constraint():\n prop = \"Ingredient count\"\n c_value = IntValueConstraint(name=prop, value=3)\n mapped_c_value = c_value.to_dict()\n\n assert mapped_c_value[\"name\"] == prop\n assert mapped_c_value[\"type\"] == \"integer\"\n assert mapped_c_value[\"options\"][\"value\"] == 3", "def __call__(self, value):\n return self.validate(value)", "def __allowed_values_inccorrect_number(self):\n strTestName = 'Values of a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 1.4\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def validate_interval_name(name):\n msg = 'invalid interval name \"{}\"'.format(name)\n if name[0] not in ['+', '-']:\n raise ValueError(msg)\n if name[1] not in ['d', 'm', 'P', 'M', 'A']:\n raise ValueError(msg)\n try:\n int(name[2:])\n except ValueError:\n raise ValueError(msg)", "def pos_int_validator(arg):\n num = int(arg)\n if num > 0:\n return num\n else:\n raise argparse.ArgumentTypeError(\"{} - must be a positive number\".format(arg))", "def test_int_field():", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. 
Try again.\")\n continue\n else:\n break\n\n return value", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def Validate(self, win):\n\n txtCtrl = self.GetWindow()\n val = txtCtrl.GetValue()\n isValid = False\n if val.isdigit():\n digit = int(val)\n if digit >= self._min and digit <= self._max:\n isValid = True\n if not isValid:\n # Notify the user of the invalid value\n msg = \"Value must be between %d and %d\" % \\\n (self._min, self._max)\n wx.MessageBox(msg,\n \"Invalid Value\",\n style=wx.OK|wx.ICON_ERROR)\n return isValid", "def intrange(value, name=\"\", value_min=None, value_max=None, zero=False):\n value = __integer(value, \"%s value\" % name, False)\n if value_min is not None:\n value_min = __integer(value_min, \"minimal %s value\" % name, True)\n intvalue(value_min, name, True, True, True)\n if value_max is not None:\n value_max = __integer(value_max, \"maximal %s value\" % name, True)\n intvalue(value_max, name, True, True, True)\n if not zero:\n if value == 0:\n __ex(\"The %s value must not be zero.\" % name, False)\n if (value_min is not None) and (value_max is not None):\n if value_min > value_max:\n __ex(\"The maximal %s value must be greater than the minimal \"\n \"value.\" % name, False)\n if (value_min == value_max) and (value != value_min):\n __ex(\"The %s value can only be %s (depending on further range \"\n \"further range arguments).\" % (name, value_min), False)\n if (value < value_min) or (value > value_max):\n __ex(\"The %s value must be between %s and %s (depending on \"\n \"further range arguments).\" % (name, value_min, value_max),\n False)\n elif value_min is not None:\n if value < value_min:\n __ex(\"The %s value must not be less than %s.\" % (name, value_min),\n False)\n elif value_max is not None:\n if value > value_max:\n __ex(\"The %s value must not be greater than %s.\" %\n (name, value_max), False)", "def validateID(self, id : int) -> int:\n # If ID is a string, ensure it can be casted to an int before casting and returning.\n if type(id) == str:\n if not lib.stringTyping.isInt(id):\n raise TypeError(\"user ID must be either int or string of digits\")\n return int(id)\n # If ID is not a string, nor an int, throw an error.\n elif type(id) != int:\n raise TypeError(\"user ID must be either int or string of digits\")\n # ID must be an int, so return it.\n return id", "def input_validator(self, a_dict):\n for k, v in a_dict.items():\n if not isinstance(v, int):\n raise TypeError(\"{} must be an integer\".format(k))\n elif k == \"width\" and v < 1:\n raise ValueError(\"{} must be > 0\".format(k))\n elif k is \"height\" and v < 1:\n raise ValueError(\"{} must be > 0\".format(k))\n elif k is \"x\" and v < 0:\n raise ValueError(\"{} must be >= 0\".format(k))\n elif k is \"y\" and v < 0:\n raise ValueError(\"{} must be >= 0\".format(k))", "def check_name(value, key=\"player name\"):\n\n value = value.strip()\n if value.__class__.__name__ not in [\"str\", \"unicode\"]:\n raise OptionTypeError(key, \"text\")\n if len(value) < 2:\n raise OptionNameError()\n if value == [\"SERVER\"] or value == [\"SYSTEM\"]:\n raise OptionNameInvalidError()\n for key in value:\n if not key in string.printable:\n OptionNameCharacterError()\n if key in string.whitespace:\n if key != \" \":\n OptionNameCharacterError()\n try: int(value)\n 
except:pass\n else:\n OptionNameNumberError()", "def test_int_range_constraint_validation():\n\n # Test valid values OK\n minimum = 1\n maximum = 2\n IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n\n # Test minimum must be less than maximum\n minimum = 3\n maximum = 2\n try:\n RealRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum be less than maximum\"\n except CitrinationClientError:\n pass\n\n # Test values must be castable to float\n minimum = {}\n maximum = 2\n try:\n c = IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum and maximum be castable to integers\"\n except CitrinationClientError:\n pass", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def range_validator(value_str, args):\n \n assert len(args) == 5, \"Error: range_validator requires 5 arguments.\"\n a_type, lb, ub, allow_none, error_msg = args\n try:\n if allow_none and value_str == 'None':\n value = None\n else:\n value = a_type(value_str)\n except ValueError:\n raise InputException(error_msg + value_str)\n if (lb != None and value < lb) or (ub != None and value > ub):\n raise InputException(error_msg + value_str)\n return value", "def possible_int(arg):\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg", "def _validate_integer(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'minimum' in mapping and 'maximum' in mapping:\n minimum = mapping['minimum']\n maximum = mapping['maximum']\n\n if minimum > maximum:\n return SchemaError(\n message=\"minimum (== {}) > maximum (== {})\".format(\n minimum, maximum),\n ref=ref)\n\n excl_min = False if 'exclusive_minimum' not in mapping \\\n else bool(mapping['exclusive_minimum'])\n excl_max = False if 'exclusive_maximum' not in mapping \\\n else bool(mapping['exclusive_maximum'])\n\n if excl_min and excl_max:\n if minimum == maximum:\n return SchemaError(\n message=(\n \"minimum (== {}) == maximum and \"\n \"both are set to exclusive\").format(minimum),\n ref=ref)\n elif not excl_min and excl_max:\n if minimum == maximum:\n return SchemaError(\n message=(\n \"minimum (== {}) == maximum and \"\n \"maximum is set to exclusive\").format(minimum),\n ref=ref)\n elif excl_min and not excl_max:\n if minimum == maximum:\n return SchemaError(\n message=(\n \"minimum (== {}) == maximum and \"\n \"maximum is set to exclusive\").format(minimum),\n ref=ref)\n elif not excl_min and not excl_max:\n # If minimum == maximum it is ok to have\n # >= minimum and <= maximum as a constraint.\n pass\n else:\n raise AssertionError(\"Unexpected code path\")\n\n return None", "def validate(self, value):\r\n return value", "def valid_port(ctx, param, value):\n try:\n value = int(value)\n except ValueError:\n pass\n\n return value", "def clean_value(self):\n value = self.cleaned_data[\"value\"]\n m = value[-1].lower()\n if m in self.MULTIPLIERS:\n mp = self.MULTIPLIERS[m]\n value = value[:-1]\n else:\n mp = 1\n try:\n value = int(value)\n except ValueError:\n raise forms.ValidationError(_(\"Integer is required\"))\n return value * mp", "def validate(n = 5):", "def checkNumberInt(value):\n if value.isnumeric():\n return int(value)\n else:\n print(\"You did not enter the correct numbers!\")\n newNum = input(\"Please enter a number: \")\n return 
checkNumberInt(newNum)", "def is_valid(self, value):\r\n pass", "def _parse_positive_int_param(request, query_params, param_name):\n param = query_params.get(param_name)\n if not param:\n return None\n try:\n param = int(param)\n if param <= 0:\n raise ValueError()\n return param\n except ValueError:\n request.respond('query parameter \"%s\" must be integer > 0' % param_name,\n 'text/plain', 400)\n return -1", "def validate_input(function):\n\n def validator(min_factor, max_factor):\n if max_factor < min_factor:\n raise ValueError(\"Min factor must be smaller or equal as max factor\")\n if min_factor == max_factor:\n return None, []\n else:\n return function(min_factor, max_factor)\n\n return validator", "def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, int) and self._validate_value(value))\n else:\n return False", "def validate(self, val):\n try:\n return int(val)\n except:\n raise InvalidCoordinateValueError(\n (\n \"Error: Invalid coordinate values. \"\n \"Coordinates should be integer values\"\n )\n )", "def isEditName(id):\n for char in id:\n if re.compile('[0-9]+').match(char[0]) == None:\n print NameError(\"'%s' is not valid name. \\n Id should be numeric\" % (name))\n return -1\n return 0", "def sanitized_int_input(s: str) -> int:\n\n v = input(s)\n if is_convertible_to_int(v):\n return int(v)\n else:\n print(\"There was an error, please enter a number.\")\n return sanitized_int_input(s)", "def test_convertCharToInt_lower_value_5(self):\n self.assertRaises(ValueError, rules.convertCharToInt, '-5')", "def test_validate_min_value(self):\n\n test_values = [\n -5,\n 2,\n ]\n\n testrow = TestSchema()\n\n for value in test_values:\n testrow.int_min_field = value\n self.assertRaises(Exception, testrow.save)", "def validate_emprestimo_identifier(emprestimo_id: int):\n if not isinstance(emprestimo_id, int):\n raise InvalidFieldType(code=400)\n\n if emprestimo_id <= 0:\n raise InvalidFieldValue(code=400)\n\n return", "def isInt(form, field):\n\t\ttry:\n\t\t\tval = field.data.strip()\n\t\t\tif val:\n\t\t\t\tint(val)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\traise ValidationError(\"Invalid integer provided\")", "def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number", "def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )", "def check_enum(self, name, values):\n v = self.__dict__.get(name)\n if v not in values:\n raise ValueError(\n \"Invalid value: {0}='{1}', not in '{2}'\".format(name, v, values))", "def validate_count(ctx, param, value):\n if isinstance(value, str):\n try:\n value = value.replace(',', '')\n\n return int(value)\n except ValueError:\n ioc_common.logit({\n 'level': 'EXCEPTION',\n 'message': f'({value} is not a valid integer.'\n })\n else:\n return int(value)", "def validate_value(self, value):\n if value < 0:\n raise serializers.ValidationError(\"The value must be above 0.\")\n return value", "def name_to_number(name):\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 
1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n elif name == \"scissors\":\n number = 4\n else:\n print \"Name is invalid!\"\n return 1\n return number" ]
[ "0.8130921", "0.788335", "0.7841091", "0.7826758", "0.7700402", "0.7427867", "0.7427867", "0.7261612", "0.7167946", "0.70053536", "0.693558", "0.6867623", "0.67775255", "0.6776555", "0.67418826", "0.6563546", "0.65442383", "0.6446882", "0.6382053", "0.6289101", "0.626554", "0.6226404", "0.6196827", "0.6183374", "0.61630064", "0.6121859", "0.6090905", "0.607859", "0.60723066", "0.605181", "0.599275", "0.5974808", "0.5948449", "0.5945583", "0.59283805", "0.59176886", "0.5916138", "0.5869142", "0.5859331", "0.58451", "0.5840335", "0.58371425", "0.58285064", "0.5827764", "0.5827764", "0.5823384", "0.58121556", "0.58042973", "0.58037776", "0.5799079", "0.5792701", "0.57907397", "0.5789699", "0.5778848", "0.5777276", "0.5770234", "0.57608515", "0.5750745", "0.5748077", "0.5748029", "0.5741494", "0.5735134", "0.57342803", "0.57279027", "0.5726393", "0.57166016", "0.569181", "0.56901175", "0.568395", "0.5683802", "0.5678693", "0.5677354", "0.56755424", "0.56737524", "0.56737524", "0.56726885", "0.5671317", "0.56649524", "0.5663769", "0.56561935", "0.56486773", "0.5647024", "0.5642573", "0.56423736", "0.5635329", "0.5623961", "0.5622296", "0.56176025", "0.5610779", "0.56012636", "0.5595103", "0.5585712", "0.5582071", "0.5578781", "0.5575479", "0.55733883", "0.5549544", "0.55481577", "0.5547873", "0.55374223" ]
0.8029562
1
This function is called to check if a username / password combination is valid.
def check_auth(username, password): return username == USERNAME and password == PASSWORD
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password", "def check_valid(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE username = %s\", (username,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if sha256_crypt.verify(password, credentials[1]):\n return True\n return False", "def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'", "def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == 'asimov' and password == 'tagada72'", "def check_auth(username, password):\n return username == 'sammy' and password == 'BasicPassword!'", "def is_valid_password(password, username):\n import string\n if len(password) < 4 or ' ' in password:\n return False\n if username:\n if string.lower(username) in string.lower(password):\n return False\n return True", "def check_auth_password(self, username, password):\n return AUTH_FAILED", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "def _credentials_are_valid(self, username, password):\n LDAP_SERVER = 'ldap://xxx.xxx.xxx' # EDIT THIS\n LDAP_USERNAME = '%[email protected]' % username # EDIT THIS\n LDAP_PASSWORD = password\n\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n # Wrong username or password\n return False\n except ldap.SERVER_DOWN:\n # AD server not available\n return False\n # all is well\n ldap_client.unbind()\n # Login successful\n return True", "def username_is_valid(username):\n\n if len(username) < MINIMUM_PASSWORD_LENGTH:\n return False\n else:\n return True", "def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n return username == c.id and password == c.pw", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def password_validator(username, password):\n digits = re.search(r'\\d+', password)\n capital_letters = re.search(r'[A-Z]+', password)\n lenght = len(password) > 
PASSWORD_MIN_LENGTH\n special_symbol = re.search(r'[\\-\\/\\@\\?\\!\\,\\.\\#\\&\\*]+', password)\n\n statement = digits and capital_letters and lenght and special_symbol\n\n if statement:\n return True\n return False", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def check_credentials(username, password):\n if not validate_username(username) or not validate_password(password):\n return False\n sql = \"SELECT password \" \\\n \"FROM users \" \\\n \"WHERE username=:username AND is_active=TRUE\"\n result = db.session.execute(sql, {\"username\": username})\n user = result.fetchone()\n if user is None:\n return False\n password_hash = user[0]\n if check_password_hash(password_hash, password):\n return True\n return False", "def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s", "def check_auth(username, password):\n\n config = get_app_configurations()\n\n with open(config[\"credentials\"], \"r\") as fh:\n u, p = fh.readline().rstrip().split(\",\")\n\n return username == u and password == p", "def verify_pw(username, password):\n credentials = HtpasswdFile(app.config[\"CREDENTIAL_FILE\"])\n if not credentials.check_password(username, password):\n logging.warning(\"%s tried to login with wrong password\", username)\n return False\n return True", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def validate_user(self, username, password, client, request, *args, **kwargs):\n log.debug('Validating username %r and its password', username)\n if self._usergetter is not None:\n user = self._usergetter(username, password, client, request, *args, **kwargs)\n if user:\n log.debug('Successfully validated username %r', username)\n request.user = user\n return True\n return False\n log.debug('Password credential authorization is disabled.')\n return False", "def _validate_password_works_with_username(password, username=None):\n if password == username:\n raise errors.AccountPasswordInvalid(accounts.PASSWORD_CANT_EQUAL_USERNAME_MSG) # lint-amnesty, pylint: disable=no-member", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def is_valid_user(self, username, password): # WORKS\n done1 = 
self.cur.execute(\"SELECT password FROM users WHERE username=\\\"{}\\\"\".format(username))\n done2 = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done1 == 0 and done2 == 0: # If both queries are unsuccessful, username doesn't exist in both tables.\n return False\n else:\n if done1 == 1: # If username exists in USERS table.\n self.cur.execute(\"SELECT password FROM users WHERE username=\\\"{}\\\"\".format(username))\n stored_password = self.cur.fetchone()[0]\n return check_password_hash(stored_password, password) # Returns True if the hashes match.\n else: # If username exists in ADMINS table.\n self.cur.execute(\"SELECT password FROM admins WHERE username=\\\"{}\\\"\".format(username))\n stored_password = self.cur.fetchone()[0]\n return check_password_hash(stored_password, password) # Returns True if the hashes match.", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def valid_login(username, password):\n db = get_db()\n db.ping(True)\n cur = db.cursor()\n\n try:\n sql = \"SELECT password FROM users WHERE user_name = '{}';\".format(username)\n cur.execute(sql)\n for i in cur:\n return check_password_hash(i[0], password)\n return False\n except mysql.connector.Error as err:\n flash(err, \"set\")\n return False\n finally:\n cur.close()\n db.close()", "def check_credentials(self, username, password):\n user = None\n if username != \"\":\n # Calling DB and fetching userdetails\n user = userdetails_API_query(username)\n print \"id \", user['_id']\n if user != None:\n #u = app.config['BASIC_AUTH_USERNAME'] = user['username']\n #pwd = app.config['BASIC_AUTH_PASSWORD'] = user['pw_hash']\n # print \" u & pwd\",username\n if user['username'] == username and check_password_hash(user['pw_hash'], password):\n g.user = user['_id'], username, user['email']\n return True\n print \"g.user\", g.user\n return False", "def test_credentials(self):\r\n data = self._deep_clean('[email protected]')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def validate_authentication(self, username, password, handler):\n hash = md5(password).hexdigest()\n msg = \"Authentication failed.\"\n if not self.has_user(username):\n if username == 'anonymous':\n msg = \"Anonymous access not allowed.\"\n raise AuthenticationFailed(msg)\n if username != 'anonymous':\n if self.user_table[username]['pwd'] != hash:\n raise AuthenticationFailed(msg)", "def is_correct_user(self, login, password):\n pass", "def check_auth(username, password):\n return username == get_env('UPLOAD_USER') and password == get_env('UPLOAD_PASSWORD')", "def verify_password(self, username, password):\n\n try:\n self.c.execute('SELECT password FROM profiles WHERE name=(?)', (username,))\n\n db_pw = 
self.c.fetchone()[0]\n print(password)\n\n return db_pw == password\n\n except TypeError:\n return False", "def check_credentials(username, password):\n\n return db.auth_user(username, password)", "def check_auth(_, http_password):\n return (password is not None) and (password == http_password)", "def check_user(self, username, password):\n user = [user for user in self.db if user['username'] == username]\n if user:\n if check_password_hash(user[0][\"password\"], password):\n return True\n return False\n return False", "def validate(self) -> bool:\n if not super().validate():\n return False\n\n # Does the user exist\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # Does given password match user's password\n if not user.check_password(self.password.data):\n self.username.errors.append('Invalid username or password')\n return False\n\n return True", "def check_credentials(username, password):\n\t\n\tconn = sqlite3.connect('db/user_task.db')\n\tcursor = conn.execute(\"SELECT password from user WHERE username == \\'%s\\'\" % (username))\n\tdata = cursor.fetchall()\n\tconn.close()\n\n\tif len(data) == 0:\n\t\treturn u\"Incorrect username\"\n\n\tfor row in data:\n\t\tencoded_password = hashlib.sha1(password.encode('utf-8')).hexdigest()\n\t\tif row[0] == encoded_password:\n\t\t\treturn None\n\n\treturn u\"Incorrect password\"\n\t\n\t# An example implementation which uses an ORM could be:\n\t# u = User.get(username)\n\t# if u is None:\n\t# return u\"Username %s is unknown to me.\" % username\n\t# if u.password != md5.new(password).hexdigest():\n\t# return u\"Incorrect password\"", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def _check_user_pass(self):\n if not self.username:\n self.username = input(' 请输入手机号:')\n if self.username.isdigit() and '+86' not in self.username:\n self.username = '+86' + self.username\n\n if not self.password:\n self.password = input(' 请输入密码:')", "def check_password(self, username, password): # tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n sql = ('select password from gameuser where username=%s')\r\n c.execute(sql,(username,))\r\n hashedpass = md5.new(password).hexdigest()\r\n u = c.fetchone()\r\n if u == None:\r\n raise NoUserExistsException(username)\r\n # print 'database contains {}, entered password was {}'.format(u[0],hashedpass)\r\n return u[0] == hashedpass", "def valid_login_password(username, password, pw_hash):\n salt = pw_hash.split(',')[1]\n return pw_hash == make_pw_hash(username, password, salt)", "def check_auth(username, password):\n return username == 'admin' and password in app.config[\"CLAIM_SECRETS\"]", "def validate_password(self, field):\n username = self.username.data\n if platform.system().lower() == 'linux':\n g.logger.debug('use pam for authenticate.')\n from pam import authenticate\n if authenticate(username, field.data):\n g.logger.info('session opened for user %s.' 
% username)\n return username\n else:\n raise ValueError('Authentication failure.')\n return username", "def check_credentials_typo(credentials):\n regex_username = r'^[\\w\\.\\-]{2,}$'\n regex_password = r'[^.]{4,10}$'\n\n if not match(regex_username, credentials['username']):\n raise ValueError('invalid username typo')\n\n if not match(regex_password, credentials['password']):\n raise ValueError('invalid password typo')", "def username_and_password_check(username: str, password: str) -> bool:\n table = metadata.tables['users']\n s = select(\n [\n func.count(table.c.id)\n ]).where(tuple_(table.c['username'], table.c['password'])\n .in_([(username, password)]))\n result = conn.execute(s).scalar()\n return bool(result)", "def check_auth(username, password):\n user = User.query.filter(User.username == username).first()\n\n if user:\n return user.password == password\n else:\n return False", "def verify_password(username, password):\n if username in user_auth and check_password_hash(user_auth.get(username), password):\n return username", "def authentication_validation(username, password, access_token):\n if bool(username) is not bool(password):\n raise Exception(\"Basic authentication requires a username AND\" \" password.\")\n if (username and access_token) or (password and access_token):\n raise Exception(\n \"Cannot use both Basic Authentication and\"\n \" OAuth2.0. Please use only one authentication\"\n \" method.\"\n )", "def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")", "def is_password_valid(password):\n #TODO : This should also be handled by the front_end\n pass", "def check_password(username, password, htpasswd_fn):\n entries = parse_htpasswd(htpasswd_fn, username)\n if not entries.has_key(username):\n raise NoSuchUser('No user: %r' % username)\n return check_entry_password(\n username, password, entries[username])", "def auth(username, password):\n return username == password", "def _check_password(self, password):\n if self.password_regex.search(password) is not None:\n print(\"Correct password\")\n return True\n else:\n print(\"Wrong password\")\n return False", "def check_auth(username, password):\n return username == current_app.config['DOC_USERNAME'] and password == current_app.config['DOC_PASSWORD']", "def _check_username(self, username):\n if self.username_regex.search(username) is not None:\n print(\"Correct username\")\n return True\n else: \n print(\"Wrong username\")\n return False", "def check_auth_password(self, username, password):\n if username == self.username and password == self.password:\n return paramiko.AUTH_SUCCESSFUL\n return paramiko.AUTH_FAILED", "def check_credentials(input_password, real_password):\n return pwd_context.verify(input_password, real_password)", "def test_valid_username_invalid_password(self):\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': '1sfsdf'})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def authorise_login(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users 
WHERE password = %s\", (password,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if password != credentials[1]:\n return False\n return True", "def clean(self):\n cleaned_data = super().clean()\n username = cleaned_data['username']\n password = cleaned_data['password']\n\n if authenticate(username=username, password=password) is None:\n raise ValidationError('Your username or password is incorrect.')", "def _check_password(self, password):\n rule = re.compile(constant.password_regex)\n if not rule.match(password):\n return False\n # disallow password from azure guide, yes, it's hard code.\n disallowed = constant.password_disallowed\n return password not in disallowed", "def validate(username: str, password: str) -> dict:\n validation_check = {}\n \n # Check to see if the username was left blank\n if username.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username cannot be left blank.\"\n\n # Check to see if the username is taken\n elif not sql.is_username_taken(username):\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username is incorrect\"\n\n # Check to see if the password was left blank\n if password.strip() == \"\":\n validation_check[\"success\"] = False\n validation_check[\"password\"] = \"Password cannot be left blank.\"\n\n\n if not validation_check.get(\"success\", True):\n return validation_check\n\n else:\n return sql.verify_credentials(username, password)", "def check_friend(self, username):\n if (self.isBlank(username) or self.isValidLen(username)):\n return False\n\n safe_input = (username,)\n \n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=?\" ,safe_input).fetchone()\n if vals:\n return True\n else:\n return False", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def check_password(self, password):\n return self.password == password", "def test_validate_credentials(self):\n pass", "def check_my_users(user):\n user_data = my_users.get(user['username'])\n if not user_data:\n return False # <--- invalid credentials\n elif user_data.get('password') == user['password']:\n return True # <--- user is logged in!\n\n return False # <--- invalid credentials", "def valid_login(username: str, password: str):\n\n try:\n LoginHandler.login(username, password)\n global validLogin\n validLogin = True\n except:\n validLogin = False", "def checkLogin(username, password):\n\n # Query database for username\n cur = initialise(2)\n cur.execute(\"SELECT * FROM users WHERE username = ?\", [username])\n users = cur.fetchall()\n\n # Ensure username exists and password is correct\n if len(users) != 1 or not check_password_hash(users[0][\"hash\"], password):\n return False, username\n else:\n return True, users[0][\"id\"]", "def validate_login(name,password):\n\t\n\t#Read the attendance excelsheet check if username and password matched\n\tdf_atten=pd.read_csv(\"datasrc/People.csv\")\n\t# 10006 ultbjxu\n\t\n\tif (df_atten.Username.astype(str).str.contains(name).any() and df_atten.Password.astype(str).str.contains(password).any()):\t\t\n\t\treturn True\n\telse: \n\t\treturn False", "def isUSER(username, password):\n usr = UserData[\"username\"].to_dict()\n listofUsers = 
list(UserData[\"username\"])\n if username in listofUsers:\n # print('Is a User')\n getindex = listofUsers.index(username)\n listofPasswords = list(UserData[\"password\"])\n # print(getindex)\n if listofPasswords[getindex] == password:\n # print('Password Correct')\n return True, True\n return True, False\n return False, False", "def _input(self):\n\t\tself.username = input(\"Username:\")\n\t\tself.password = input(\"password:\")\n\t\tret = re.match(r\"^\\w{1,20}$\", self.username)\n\t\tprint(\"------>input string\", self.username)\n\t\tprint(\"------>matched string\",ret)\t\n\t\ttime.sleep(1)\n\t\tif ret == None:\n\t\t\treturn True\n\t\telse:\n\t\t\tpass\n\n\t\tsql = f\"\"\"select name, password from users where name='{self.username}';\"\"\"\n\t\tprint(\"----------->\", sql)\n\t\ttime.sleep(1)\n\t\tcount = self.cs.execute(sql)\n\t\tprint(count)\n\t\tif (self.username, self.password) == self.cs.fetchone():\n\t\t\tprint(\"Login Successfully\")\n\t\t\ttime.sleep(1)\n\t\t\treturn False\n\t\telse:\n\t\t\tprint(\"Wrong username or password\")\n\t\t\ttime.sleep(1)\n\t\t\treturn True", "def is_valid_login(username, password):\n with open(PASSFILE, \"r\") as passfile:\n for record in passfile:\n try:\n valid_user, valid_password = False, False\n r_username, r_salt_hash = record.split()\n if username == r_username:\n valid_user = True\n if sha256_crypt.verify(password, r_salt_hash):\n valid_password = True\n if valid_user and valid_password:\n return True\n except ValueError:\n pass\n return False", "def check_auth(username, password):\n session.pop('username', None)\n session.pop('password', None)\n session['username'] = username\n session['password'] = password\n # Test if we can connect to a region\n connect_to_region()\n return True", "def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')", "def username_check(username):\n\n try: \n pwd.getpwnam(username)\n print(\"User %s DOES EXIST. Try a different username.\" % (username)) \n return False\n\n except KeyError: \n print(\"User %s DOES NOT exist. Continuing...\" % (username)) \n return True", "def test_invalid_username_valid_password(self):\n response = self.client.post(reverse('users:login'), {'username': 'xyzabe', 'password': self.user['password1']})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def authenticate(username, password):\n test = User.load(username)\n test_password = test.password\n input_password = md5.new(\n password + config.get('security', 'salt')).digest()\n if input_password == test_password:\n return True\n else:\n return False", "def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def verify_pw(username, password):\n global password_store\n logger = logging.getLogger('verify_pw')\n if not password_store:\n logger.error(\"No password store specified\")\n return False\n logger.debug(\"Verifying password for %s\" % username)\n return password_store.verify(username, password)" ]
[ "0.82211864", "0.8146436", "0.7962432", "0.7885527", "0.78588223", "0.78382313", "0.78288996", "0.7815222", "0.7798579", "0.77856815", "0.77700555", "0.7760794", "0.7754975", "0.77548647", "0.7750886", "0.7749223", "0.77322245", "0.7715806", "0.7687129", "0.7685904", "0.7673684", "0.766307", "0.766307", "0.76620096", "0.76567376", "0.7652723", "0.765255", "0.7611541", "0.75952524", "0.7564781", "0.7557824", "0.7547461", "0.75408834", "0.75314397", "0.7506924", "0.75042623", "0.74691737", "0.7442317", "0.741009", "0.740618", "0.73972464", "0.7394501", "0.7389306", "0.73495054", "0.7336364", "0.7299509", "0.7276375", "0.7247304", "0.7230579", "0.7205749", "0.7199119", "0.71977973", "0.7193516", "0.71885014", "0.71854734", "0.71695465", "0.7169254", "0.7159132", "0.7140503", "0.7135009", "0.71116257", "0.7110569", "0.7108786", "0.709969", "0.7093436", "0.7031712", "0.7030077", "0.7026029", "0.7025823", "0.69905216", "0.6989299", "0.69882464", "0.69840413", "0.696945", "0.69658744", "0.69613117", "0.6940676", "0.6931497", "0.6914118", "0.6901826", "0.68954366", "0.68901163", "0.68864787", "0.6883074", "0.68827283", "0.68799317", "0.68769723", "0.68669415", "0.6864661", "0.68634623", "0.68553925", "0.6848534", "0.6846545", "0.6840525", "0.6836461", "0.6829205", "0.6827091", "0.682658", "0.68223697", "0.6813599" ]
0.7975914
2
Sends a 401 response that enables basic auth
def authenticate(): return Response( 'Could not verify your access level for that URL.\n' 'You have to login with proper credentials', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate(self):\n abort(\n 401,\n description=self.exception,\n www_authenticate=(\"WWW-Authenticate\", 'Basic realm=\"%s\"' % __package__),\n )", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n 
{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials',\n 401,\n {\n 'WWW-Authenticate': 'Basic realm=\"Login Required\"'\n }\n )", "def authenticate():\n return send_msg(\n 401,\n 'Must be connected',\n headers={'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(render_template('index.html', auth=False), 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return flask.Response('Login required.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def authenticate(self):\n return Response(\n 'Could not verify your access level for that URL.\\nYou have to login with proper credentials',\n 401,\n {'WWW-Authenticate': 'Basic 
realm=\"Login Required\"'}\n )", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with Web Manager credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def unauthorized():\n return HttpError(401)", "def basic_auth_error():\n logger.debug(\"Basic authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def response_unauthorised():\n\n response = {\n 'status': 'failed',\n 'error': 'Not Authorised'\n }\n\n return response_json(response, status=401)", "def challenge(self, environ, status, app_headers=(), forget_headers=()):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(environ, {})\n for headers in (app_headers, forget_headers):\n for name, value in headers:\n resp.headers[name] = value\n resp.content_type = \"text/plain\"\n resp.body = \"Unauthorized\"\n return resp", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)", "def _login(self, environ, start_response):\n response = HTTPUnauthorized()\n response.www_authenticate = ('Basic', {'realm': self._realm})\n return response(environ, start_response)", "def add_basic_auth(blueprint: Blueprint, username, password, realm='api'):\n\n @blueprint.before_request\n def basic_http_auth(*args, **kwargs):\n auth = request.authorization\n if auth is None or auth.password != password or auth.username != username:\n return Response('Please login', 401, {'WWW-Authenticate': f'Basic realm=\"{realm}\"'})", "def authenticate():\n return abort(401)", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def forget(self, request):\n return [('WWW-Authenticate', 'Basic realm=\"%s\"' % self.realm)]", "def _respond_unauthorized(self, request, message=\"Unauthorized\"):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(request.environ, {})\n resp.content_type = \"text/plain\"\n resp.body = message\n request.environ[\"repoze.who.application\"] = resp\n return None", "def authenticate():\n resp = {\"status\": 401, \"message\": \"Could not verify your access level for that URL\"}\n return Response(dumps(resp), status=404, mimetype='application/json')", "def hidden_basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(404)\n return jsonify(authenticated=True, user=user)", "def CR_authentication():\n \n # create a random 10 character string\n choices = string.letters + string.digits + string.punctuation;\n randomString = ''.join(random.choice(choices) for i in range(10))\n session['challenge'] = randomString\n \n return Response('Access failed.', 401, {'WWW-Authenticate': str.format('Basic realm=\\\"Protected iStreet event data; Challenge: {0}\\\"', randomString)})", "def authenticate():\n return Response(\n '''Login Required - email [email protected] for access or DM him @_hyp3ri0n on Twitter. Please do so from a corporate email or\n with some kind of proof that you are a security engineer, academic, or need this data for research purposes (e.g. 
email from a corporate or .edu email or provide\n a linkedin or twitter account showing as much)\n You have to login with proper credentials''', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required - email [email protected] for access or DM him @_hyp3ri0n on Twitter. Please do so from a corporate email or with some kind of proof that you are a security engineer, academic, or need this data for research purposes (e.g. email from a corporate or .edu email or provide a linkedin or twitter account showing as much)\"'})", "def auth_failure():\n return \"Request denied due to failed authorization\", 201, {'Content-Type': 'text/html'}", "def unauthorized(self, error):\n return jsonify({'error': \"NOT AUTHORIZED\"}), 401", "def _handle_authentication_error(self):\n response = make_response('Access Denied')\n response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()\n response.status_code = 401\n return response", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def test_unauthorized_exception(exception_app):\n request, response = exception_app.test_client.get('/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/401/basic')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') is not None\n assert response.headers.get('WWW-Authenticate') == \"Basic realm='Sanic'\"\n\n request, response = exception_app.test_client.get('/401/digest')\n assert response.status == 401\n\n auth_header = response.headers.get('WWW-Authenticate')\n assert auth_header is not None\n assert auth_header.startswith('Digest')\n assert \"qop='auth, auth-int'\" in auth_header\n assert \"algorithm='MD5'\" in auth_header\n assert \"nonce='abcdef'\" in auth_header\n assert \"opaque='zyxwvu'\" in auth_header\n\n request, response = exception_app.test_client.get('/401/bearer')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') == \"Bearer\"", "def display_401(error):\n return render_template('/error401.html'), 401", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(401)\n\n return jsonify(authenticated=True, user=user)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def basic_http_auth(f):\n def wrap(request, *args, **kwargs):\n if request.META.get('HTTP_AUTHORIZATION', False):\n authtype, auth = request.META['HTTP_AUTHORIZATION'].split(' ')\n auth = base64.b64decode(auth)\n username, password = auth.split(':')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return f(request, *args, **kwargs)\n else:\n r = HttpResponse(\"Auth Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n r = HttpResponse(\"Auth Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n \n return wrap", "def auth_error():\n return unauthorized('Invalid credentials')", "def require_http_auth(request):\n\n if http_auth_allowed(request) and not request.user.is_authenticated:\n site = get_current_site(request)\n response = HttpResponse(status=401)\n 
response['WWW-Authenticate'] = (\n 'Basic realm=\"{}\", charset=\"UTF-8\"'.format(site.name)\n )\n # Check whether the client supports cookies.\n response.set_cookie('testcookie', '1', secure=(not settings.DEBUG),\n httponly=True, samesite='Lax')\n return response\n else:\n raise PermissionDenied()", "def response_401(description=None):\n resp_def = dict(util.RESPONSE_404)\n if description is not None:\n resp_def['description'] = description\n\n return response(401, resp_def)", "def http_basic_auth():\n users = ['administrator', 'admin']\n passwords = ['administrator', 'admin']\n protectedResource = 'http://localhost/secured_path'\n foundPass = False\n for user in users:\n if foundPass:\n break\n for passwd in passwords:\n encoded = base64.encodestring(user + ':' + passwd)\n response = requests.get(protectedResource, auth=(user, passwd))\n if response.status_code != 401:\n print('User Found!')\n print('User: %s, Pass: %s' % (user, passwd))\n foundPass = True\n break", "def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)", "def basic_auth_required(view_func):\n # http://djangosnippets.org/snippets/448/\n def _auth(request, *args, **kwargs):\n if 'HTTP_AUTHORIZATION' in request.META:\n auth = request.META['HTTP_AUTHORIZATION'].split()\n if len(auth) == 2:\n if auth[0].lower() == \"basic\":\n uname, passwd = base64.b64decode(auth[1]).split(':')\n user = authenticate(username=uname, password=passwd)\n if user is not None:\n if user.is_active:\n return view_func(request, *args, **kwargs)\n response = HttpResponse(\"Authorization Required\", status=401)\n response['WWW-Authenticate'] = 'Basic realm=\"Secure Area\"'\n return response\n return _auth", "def http_basic_auth(func):\r\n\t@wraps(func)\r\n\tdef _decorator(request, *args, **kwargs):\r\n\r\n\t\tif request.META.has_key('HTTP_AUTHORIZATION'):\r\n\t\t\ttry:\r\n\t\t\t\tauthmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\r\n\t\t\t\tif authmeth.lower() == 'basic':\r\n\t\t\t\t\tauth = auth.strip().decode('base64')\r\n\t\t\t\t\tusername, password = auth.split(':', 1)\r\n\t\t\t\t\tuser = authenticate(username=username, password=password)\r\n\r\n\t\t\t\t\tif user:\r\n\r\n\t\t\t\t\t\tlogin(request, user)\r\n\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\r\n\t\t\t\t\t\treturn HttpResponseForbidden()\r\n\r\n\t\t\texcept ValueError:\r\n\t\t\t\t# Bad HTTP_AUTHORIZATION header\r\n\t\t\t\treturn HttpResponseForbidden()\r\n\t\t\t\t\r\n\t\treturn func(request, *args, **kwargs)\r\n\treturn _decorator", "def unauthorized(self, message=None):\n return self.send_message(message, status=401)", "def basic_auth_required(fn):\n @wraps(fn)\n def _wrapper(request, *args, **kwargs):\n authentication = request.headers.get('Authentication', None)\n\n if authentication:\n if not authentication.startswith(\"Basic \"):\n request.response.status = 401\n\n return {\n 'error': \"Authentication failed!\"\n }\n\n auth_data = authentication[6:]\n\n try:\n username, password = base64.urlsafe_b64decode(auth_data).decode(\"UTF8\").split(\":\")\n\n user = request.dbsession.query(User).filter(\n User.email == username\n ).one()\n\n if user.is_password(password.encode(\"UTF8\")):\n return fn(request, *args, **kwargs)\n except (ValueError, NoResultFound):\n pass\n\n request.response.status = 401\n\n return {\n 'error': 'Authentication failed!'\n }\n\n return _wrapper", "def test_user_get_failure_using_basic_auth(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert 
resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n uuid_ = str(uuid.uuid4())\n\n # test\n resp2 = self.get_user_basic_auth(uuid_, user)\n resp_body2 = resp2.json()\n assert resp2.status_code == 401\n assert resp2.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body2[\"code\"] == \"1207\"\n assert resp_body2[\"message\"] == \"User not found!\"\n\n # teardown:\n resp3 = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_no_auth(self):\n url = 'https://domain.com/project/objects.inv'\n expected = 'https://domain.com/project/objects.inv'\n actual = _strip_basic_auth(url)\n assert expected == actual", "def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\"", "def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def basic_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.auth, \n \"Content-type\": \"text/plain\" }", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def challenge_view(self, request):\n headerlist = [(\"Content-Type\", \"text/plain\")]\n headerlist.extend(self._get_challenge_headers(request))\n return Response(\"Unauthorized\", status=\"401 Unauthorized\",\n headerlist=headerlist)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def test_401_unauthorized(app, client):\n\n @app.route(\"/401\")\n def unauthorized():\n abort(401)\n\n response = client.get(\"/401\")\n assert response.status_code == 401\n assert \"401 Unauthorized\" in str(response.data)", "def test_status_unauthenticated(self):\n rv = self.client.post('/statusize/', data={'message': 'foo'},\n follow_redirects=True)\n eq_(rv.status_code, 403)", "def 
test_call_unauthenticated(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(401)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.post(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def __call__(self, resp):\r\n if not self.auth_token:\r\n self.auth()\r\n resp.register_hook('response', self.handle_error)\r\n resp.headers['X-Auth-Token'] = self.auth_token\r\n return resp", "def _handle_401(self, response, **kwargs):\n if not response.status_code == 401 and not response.status_code == 403:\n return response\n\n # Free the original connection\n response.content\n response.close()\n\n # copy the request to resend\n newreq = response.request.copy()\n\n self._access_token = None\n self._logger.debug(\"_handle_401, cleared _access_token, retrying with new token\")\n\n newreq.headers[\"Authorization\"] = self._get_auth_value()\n\n _response = response.connection.send(newreq, **kwargs)\n _response.history.append(response)\n _response.request = newreq\n\n return _response" ]
[ "0.8093736", "0.80926424", "0.7958403", "0.7852648", "0.7843114", "0.7824419", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.7810746", "0.776624", "0.7754693", "0.771945", "0.7667712", "0.7664151", "0.76493174", "0.76140213", "0.7613103", "0.7591923", "0.74499536", "0.74152935", "0.7389793", "0.73102117", "0.72994584", "0.70069116", "0.6989699", "0.6985984", "0.69489896", "0.69403917", "0.69043815", "0.6843354", "0.6842244", "0.6816229", "0.67638755", "0.6701405", "0.6700694", "0.6666637", "0.66607744", "0.66428447", "0.66164446", "0.66088307", "0.6551196", "0.6548378", "0.64217687", "0.6410419", "0.6393135", "0.6381609", "0.6327866", "0.629943", "0.6287014", "0.62427884", "0.6242011", "0.6230159", "0.62130815", "0.6168915", "0.61408406", "0.6088527", "0.60420007", "0.6041198", "0.6029272", "0.6020013", "0.601565", "0.5991479", "0.59679294", "0.5945756", "0.59284157", "0.59284157", "0.5922706", "0.59027636", "0.58849794", "0.58700204", "0.58687663", "0.5866924", "0.58479726", "0.58452344", "0.5829068" ]
0.774767
34
Set pin as high
def set_pin_high(pin): HIGH_PINS.append(pin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_high(pin):\n _write_value(HIGH, \"{0}/gpio{1}/value\".format(_path_prefix, pin))", "def gpio_output_high(self, pin: int) -> None:\n self._pins[pin - 1] = \"OUTPUT_HIGH\"", "def set_pin_low(pin):\n if pin in HIGH_PINS:\n HIGH_PINS.remove(pin)", "def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)", "def set_high(self,chip,line):\n self.output(chip,line, HIGH)", "def gpio_output_low(self, pin: int) -> None:\n self._pins[pin - 1] = \"OUTPUT_LOW\"", "def set_tms_pin_high(self):\n self._dll.JLINKARM_SetTMS()", "def high(self, high):\n\n self._high = high", "def toggle_pin(self, pin=TIOCM_DTR, time=1000):\n\n\t\tlogging.debug(\"Set pin high\")\n\t\tioctl(self.fd, TIOCMBIS, struct.pack('I', pin))\n\n\t\tsleep(float(time) / 1000.)\n\n\t\tlogging.debug(\"Set pin low\")\n\t\tioctl(self.fd, TIOCMBIC, struct.pack('I', pin))", "def set_low(pin):\n _write_value(LOW, \"{0}/gpio{1}/value\".format(_path_prefix, pin))", "def high(self) -> None:\n self._fd.write(SYSFS_GPIO_VALUE_HIGH)\n self._fd.seek(0)", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def light_set(self, pin='D13', value='0'):\n self.bridge.put(str(pin), str(value))", "def set_tdi_pin_high(self):\n self._dll.JLINKARM_SetTDI()", "def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)", "def changeHigh(self):\n self.changeLowHigh(self.ui.t_high, t_type=\"high\")", "def pin_toggle(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n port_state = gpio.HIGH\n if gpio.input(port_num) == gpio.HIGH:\n port_state = gpio.LOW\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, port_state)", "def pin_on(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.HIGH)", "def set_low(self,chip,line):\n self.output(chip,line, LOW)", "def initialize_heating_pin(pi, pin):\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def set_reset_pin_high(self):\n self._dll.JLINKARM_SetRESET()\n return None", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def pin_off(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.LOW)", "def set_pin(self, pin):\n if pin not in range(0, 14):\n raise Exception(\"Incorrect pin {} selected. 
Pins available (0 to 13)\".format(pin))\n else:\n self.pin = pin\n self.gpio_pin = mraa.Gpio(pin)", "def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)", "def set_tck_pin_high(self):\n res = self._dll.JLINKARM_SetTCK()\n if res < 0:\n raise errors.JLinkException('Feature not supported.')\n return None", "def setHack(self, pin, value, board=0):\n msg = [int(pin), int(value)]\n return self.callModule('hackp', board, 0, 'write', msg)", "def ask_high(self, ask_high):\n\n self._ask_high = ask_high", "def set_interrupt_on_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__inta = self.__helper.updatebyte(self.__inta, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENA, self.__inta)\n else:\n self.__intb = self.__helper.updatebyte(self.__intb, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENB, self.__intb)\n return", "def set_trst_pin_high(self):\n self._dll.JLINKARM_SetTRST()", "def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)", "def write_pin(self, pin: int, value: bool):\n RPi.GPIO.output(pin, value)", "def bid_high(self, bid_high):\n\n self._bid_high = bid_high", "def low(self) -> None:\n self._fd.write(SYSFS_GPIO_VALUE_LOW)\n self._fd.seek(0)", "def write_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__helper.updatebyte(\n self.__port_a_value, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOA, self.__port_a_value)\n else:\n self.__port_b_value = self.__helper.updatebyte(\n self.__port_b_value, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOB, self.__port_b_value)\n return", "def light_on(self, pin='D13'):\n self.light_set(pin, '1')", "def low(self, low):\n\n self._low = low", "def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)", "def write_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" on pin \" + str(attr.io_pin))\n GPIO.output(attr.io_pin, attr.value)", "def price_high(self, price_high):\n\n self._price_high = price_high", "def set_target_temperature_high(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._target_temperature_high = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._target_temperature_high = celsius_to_kelvin(value)\r\n else:\r\n self._target_temperature_high = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"target_temperature_high\", value))", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')", "def set_tms_pin_low(self):\n self._dll.JLINKARM_ClrTMS()", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def high(self):", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. 
PWM of pin #%s would be %s\" % (pin,value))\n return value", "def gpio_set_input_pullup(self, pin: int) -> None:\n self._pins[pin - 1] = \"INPUT_PULLUP\"", "def do_high(self, high_reference_pH):\n if high_reference_pH:\n ise.calibrateProbeHigh(float(high_reference_pH))\n\n print(\"\\thigh reference | reading: \" + str(ise.getCalibrateHighReference()\n ) + \" | \" + str(ise.getCalibrateHighReading()))", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "def set_high_current(self, high_current):\n self.target_high_current = high_current\n # Setting SEM to target high current must be implemented in child class!", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def high(self, high: float):\n if high is None:\n raise ValueError(\"Invalid value for `high`, must not be `None`\") # noqa: E501\n\n self._high = high", "def set_armed(self, arm):\n pin = 0 if arm else 1\n self.mcp_misc[0].output(pin, MCP23008.LOW)\n self.mcp_misc[0].output(pin, MCP23008.HIGH)\n self.mcp_misc[0].output(pin, MCP23008.LOW)", "def set_pupd(self, port, bit, x):\n hw = self.device.peripherals[port].PUPDR\n mode = {'pu':1,'pd':2}.get(x, 0)\n shift = (bit & 15) << 1\n val = hw.rd()\n val &= ~(3 << shift)\n val |= mode << shift\n hw.wr(val)", "def _pwm_pin(self, pin_obj):\n self.hw_interfaces[\"pwm\"][pin_obj.name] = PWM(pin=pin_obj)", "def setInternalPulser(self,pulserEnable,pulseHeight):\n pass", "def set_tdi_pin_low(self):\n self._dll.JLINKARM_ClrTDI()", "def __init__(self, pin, pinishigh=True):\n self.pin = pin\n self.pinishigh = pinishigh", "def _baste_off(self):\n GPIO.output(self._baster_pin, GPIO.LOW)", "def hold(self):\n self.dev.write(1, 'H')", "def gpio_set_input(self, pin: int) -> None:\n self._pins[pin - 1] = \"INPUT\"", "def set_digital_latch(self, pin, threshold_type, cb):\n with self.pymata.data_lock:\n self.digital_latch_table[pin] = [self.LATCH_ARMED, threshold_type, 0, 0, cb]", "def toggle(self):\n try:\n self.pin.toggle()\n except NotImplementedError:\n if self.ison():\n self.off()\n else:\n self.on()", "def reset_low_high(self, name):\n self.range_dict[name][\"low\"] = self.range_dict[name][\"low_default\"]\n self.range_dict[name][\"high\"] = self.range_dict[name][\"high_default\"]\n self.limit_dict[name][\"low\"] = 0.0\n self.limit_dict[name][\"high\"] = 100.0\n self.show_image()", "def set(self, state):\n if self.mode == gpio.OUT:\n # Write an event to the buffer. 
\n self._buffer_write(state, time.time()*1000)\n\n gpio.output(self.bcm_id, state)\n self.state = state", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def read_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" from pin \" + str(attr.io_pin))\n attr.pvalue = attr.value\n attr.value = bool(GPIO.input(attr.io_pin))\n if attr.value != attr.pvalue:\n attr.event = True", "def setBuzzerPin(Pin):\n global buzzerPin\n buzzerPin = Pin\n # Replaces old pin value with the new Pin argument.", "def set_frequency(self, pin, frequency):\n raise NotImplementedError", "def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def pin_pullup(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLUP)", "def set_reset_pin_low(self):\n self._dll.JLINKARM_ClrRESET()\n return None", "def set_analog_latch(self, pin, threshold_type, threshold_value, cb):\n with self.pymata.data_lock:\n self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, 0, 0, cb]", "def changeLow(self):\n self.changeLowHigh(self.ui.t_low, t_type=\"low\")", "def set_pin_mode(self, pin_number, mode):\n command = (''.join(('M',mode,str(pin_number)))).encode()\n #print 'set_pin_mode =',command,(''.join(('M',mode,str(pin_number))))\n self.conn.write(command)", "async def digital_pin_write(self, pin, value):\n\n command = (PrivateConstants.SET_DIGITAL_PIN_VALUE, pin, value)\n\n await self._send_command(command)", "def set_eco_temperature_high(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._eco_temperature_high = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._eco_temperature_high = celsius_to_kelvin(value)\r\n else:\r\n self._eco_temperature_high = value\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"eco_temperature_high\",\r\n f\"{value} {self.temperature_scale}\"))", "async def set_digital_latch(self, pin, threshold_value, cb=None):\n if 0 <= threshold_value <= 1:\n key = 'D' + str(pin)\n self.latch_map[key] = [Constants.LATCH_ARMED, Constants.LATCH_EQ,\n threshold_value, 0, 0, cb]\n return True\n else:\n return False", "def pin(self) -> int:", "def SetPinConfig(self,pin, pintype, **kwargs):\n if self.Pins[pin]!=None:\n del self.Pins[pin]\n if self.PinConfig.has_key(pin):\n if self.PinConfig[pin].has_key('property'):\n delattr(self,self.PinConfig[pin]['property'])\n del self.PinConfig[pin]\n if not self.PinConfig.has_key(pin):\n self.PinConfig[pin] = kwargs \n self.PinConfig[pin][\"device\"]=pintype\n c = self.ADCChannels\n self.Pins[pin]=pintype(pin,self)\n self.DoPinConfig()\n if c!= self.ADCChannels:\n self.Bus.Write_uInt8(self.Address,0x80,self.ADCChannels)", "def changeLowHigh(self, text_widget, t_type=\"low\"):\n check = re.search(\"(0[.][0-9]+|1)\", text_widget.text())\n if check and self.ui.t_low.text() != self.ui.t_high.text():\n num = check.group()\n i_num = float(num)\n if t_type == \"low\":\n self.low = i_num\n else:\n self.high = i_num\n 
text_widget.setText(str(i_num))\n else:\n logging.info(\"Wrong Input For low or high\")\n if t_type == \"low\":\n text_widget.setText(\"0.0\")\n self.low = 0.0\n else:\n text_widget.setText(\"1\")\n self.high = 1", "def target_temperature_high(self):\n return self._device.setpoint_cool", "def set_led(self, on=True):\n if on:\n GPIO.output(self.LED, GPIO.HIGH)\n else:\n GPIO.output(self.LED, GPIO.LOW)", "def digital_write(self, pin, value):\n #logstring(\"going for pin {} and value {} while pincount is {}\".format(pin, value, len(self._digital_pins_directly)))\n self._digital_pins_directly[pin].DigitalWrite(value)\n #logstring(\"finished digital write\")", "def toggleEdgeMode(self, PWMpin):\n mask = 1 << PWMpin\n self._injectFault(\"PWM1PCR\", self.PCR, mask)", "def off(self):\n if self._is_on:\n self._pwms.disable(self._pin_index)\n self._is_on = False", "def digital_pin_write(self, pin, value):\n\n self._digital_pins_directly[pin].DigitalWrite(value, PermitWriteToInputPin = False)", "def read_virtual_pin_handler(pin):\n try:\n # send moisture read value to Virtual Pin\n blynk.virtual_write(5, track_moisture_level())\n\n if LIMIT_FLAG == 3:\n blynk.set_property(5, 'color', '#FF0000') # Red\n blynk.set_property(6, 'color', '#FF0000')\n blynk.virtual_write(6, \"LOW\")\n elif LIMIT_FLAG == 2:\n blynk.set_property(5, 'color', '#FFD700') # Yellow\n blynk.set_property(6, 'color', '#FFD700')\n blynk.virtual_write(6, \"NORMAL\")\n else:\n blynk.set_property(5, 'color', '#00BFFF') # Blue\n blynk.set_property(6, 'color', '#00BFFF')\n blynk.virtual_write(6, \"HIGH\")\n except Exception as e:\n logging_write(e)", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def setoffset(self,value):\n self.instrument.write('VOLT:OFFS {0}'.format(value))", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def set_lowht(self, lowht):\n self._lowht = lowht", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)", "def setOutput(self):\n self.stopListening()\n\n gpio.setup(self.bcm_id, gpio.OUT)\n self.mode = gpio.OUT", "def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 5))", "def bid_low(self, bid_low):\n\n self._bid_low = bid_low", "def setPIN(self, DPIN, value):\n try:\n with open('/sys/class/gpio/' + DPIN + '/value', 'a') as f:\n f.write(value)\n\n except Exception as err:\n LOG.error(\"Error setting PIN value: \" + str(err))" ]
[ "0.8202024", "0.7684762", "0.76168716", "0.756904", "0.74249405", "0.720764", "0.7134327", "0.711949", "0.71125674", "0.7103002", "0.70253396", "0.69540894", "0.68812466", "0.6861089", "0.6766074", "0.67559886", "0.6692666", "0.66920656", "0.66851836", "0.6638315", "0.6575943", "0.6559691", "0.6537048", "0.6448162", "0.6396823", "0.63529974", "0.6337604", "0.6327708", "0.62815267", "0.623364", "0.6216751", "0.6208945", "0.6196828", "0.6188757", "0.6081948", "0.60740787", "0.60380346", "0.6021822", "0.6002586", "0.5997411", "0.5995002", "0.5973633", "0.5958842", "0.5951819", "0.59447956", "0.5929268", "0.5897759", "0.5825407", "0.58064234", "0.58030635", "0.57824945", "0.5770595", "0.57361877", "0.57354575", "0.5717931", "0.57075113", "0.5701261", "0.5697417", "0.5695858", "0.5681105", "0.5675674", "0.5626174", "0.56236124", "0.56224644", "0.56220365", "0.56160194", "0.56088245", "0.5589956", "0.55880725", "0.558679", "0.55844295", "0.5584237", "0.55737996", "0.5552937", "0.5538685", "0.5527168", "0.5525475", "0.5508741", "0.5507395", "0.5506246", "0.55044186", "0.55031085", "0.5500935", "0.54874766", "0.5483619", "0.5468932", "0.54576623", "0.5456029", "0.5454649", "0.54534614", "0.54441273", "0.5443488", "0.54318655", "0.543153", "0.54244155", "0.5423068", "0.54207665", "0.54206955", "0.5418761", "0.5414488" ]
0.85144776
0
Set pin as high
def set_pin_low(pin): if pin in HIGH_PINS: HIGH_PINS.remove(pin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pin_high(pin):\n HIGH_PINS.append(pin)", "def set_high(pin):\n _write_value(HIGH, \"{0}/gpio{1}/value\".format(_path_prefix, pin))", "def gpio_output_high(self, pin: int) -> None:\n self._pins[pin - 1] = \"OUTPUT_HIGH\"", "def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)", "def set_high(self,chip,line):\n self.output(chip,line, HIGH)", "def gpio_output_low(self, pin: int) -> None:\n self._pins[pin - 1] = \"OUTPUT_LOW\"", "def set_tms_pin_high(self):\n self._dll.JLINKARM_SetTMS()", "def high(self, high):\n\n self._high = high", "def toggle_pin(self, pin=TIOCM_DTR, time=1000):\n\n\t\tlogging.debug(\"Set pin high\")\n\t\tioctl(self.fd, TIOCMBIS, struct.pack('I', pin))\n\n\t\tsleep(float(time) / 1000.)\n\n\t\tlogging.debug(\"Set pin low\")\n\t\tioctl(self.fd, TIOCMBIC, struct.pack('I', pin))", "def set_low(pin):\n _write_value(LOW, \"{0}/gpio{1}/value\".format(_path_prefix, pin))", "def high(self) -> None:\n self._fd.write(SYSFS_GPIO_VALUE_HIGH)\n self._fd.seek(0)", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def light_set(self, pin='D13', value='0'):\n self.bridge.put(str(pin), str(value))", "def set_tdi_pin_high(self):\n self._dll.JLINKARM_SetTDI()", "def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)", "def changeHigh(self):\n self.changeLowHigh(self.ui.t_high, t_type=\"high\")", "def pin_toggle(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n port_state = gpio.HIGH\n if gpio.input(port_num) == gpio.HIGH:\n port_state = gpio.LOW\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, port_state)", "def pin_on(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.HIGH)", "def set_low(self,chip,line):\n self.output(chip,line, LOW)", "def initialize_heating_pin(pi, pin):\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def set_reset_pin_high(self):\n self._dll.JLINKARM_SetRESET()\n return None", "def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return", "def pin_off(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.LOW)", "def set_pin(self, pin):\n if pin not in range(0, 14):\n raise Exception(\"Incorrect pin {} selected. 
Pins available (0 to 13)\".format(pin))\n else:\n self.pin = pin\n self.gpio_pin = mraa.Gpio(pin)", "def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)", "def set_tck_pin_high(self):\n res = self._dll.JLINKARM_SetTCK()\n if res < 0:\n raise errors.JLinkException('Feature not supported.')\n return None", "def setHack(self, pin, value, board=0):\n msg = [int(pin), int(value)]\n return self.callModule('hackp', board, 0, 'write', msg)", "def ask_high(self, ask_high):\n\n self._ask_high = ask_high", "def set_interrupt_on_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__inta = self.__helper.updatebyte(self.__inta, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENA, self.__inta)\n else:\n self.__intb = self.__helper.updatebyte(self.__intb, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENB, self.__intb)\n return", "def set_trst_pin_high(self):\n self._dll.JLINKARM_SetTRST()", "def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)", "def write_pin(self, pin: int, value: bool):\n RPi.GPIO.output(pin, value)", "def bid_high(self, bid_high):\n\n self._bid_high = bid_high", "def low(self) -> None:\n self._fd.write(SYSFS_GPIO_VALUE_LOW)\n self._fd.seek(0)", "def write_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__helper.updatebyte(\n self.__port_a_value, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOA, self.__port_a_value)\n else:\n self.__port_b_value = self.__helper.updatebyte(\n self.__port_b_value, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOB, self.__port_b_value)\n return", "def light_on(self, pin='D13'):\n self.light_set(pin, '1')", "def low(self, low):\n\n self._low = low", "def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)", "def write_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" on pin \" + str(attr.io_pin))\n GPIO.output(attr.io_pin, attr.value)", "def price_high(self, price_high):\n\n self._price_high = price_high", "def set_target_temperature_high(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._target_temperature_high = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._target_temperature_high = celsius_to_kelvin(value)\r\n else:\r\n self._target_temperature_high = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"target_temperature_high\", value))", "def light_off(self, pin='D13'):\n self.light_set(pin, '0')", "def set_tms_pin_low(self):\n self._dll.JLINKARM_ClrTMS()", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "def high(self):", "def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. 
PWM of pin #%s would be %s\" % (pin,value))\n return value", "def gpio_set_input_pullup(self, pin: int) -> None:\n self._pins[pin - 1] = \"INPUT_PULLUP\"", "def do_high(self, high_reference_pH):\n if high_reference_pH:\n ise.calibrateProbeHigh(float(high_reference_pH))\n\n print(\"\\thigh reference | reading: \" + str(ise.getCalibrateHighReference()\n ) + \" | \" + str(ise.getCalibrateHighReading()))", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "def set_high_current(self, high_current):\n self.target_high_current = high_current\n # Setting SEM to target high current must be implemented in child class!", "def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)", "def high(self, high: float):\n if high is None:\n raise ValueError(\"Invalid value for `high`, must not be `None`\") # noqa: E501\n\n self._high = high", "def set_armed(self, arm):\n pin = 0 if arm else 1\n self.mcp_misc[0].output(pin, MCP23008.LOW)\n self.mcp_misc[0].output(pin, MCP23008.HIGH)\n self.mcp_misc[0].output(pin, MCP23008.LOW)", "def set_pupd(self, port, bit, x):\n hw = self.device.peripherals[port].PUPDR\n mode = {'pu':1,'pd':2}.get(x, 0)\n shift = (bit & 15) << 1\n val = hw.rd()\n val &= ~(3 << shift)\n val |= mode << shift\n hw.wr(val)", "def _pwm_pin(self, pin_obj):\n self.hw_interfaces[\"pwm\"][pin_obj.name] = PWM(pin=pin_obj)", "def setInternalPulser(self,pulserEnable,pulseHeight):\n pass", "def set_tdi_pin_low(self):\n self._dll.JLINKARM_ClrTDI()", "def __init__(self, pin, pinishigh=True):\n self.pin = pin\n self.pinishigh = pinishigh", "def _baste_off(self):\n GPIO.output(self._baster_pin, GPIO.LOW)", "def hold(self):\n self.dev.write(1, 'H')", "def gpio_set_input(self, pin: int) -> None:\n self._pins[pin - 1] = \"INPUT\"", "def set_digital_latch(self, pin, threshold_type, cb):\n with self.pymata.data_lock:\n self.digital_latch_table[pin] = [self.LATCH_ARMED, threshold_type, 0, 0, cb]", "def toggle(self):\n try:\n self.pin.toggle()\n except NotImplementedError:\n if self.ison():\n self.off()\n else:\n self.on()", "def reset_low_high(self, name):\n self.range_dict[name][\"low\"] = self.range_dict[name][\"low_default\"]\n self.range_dict[name][\"high\"] = self.range_dict[name][\"high_default\"]\n self.limit_dict[name][\"low\"] = 0.0\n self.limit_dict[name][\"high\"] = 100.0\n self.show_image()", "def set(self, state):\n if self.mode == gpio.OUT:\n # Write an event to the buffer. 
\n self._buffer_write(state, time.time()*1000)\n\n gpio.output(self.bcm_id, state)\n self.state = state", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def read_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" from pin \" + str(attr.io_pin))\n attr.pvalue = attr.value\n attr.value = bool(GPIO.input(attr.io_pin))\n if attr.value != attr.pvalue:\n attr.event = True", "def setBuzzerPin(Pin):\n global buzzerPin\n buzzerPin = Pin\n # Replaces old pin value with the new Pin argument.", "def set_frequency(self, pin, frequency):\n raise NotImplementedError", "def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def pin_pullup(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.pullup(port_num, gpio.PULLUP)", "def set_reset_pin_low(self):\n self._dll.JLINKARM_ClrRESET()\n return None", "def set_analog_latch(self, pin, threshold_type, threshold_value, cb):\n with self.pymata.data_lock:\n self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, 0, 0, cb]", "def changeLow(self):\n self.changeLowHigh(self.ui.t_low, t_type=\"low\")", "def set_pin_mode(self, pin_number, mode):\n command = (''.join(('M',mode,str(pin_number)))).encode()\n #print 'set_pin_mode =',command,(''.join(('M',mode,str(pin_number))))\n self.conn.write(command)", "async def digital_pin_write(self, pin, value):\n\n command = (PrivateConstants.SET_DIGITAL_PIN_VALUE, pin, value)\n\n await self._send_command(command)", "def set_eco_temperature_high(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._eco_temperature_high = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._eco_temperature_high = celsius_to_kelvin(value)\r\n else:\r\n self._eco_temperature_high = value\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"eco_temperature_high\",\r\n f\"{value} {self.temperature_scale}\"))", "async def set_digital_latch(self, pin, threshold_value, cb=None):\n if 0 <= threshold_value <= 1:\n key = 'D' + str(pin)\n self.latch_map[key] = [Constants.LATCH_ARMED, Constants.LATCH_EQ,\n threshold_value, 0, 0, cb]\n return True\n else:\n return False", "def pin(self) -> int:", "def SetPinConfig(self,pin, pintype, **kwargs):\n if self.Pins[pin]!=None:\n del self.Pins[pin]\n if self.PinConfig.has_key(pin):\n if self.PinConfig[pin].has_key('property'):\n delattr(self,self.PinConfig[pin]['property'])\n del self.PinConfig[pin]\n if not self.PinConfig.has_key(pin):\n self.PinConfig[pin] = kwargs \n self.PinConfig[pin][\"device\"]=pintype\n c = self.ADCChannels\n self.Pins[pin]=pintype(pin,self)\n self.DoPinConfig()\n if c!= self.ADCChannels:\n self.Bus.Write_uInt8(self.Address,0x80,self.ADCChannels)", "def changeLowHigh(self, text_widget, t_type=\"low\"):\n check = re.search(\"(0[.][0-9]+|1)\", text_widget.text())\n if check and self.ui.t_low.text() != self.ui.t_high.text():\n num = check.group()\n i_num = float(num)\n if t_type == \"low\":\n self.low = i_num\n else:\n self.high = i_num\n 
text_widget.setText(str(i_num))\n else:\n logging.info(\"Wrong Input For low or high\")\n if t_type == \"low\":\n text_widget.setText(\"0.0\")\n self.low = 0.0\n else:\n text_widget.setText(\"1\")\n self.high = 1", "def target_temperature_high(self):\n return self._device.setpoint_cool", "def set_led(self, on=True):\n if on:\n GPIO.output(self.LED, GPIO.HIGH)\n else:\n GPIO.output(self.LED, GPIO.LOW)", "def digital_write(self, pin, value):\n #logstring(\"going for pin {} and value {} while pincount is {}\".format(pin, value, len(self._digital_pins_directly)))\n self._digital_pins_directly[pin].DigitalWrite(value)\n #logstring(\"finished digital write\")", "def toggleEdgeMode(self, PWMpin):\n mask = 1 << PWMpin\n self._injectFault(\"PWM1PCR\", self.PCR, mask)", "def off(self):\n if self._is_on:\n self._pwms.disable(self._pin_index)\n self._is_on = False", "def digital_pin_write(self, pin, value):\n\n self._digital_pins_directly[pin].DigitalWrite(value, PermitWriteToInputPin = False)", "def read_virtual_pin_handler(pin):\n try:\n # send moisture read value to Virtual Pin\n blynk.virtual_write(5, track_moisture_level())\n\n if LIMIT_FLAG == 3:\n blynk.set_property(5, 'color', '#FF0000') # Red\n blynk.set_property(6, 'color', '#FF0000')\n blynk.virtual_write(6, \"LOW\")\n elif LIMIT_FLAG == 2:\n blynk.set_property(5, 'color', '#FFD700') # Yellow\n blynk.set_property(6, 'color', '#FFD700')\n blynk.virtual_write(6, \"NORMAL\")\n else:\n blynk.set_property(5, 'color', '#00BFFF') # Blue\n blynk.set_property(6, 'color', '#00BFFF')\n blynk.virtual_write(6, \"HIGH\")\n except Exception as e:\n logging_write(e)", "def int_handle_encoder(self,pin):\n\t\t#print \"DEBUG: self.int_handle_encoder! for pin: {0}\".format(pin)\n\t\t\t\n\t\tdevice = self.get_device_config_by_pin(pin)\n\t\t\n\t\tencoder_pinA = device['clk']\n\t\tencoder_pinB = device['dt']\n\n\t\tSwitch_A = self.gpio.input(encoder_pinA)\n\t\tSwitch_B = self.gpio.input(encoder_pinB)\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\t#sleep(0.02)\n\t\t#if not self.gpio.input(encoder_pinA) == self.pins_config[encoder_pinA]:\n\t\t#\treturn None\n\t\t#if not self.gpio.input(encoder_pinB) == self.pins_config[encoder_pinB]:\n\t\t#\treturn None\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# now check if state of A or B has changed\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if not that means that bouncing caused it\t\n\t\tCurrent_A = self.pins_state[encoder_pinA]\n\t\tCurrent_B = self.pins_state[encoder_pinB]\n\t\tif Current_A == Switch_A and Current_B == Switch_B:\t\t# Same interrupt as before (Bouncing)?\n\t\t\treturn\t\t\t\t\t\t\t\t\t\t\t\t# ignore interrupt!\n\n\t\tself.pins_state[encoder_pinA] = Switch_A\t\t\t\t# remember new state\n\t\tself.pins_state[encoder_pinB] = Switch_B\t\t\t\t# for next bouncing check\n\t\t\n\t\t# -------------------------------\n\t\tfunction = self.get_encoder_function_by_pin(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\tif function is not None:\n\t\t\tif (Switch_A and Switch_B):\t\t\t\t\t\t# Both one active? 
Yes -> end of sequence\n\t\t\t\tthis_chg = datetime.now()\n\t\t\t\tdelta = this_chg - self.encoder_last_chg\n\t\t\t\t#print \"diff: {0}\".format(delta.total_seconds())\n\t\t\t\t#print type(delta.total_seconds())\t#float\n\t\t\t\tif delta.total_seconds() < 0.1:\n\t\t\t\t\tself.encoder_fast_count += 1\n\t\t\t\t\t#if self.encoder_fast_count > 3:\n\t\t\t\t\t#\tprint \"FAST {0}\".format(self.encoder_fast_count)\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tprint \"Maybe.....\"\n\t\t\t\telse:\n\t\t\t\t\tself.encoder_fast_count = 0\n\t\t\t\n\t\t\t\t\"\"\" why do we do this?\n\t\t\t\tif self.modes.active_modes():\n\t\t\t\t\t#self.reset_mode_timer(self.modes_old[0]['reset'])\n\t\t\t\t\tif 'reset' in self.mode_sets[function['mode_cycle']]:\n\t\t\t\t\t\tself.reset_mode_timer(self.mode_sets[function['mode_cycle']]['reset'])\n\t\t\t\t\"\"\"\n\n\t\t\t\tf_args = None\n\t\t\t\tif pin == encoder_pinB:\t\t\t\t\t\t\t# Turning direction depends on \n\t\t\t\t\t#COUNTER CLOCKWISE (CCW) or DECREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_ccw' in function:\t\t\t\t\n\t\t\t\t\t\tkey = 'function_fast_ccw'\n\t\t\t\t\t\tkey_args = 'function_fast_ccw_args'\n\n\t\t\t\t\telif 'function_ccw' in function:\n\t\t\t\t\t\tkey = 'function_ccw'\n\t\t\t\t\t\tkey_args = 'function_ccw_args'\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#CLOCKWISE (CW) or INCREASE\n\t\t\t\t\tif self.encoder_fast_count > 3 and 'function_fast_cw' in function:\n\t\t\t\t\t\tkey = 'function_fast_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\t\t\t\t\t\n\t\t\t\t\telif 'function_cw' in function:\n\t\t\t\t\t\tkey = 'function_cw'\n\t\t\t\t\t\tkey_args = 'function_cw_args'\n\n\t\t\t\t# prepare arguments\n\t\t\t\tif key_args in function:\n\t\t\t\t\tif isinstance(function[key_args],str):\n\t\t\t\t\t\t#f_args = [function[key_args]]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#f_args = *function[key_args]\n\t\t\t\t\t\tself.__exec_function_by_code(function[key], *function[key_args])\n\t\t\t\telse:\n\t\t\t\t\tself.__exec_function_by_code(function[key])\n\t\t\t\t\t\n\t\t\t\t# execute\n\t\t\t\t#self.__exec_function_by_code(function[key], *[function[key_args]])\n\t\t\t\t\t\t\n\t\t\t\tself.encoder_last_chg = this_chg\n\t\telse:\n\t\t\tself.__printer(\"Encoder, no function\",level=LL_DEBUG)\n\n\n\t\t\tpigpio.pi()", "def setoffset(self,value):\n self.instrument.write('VOLT:OFFS {0}'.format(value))", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def set_lowht(self, lowht):\n self._lowht = lowht", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)", "def setOutput(self):\n self.stopListening()\n\n gpio.setup(self.bcm_id, gpio.OUT)\n self.mode = gpio.OUT", "def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 5))", "def bid_low(self, bid_low):\n\n self._bid_low = bid_low", "def setPIN(self, DPIN, value):\n try:\n with open('/sys/class/gpio/' + DPIN + '/value', 'a') as f:\n f.write(value)\n\n except Exception as err:\n LOG.error(\"Error setting PIN value: \" + str(err))" ]
[ "0.85144776", "0.8202024", "0.7684762", "0.756904", "0.74249405", "0.720764", "0.7134327", "0.711949", "0.71125674", "0.7103002", "0.70253396", "0.69540894", "0.68812466", "0.6861089", "0.6766074", "0.67559886", "0.6692666", "0.66920656", "0.66851836", "0.6638315", "0.6575943", "0.6559691", "0.6537048", "0.6448162", "0.6396823", "0.63529974", "0.6337604", "0.6327708", "0.62815267", "0.623364", "0.6216751", "0.6208945", "0.6196828", "0.6188757", "0.6081948", "0.60740787", "0.60380346", "0.6021822", "0.6002586", "0.5997411", "0.5995002", "0.5973633", "0.5958842", "0.5951819", "0.59447956", "0.5929268", "0.5897759", "0.5825407", "0.58064234", "0.58030635", "0.57824945", "0.5770595", "0.57361877", "0.57354575", "0.5717931", "0.57075113", "0.5701261", "0.5697417", "0.5695858", "0.5681105", "0.5675674", "0.5626174", "0.56236124", "0.56224644", "0.56220365", "0.56160194", "0.56088245", "0.5589956", "0.55880725", "0.558679", "0.55844295", "0.5584237", "0.55737996", "0.5552937", "0.5538685", "0.5527168", "0.5525475", "0.5508741", "0.5507395", "0.5506246", "0.55044186", "0.55031085", "0.5500935", "0.54874766", "0.5483619", "0.5468932", "0.54576623", "0.5456029", "0.5454649", "0.54534614", "0.54441273", "0.5443488", "0.54318655", "0.543153", "0.54244155", "0.5423068", "0.54207665", "0.54206955", "0.5418761", "0.5414488" ]
0.76168716
3
init a service group
def __init__(self, name):
        self.name = name
        self.elements = list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initService(self):", "def _initGroups(self):\n defaults = self._getGroupDefaults()\n ddict = self._getDefaultGroupDict(defaults)\n\n for group in self._config.sections():\n ddict[\"_name\"] = group\n container = self.getGroupContainer(**ddict)\n self._passConfig(container, group)\n self.groups.append(container)\n\n if not self.groups:\n self.groups.append(self.getGroupContainer(**defaults._dict_))", "def init():\n return _libsbml.GroupsExtension_init()", "def _init_service(self):\n self.robot_variables.check_variables()\n # setting launch id for report portal service\n self.robot_service.init_service(endpoint=self.robot_variables.endpoint,\n project=self.robot_variables.project,\n uuid=self.robot_variables.uuid)", "def __init__(self, context=None):\n self._context = context or google.datalab.Context.default()\n self._client = _utils.make_client(self._context)\n self._group_dict = None", "def __init__(self, service_name):\n self.service_name = service_name", "def __init__(self, groups=dict()):\n self.groups = groups", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def GroupsExtension_init():\n return _libsbml.GroupsExtension_init()", "def _initialize(self):\n try:\n self._azure_client.get_resource_group(self.resource_group)\n\n except CloudError as cloud_error:\n if cloud_error.error.error == \"ResourceGroupNotFound\":\n resource_group_params = {'location': self.region_name}\n try:\n self._azure_client.\\\n create_resource_group(self.resource_group,\n resource_group_params)\n except CloudError as cloud_error2: # pragma: no cover\n if cloud_error2.error.error == \"AuthorizationFailed\":\n mess = 'The following error was returned by Azure:\\n' \\\n '%s\\n\\nThis is likely because the Role' \\\n 'associated with the given credentials does ' \\\n 'not allow for Resource Group creation.\\nA ' \\\n 'Resource Group is necessary to manage ' \\\n 'resources in Azure. 
You must either ' \\\n 'provide an existing Resource Group as part ' \\\n 'of the configuration, or elevate the ' \\\n 'associated role.\\nFor more information on ' \\\n 'roles, see: https://docs.microsoft.com/' \\\n 'en-us/azure/role-based-access-control/' \\\n 'overview\\n' % cloud_error2\n raise ProviderConnectionException(mess)\n else:\n raise cloud_error2\n\n else:\n raise cloud_error", "def __init__(__self__,\n resource_name: str,\n args: ManagedNetworkGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, conf, group):\n self._conf = conf\n self._group = group", "def __init__(__self__,\n resource_name: str,\n args: BasicEndpointGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def set_service_group(self, service_group):\n self.single_selection_from_static_kendo_dropdown(self.service_group_kendo_dropdown_locator, service_group)", "def __init__(self, name, group_resources, group_yamls):\n self.log = logging.getLogger(__name__)\n self.name = name\n self.clouds = {}\n self.group_resources = group_resources\n self.group_yamls = group_yamls", "def __init__(self) :\n\n class_loglevel = getattr(afs.CONFIG,\"LogLevel_%s\" \\\n % self.__class__.__name__, \"\").upper()\n numeric_loglevel = getattr(logging, class_loglevel, 0)\n self.logger = logging.getLogger(\"afs.lla.%s\" % self.__class__.__name__)\n self.logger.setLevel(numeric_loglevel)\n self.logger.debug(\"initializing %s-Object\" % (self.__class__.__name__))\n self.spool_dir = \"%s/%s\" % (afs.CONFIG.SpoolDirBase, self.__class__.__name__)\n if not os.path.exists(afs.CONFIG.SpoolDirBase) :\n try :\n os.makedirs(afs.CONFIG.SpoolDirBase) \n except :\n raise RuntimeError(\"Cannot create SpoolDirBase %s. Please check your configuration.\" % afs.CONFIG.SpoolDirBase)\n \n # create spool dir\n count = 0\n while 1 :\n if os.path.exists(self.spool_dir) :\n count += 1 \n self.spool_dir = \"%s/%s-%d\" % (afs.CONFIG.SpoolDirBase, self.__class__.__name__, count)\n else :\n os.mkdir(self.spool_dir)\n break\n\n # Async INIT\n # dict of active jobs in this service\n # is of form [sp_idant] = {\"parse_fct\" : , \"parse_parameterlist\": , \"cmd_list\": , \"timeout\" :, \"start_time\" : }\n self.active_subprocesses = {}\n # dict of for [sp_ident] = result_obj \n self.finished_subprocesses = {}\n return", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def pre_security_group_create(self, resource_dict):\n pass", "def __init__(self, name, group=None):\n self.name = name\n self.group = group", "def __init__(self, group_id=None):\n super().__init__()\n self.group_id = group_id", "def __init__(\n self,\n group,\n controls):\n self.group = group\n self.controls = controls", "def __on_group_created(self, logger, *args):", "def __init__(self, auth_args, name, desc,\n net_conf, srv_grp_lst, sep_access_port=False,\n fip_port=None):\n\n self.name = name\n self.desc = desc\n self.net_conf = net_conf\n self.srv_grp_lst = deque(srv_grp_lst)\n self.sep_access_port = sep_access_port\n self.fip_port = fip_port\n\n self.conn = connection.Connection(**auth_args)\n # MARK: Since there is no examples for usage of the orchestration\n # resource in openstack-pythonsdk, the heatclient lib is used here.\n # It SHOULD be replaced with pythonsdk later\n loader = loading.get_plugin_loader('password')\n auth = loader.load_from_options(**auth_args)\n sess = session.Session(auth=auth)\n self.heat_client = heatclient.Client('1', session=sess)\n\n self._get_network_id()", "def __init__(self, groups: 
Iterable[List[str]]) -> None:\n self.module_map = {} # type: Dict[str, str]\n for names in groups:\n self.module_map.update(make_module_translation_map(names))\n self.translations = {} # type: Dict[Tuple[str, str], str]\n self.used_names = set() # type: Set[str]", "async def groups_service_handler(service: ServiceCall) -> None:\n object_id = service.data[ATTR_OBJECT_ID]\n entity_id = f\"{DOMAIN}.{object_id}\"\n group = component.get_entity(entity_id)\n\n # new group\n if service.service == SERVICE_SET and group is None:\n entity_ids = (\n service.data.get(ATTR_ENTITIES)\n or service.data.get(ATTR_ADD_ENTITIES)\n or None\n )\n\n extra_arg = {\n attr: service.data[attr]\n for attr in (ATTR_ICON,)\n if service.data.get(attr) is not None\n }\n\n await Group.async_create_group(\n hass,\n service.data.get(ATTR_NAME, object_id),\n object_id=object_id,\n entity_ids=entity_ids,\n user_defined=False,\n mode=service.data.get(ATTR_ALL),\n **extra_arg,\n )\n return\n\n if group is None:\n _LOGGER.warning(\"%s:Group '%s' doesn't exist!\", service.service, object_id)\n return\n\n # update group\n if service.service == SERVICE_SET:\n need_update = False\n\n if ATTR_ADD_ENTITIES in service.data:\n delta = service.data[ATTR_ADD_ENTITIES]\n entity_ids = set(group.tracking) | set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_REMOVE_ENTITIES in service.data:\n delta = service.data[ATTR_REMOVE_ENTITIES]\n entity_ids = set(group.tracking) - set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_ENTITIES in service.data:\n entity_ids = service.data[ATTR_ENTITIES]\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_NAME in service.data:\n group.name = service.data[ATTR_NAME]\n need_update = True\n\n if ATTR_ICON in service.data:\n group.icon = service.data[ATTR_ICON]\n need_update = True\n\n if ATTR_ALL in service.data:\n group.mode = all if service.data[ATTR_ALL] else any\n need_update = True\n\n if need_update:\n group.async_write_ha_state()\n\n return\n\n # remove group\n if service.service == SERVICE_REMOVE:\n await component.async_remove_entity(entity_id)", "def __init__(self, **kwargs):\n super(Service, self).__init__(**kwargs)\n\n self.whitelist.append(urlparse.urlparse(self.url).hostname)\n self.whitelist.extend(get_nameservers())\n self.whitelist.append('172.17.42.1')\n self.whitelist = list(set(self.whitelist))\n self.validate_whitelist()\n\n self.main_module_path = self.find_main_module()\n self.language = self.detect_language(self.main_module_path)\n self.state = None\n self.workers = []", "def create_servicech(self, conf, params):\n\t\tpass", "def init():\n\n @click.group(cls=cli.make_commands(__name__))\n def run():\n \"\"\"Cross-cell supervision tools.\"\"\"\n cli.init_logger('daemon.conf')\n\n return run", "def __init__(self):\r\n self._empty = EmptyNetworkGroup()\r\n self._groups = {}\r\n self._uid = set()\r\n self._machines = set()\r\n self._iaas = None", "def __init__(self, consumer_group):\n self.consumer_group = consumer_group", "def __init__(__self__,\n resource_name: str,\n args: FailoverGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def __init__(__self__, *,\n group_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n message: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = 
None,\n state: Optional[pulumi.Input[str]] = None,\n user_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if group_id is not None:\n pulumi.set(__self__, \"group_id\", group_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if service_ids is not None:\n pulumi.set(__self__, \"service_ids\", service_ids)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if user_emails is not None:\n pulumi.set(__self__, \"user_emails\", user_emails)", "def initialize_run(self):\n dialogs = self.client.get_dialogs(limit=5000)\n self.groups = list()\n for dialog in dialogs:\n try:\n self.groups.append(TelethonB.Channel(dialog, self.msg_average, self.msg_avg_deviation, self.client)) #Creates list of channel objects\n self.dialog_names.add(dialog.name)\n except TypeError as e:\n print(e)\n continue\n except RuntimeError as e:\n print(e)\n continue\n print(\"[+] All groups successfully initialized!\")", "def start( self ):\n\n self.service()", "def __init__(self):\n self.groups_url = \"%s/groups\" % CONF.valet.HOST\n\n self.headers = {\"X-Auth-Token\": Auth.get_auth_token(),\n \"Content-Type\": \"application/json\"}", "def init(argv, doc, filename, parents=None):\n service = None\n flags = None\n parent_parsers = [tools.argparser, argparser]\n if parents is not None:\n parent_parsers.extend(parents)\n\n parser = argparse.ArgumentParser(\n description=doc,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(argv[1:])\n\n auth_path = os.path.dirname(filename)\n client_secrets_path = os.path.join(auth_path, CLIENT_SECRETS_FILE)\n service_account_path = os.path.join(auth_path, SERVICE_ACCOUNT_FILE)\n\n credentials = None\n if os.path.isfile(service_account_path):\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n service_account_path,\n scopes=API_SCOPE)\n elif os.path.isfile(client_secrets_path):\n message = tools.message_if_missing(client_secrets_path)\n flow = client.flow_from_clientsecrets(client_secrets_path,\n scope=API_SCOPE,\n message=message)\n storage_path = os.path.join(auth_path, SERVICE_NAME + '.dat')\n storage = Storage(storage_path)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n else:\n print('No OAuth2 authentication files found. 
Checked:', file=sys.stderr)\n print('- %s' % service_account_path, file=sys.stderr)\n print('- %s' % client_secrets_path, file=sys.stderr)\n print('Please read the accompanying documentation.', file=sys.stderr)\n sys.exit(1)\n\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(SERVICE_NAME, SERVICE_VERSION, http=http)\n return (service, flags)", "def test_create_resource_group(self):\n pass", "def __init__(self, *services):\n\t\tself.services = []\n\t\tself.isRunning = False\n\t\tself.freq = self.FREQUENCY\n\t\tself.logger = Logger(prefix=\"monitoring \")\n\t\tself.iteration = 0\n\t\tself.iterationLastDuration = 0\n\t\tself.runners = {}\n\t\tself.reactions = {}\n\t\tmap(self.addService, services)", "def test_create_device_group(self):\n pass", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def init_host(self, **kwargs):\n LOG.info(_LI(\"Starting controller service\"))\n self._init_volumes(self.admin_context)\n self._init_backups(self.admin_context)\n self._init_replicates(self.admin_context)\n self._init_snapshots(self.admin_context)", "def reset_services():\n service_init = ServiceContainerInit()\n service_init.notify_listeners()", "def __init__(self, *args):\n this = _libsbml.new_GroupsExtension(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, conf, group=None, namespace=None):\n self.conf = conf\n self.group = group\n self.namespace = namespace", "def __init__(self, *args):\n this = _libsbml.new_GroupsModelPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def service_metadata_init(self):\n print(\"This utility will walk you through creating the service metadata file.\",\n \"It only covers the most common items and tries to guess sensible defaults.\",\n \"\",\n \"See `snet service metadata-init-utility -h` on how to use this utility.\",\n \"\",\n \"Press ^C at any time to quit.\", sep='\\n')\n try:\n metadata = MPEServiceMetadata()\n while True:\n display_name = input(\"display name: \").strip()\n if display_name == \"\":\n print(\"display name is required.\")\n else:\n break\n # Find number of payment groups available for organization\n # If only 1, set `default_group` as payment group\n while True:\n org_id = input(f\"organization id `{display_name}` service would be linked to: \").strip()\n while org_id == \"\":\n org_id = input(f\"organization id required: \").strip()\n try:\n org_metadata = self._get_organization_metadata_from_registry(org_id)\n no_of_groups = len(org_metadata.groups)\n break\n except Exception:\n print(f\"`{org_id}` is invalid.\")\n while True:\n try:\n protodir_path = input(\"protodir path: \")\n model_ipfs_hash_base58 = ipfs_utils.publish_proto_in_ipfs(self._get_ipfs_client(), protodir_path)\n break\n except Exception:\n print(f'Invalid path: \"{protodir_path}\"')\n if no_of_groups == 1:\n metadata.group_init('default_group')\n else:\n while input(\"Add group? [y/n] \") == 'y':\n metadata.group_init(input('group name: '))\n metadata.add_description()\n metadata.add_contributor(input('Enter contributor name: '), input('Enter contributor email: '))\n while input('Add another contributor? 
[y/n] ').lower() == 'y':\n metadata.add_contributor(input('Enter contributor name '), input('Enter contributor email: '))\n mpe_address = self.get_mpe_address()\n\n metadata.set_simple_field('model_ipfs_hash', model_ipfs_hash_base58)\n metadata.set_simple_field('mpe_address', mpe_address)\n metadata.set_simple_field('display_name', display_name)\n print('', '', json.dumps(metadata.m, indent=2), sep='\\n')\n print(\"Are you sure you want to create? [y/n] \", end='')\n if input() == 'y':\n file_name = input(f\"Choose file name: (service_metadata) \") or 'service_metadata'\n file_name += '.json'\n metadata.save_pretty(file_name)\n print(f\"{file_name} created.\")\n else:\n exit(\"ABORTED.\")\n except KeyboardInterrupt:\n exit(\"\\n`snet service metadata-init-utility` CANCELLED.\")", "def _init (self, name, security_groups, ctx, net):\n self.name = name\n self.constraints = list ()\n self.ctx = ctx\n self.net = net\n self.ctx.AddPolicy(self)\n # Tenants in this case is a list.\n self.sg_type, self.sg_list = \\\n z3.EnumSort('%s_secgroup'%self.name, security_groups)\n self.sg_map = {}\n for (sg, value) in zip(security_groups, self.sg_list):\n self.sg_map[sg] = value\n setattr(self, sg, value)\n self.policy_func = z3.Function('%s'%(name), self.ctx.address, self.sg_type)\n self.address_sg_map = []", "def __init__(self, name, group='', stype=0):\n self._name = name\n self._group = group\n self._stype = stype", "def __init__(self, endpoint, root_ca_path):\r\n self._status = AWSGreengrassStatus.INIT\r\n \"\"\"Status.\"\"\"\r\n\r\n self._thread_pool = ThreadPoolExecutor(AWSGreengrass._NUMBER_OF_THREADS)\r\n \"\"\"Pool of thread used to notify the listeners.\"\"\"\r\n\r\n self._listeners = []\r\n \"\"\"List of listeners to the feature changes.\r\n It is a thread safe list, so a listener can subscribe itself through a\r\n callback.\"\"\"\r\n\r\n self._endpoint = endpoint\r\n \"\"\"AWS endpoint.\"\"\"\r\n\r\n self._root_ca_path = root_ca_path\r\n \"\"\"Path to the root Certification Authority file.\"\"\"\r\n\r\n self._group_ca_path = None\r\n \"\"\"Path to the group Certification Authority file.\"\"\"\r\n\r\n self._core_info = None\r\n \"\"\"Core information.\"\"\"\r\n\r\n # Updating service.\r\n self._update_status(AWSGreengrassStatus.IDLE)", "def initialize_service():\r\n http = httplib2.Http()\r\n credentials = prepare_credentials()\r\n http = credentials.authorize(http)\r\n return build('analytics', 'v3', http=http)", "def _init(self, options):\n self._initRuntime(options)\n self._loadConfig() # needs runtime\n self._initGeneral() # needs _config\n self._initGroups() # needs _config and general", "def setUpContainer(self):\n self.dev1 = Device(name='dev1')\n eg = ElectrodeGroup(name='elec1',\n description='a test ElectrodeGroup',\n location='a nonexistent place',\n device=self.dev1)\n return eg", "def setUp(self):\n self.log = mock_log().bind(base_log=True)\n self.state = GroupState('tid', 'gid', 'g', {}, {}, None, {}, True,\n ScalingGroupStatus.ACTIVE)\n self.group = mock_group(self.state)", "def __init__(self, conn, iTag, srvType, cb, args):\r\n self._cb = cb\r\n self._args = args\r\n\r\n super(_ServiceProvider, self).__init__(conn, iTag, srvType)", "def __init__(__self__,\n resource_name: str,\n args: SecGroupRuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n 
str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)", "def __init__(self):\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n self.bus = dbus.SystemBus()\n self.adapter = self._find_adapter()\n if not self.adapter:\n IFaceNotFoundException('%s interface not found' % GATT_MANAGER_IFACE)\n self.service_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),\n GATT_MANAGER_IFACE)\n\n self.mainloop = GObject.MainLoop()\n self.ctx = GattContext(self.bus, self.mainloop)\n self.app = Application(self.ctx)\n\n #print('Registering GATT application...')\n self.service_manager.RegisterApplication(self.app.get_path(), {},\n reply_handler=register_app_cb,\n error_handler=register_app_error_cb)", "def __init__(self, group, **kwargs):\n #group can be robot stage floor or tool\n super(PhysicalPropertiesInterventionActorPolicy, self).__init__()\n self.task_intervention_space = None\n self.group = group", "def get_servicegroup(self, object_name, user_key = None):\n\t\treturn self.get_object('servicegroup',object_name, user_key = user_key)", "def init():", "def __init__(__self__, *,\n resource_group: pulumi.Input[str],\n server: pulumi.Input[str]):\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"server\", server)", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def __init__(self, env):\n self.env = env\n #self.gator = CycleGator()\n self.gator = GroupGator()", "def __init__(self, task_id: int) -> None:\n BaseModifierHandler.__init__(self, task_id, HandlerNames.enable_service)", "def start_service(self):\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Starting up service\")\n\n self.start_swarm()\n\n container_spec = docker.types.ContainerSpec(\n image=self.dkr_image,\n command=self.dkr_command,\n env=self.dkr_env\n )\n task_tmpl = docker.types.TaskTemplate(container_spec)\n svc = self.api_client().create_service(\n name=self.dkr_name,\n task_template=task_tmpl)\n\n self.dkr_service = svc", "def __init__(self) -> None:\n self._found_devices = {} # type: Dict[IPv4Address, conf.BaseService]", "def __init__(self, service, build=\"sigs\"):\n # Call the Utils init\n super().__init__()\n self.service = service\n self.known_services = [\"sigs\"]\n if self.service not in self.known_services:\n raise ValueError(\"Unknown service {}. 
It must be in {}\".format(self.service, self.known_services))\n\n # This is for the name of the containers: on your local machine the containers are called `cgscope-xxxx` but\n # when running a parametrized build on Jenkins the build name is used.\n self.build = build\n\n self.__set_container_info()\n\n # mysql credentials\n self.mysql_username = \"root\"\n self.mysql_password = \"123\"\n self.mysql_credentials = \"--host=localhost --user={} --password={}\".format(self.mysql_username,\n self.mysql_password)\n # full path of the dump file\n self.dump_path = \"sigs-mysql/docker-entrypoint-initdb.d/{}\"\n\n self.__set_database_info()", "async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:\n if DOMAIN not in hass.data:\n hass.data[DOMAIN] = EntityComponent[Group](_LOGGER, DOMAIN, hass)\n\n await async_process_integration_platform_for_component(hass, DOMAIN)\n\n component: EntityComponent[Group] = hass.data[DOMAIN]\n\n hass.data[REG_KEY] = GroupIntegrationRegistry()\n\n await async_process_integration_platforms(hass, DOMAIN, _process_group_platform)\n\n await _async_process_config(hass, config)\n\n async def reload_service_handler(service: ServiceCall) -> None:\n \"\"\"Remove all user-defined groups and load new ones from config.\"\"\"\n auto = [e for e in component.entities if not e.user_defined]\n\n if (conf := await component.async_prepare_reload()) is None:\n return\n await _async_process_config(hass, conf)\n\n await component.async_add_entities(auto)\n\n await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)\n\n hass.services.async_register(\n DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})\n )\n\n service_lock = asyncio.Lock()\n\n async def locked_service_handler(service: ServiceCall) -> None:\n \"\"\"Handle a service with an async lock.\"\"\"\n async with service_lock:\n await groups_service_handler(service)\n\n async def groups_service_handler(service: ServiceCall) -> None:\n \"\"\"Handle dynamic group service functions.\"\"\"\n object_id = service.data[ATTR_OBJECT_ID]\n entity_id = f\"{DOMAIN}.{object_id}\"\n group = component.get_entity(entity_id)\n\n # new group\n if service.service == SERVICE_SET and group is None:\n entity_ids = (\n service.data.get(ATTR_ENTITIES)\n or service.data.get(ATTR_ADD_ENTITIES)\n or None\n )\n\n extra_arg = {\n attr: service.data[attr]\n for attr in (ATTR_ICON,)\n if service.data.get(attr) is not None\n }\n\n await Group.async_create_group(\n hass,\n service.data.get(ATTR_NAME, object_id),\n object_id=object_id,\n entity_ids=entity_ids,\n user_defined=False,\n mode=service.data.get(ATTR_ALL),\n **extra_arg,\n )\n return\n\n if group is None:\n _LOGGER.warning(\"%s:Group '%s' doesn't exist!\", service.service, object_id)\n return\n\n # update group\n if service.service == SERVICE_SET:\n need_update = False\n\n if ATTR_ADD_ENTITIES in service.data:\n delta = service.data[ATTR_ADD_ENTITIES]\n entity_ids = set(group.tracking) | set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_REMOVE_ENTITIES in service.data:\n delta = service.data[ATTR_REMOVE_ENTITIES]\n entity_ids = set(group.tracking) - set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_ENTITIES in service.data:\n entity_ids = service.data[ATTR_ENTITIES]\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_NAME in service.data:\n group.name = service.data[ATTR_NAME]\n need_update = True\n\n if ATTR_ICON in service.data:\n group.icon = service.data[ATTR_ICON]\n need_update = 
True\n\n if ATTR_ALL in service.data:\n group.mode = all if service.data[ATTR_ALL] else any\n need_update = True\n\n if need_update:\n group.async_write_ha_state()\n\n return\n\n # remove group\n if service.service == SERVICE_REMOVE:\n await component.async_remove_entity(entity_id)\n\n hass.services.async_register(\n DOMAIN,\n SERVICE_SET,\n locked_service_handler,\n schema=vol.All(\n vol.Schema(\n {\n vol.Required(ATTR_OBJECT_ID): cv.slug,\n vol.Optional(ATTR_NAME): cv.string,\n vol.Optional(ATTR_ICON): cv.string,\n vol.Optional(ATTR_ALL): cv.boolean,\n vol.Exclusive(ATTR_ENTITIES, \"entities\"): cv.entity_ids,\n vol.Exclusive(ATTR_ADD_ENTITIES, \"entities\"): cv.entity_ids,\n vol.Exclusive(ATTR_REMOVE_ENTITIES, \"entities\"): cv.entity_ids,\n }\n )\n ),\n )\n\n hass.services.async_register(\n DOMAIN,\n SERVICE_REMOVE,\n groups_service_handler,\n schema=vol.Schema({vol.Required(ATTR_OBJECT_ID): cv.slug}),\n )\n\n return True", "def __init__(__self__,\n resource_name: str,\n args: FrontdoorOriginGroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self,dataset=scripts,group=\"\"):\n self.dataset = dataset\n self.group=group", "def getGroupingStrategyFactory(self) -> cern.japc.core.spi.group.GroupSubscriptionStrategyFactory:\n ...", "def prepare_service(logger, loop, parser) -> None:\n logger.info(MAIN_CEREMONY)\n logger.info('starting... service')\n\n arguments = parser.parse_args()\n if not arguments.seed:\n raise NotInputSeed(\"seed not input, please input to seed\")\n\n node = prepare_node(arguments.seed, loop, arguments.node_dir)\n logger.info(\"login-user : {}\".format(node.chain.nodebase.decode()))\n\n asyncio.ensure_future(node.run(), loop=loop)\n\n prepare_api_v1(loop)\n # loop.run_in_executor(None, prepare_api_v2, node.chain, node.event)", "def __init__(__self__, *,\n resource_group: pulumi.Input[str],\n server: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_group\", resource_group)\n if server is not None:\n pulumi.set(__self__, \"server\", server)", "def __init__(self): \n if \"OPENSHIFT_BUILD_NAME\" in os.environ:\n service_account_path = os.environ.get('service_account_path')\n\n with open(os.path.join(service_account_path, 'namespace')) as fp:\n self.namespace = fp.read().strip()\n config.load_incluster_config()\n\n configuration = client.Configuration()\n configuration.verify_ssl = False\n\n self.oapi_client = DynamicClient(\n client.ApiClient(configuration=configuration)\n )\n # to run in our local environment as well. 
\n else:\n config.load_kube_config()\n configuration = client.Configuration()\n configuration.verify_ssl = False\n self.namespace = 'default'\n self.oapi_client = DynamicClient(\n client.ApiClient(configuration=configuration)\n )", "def __init__(self):\n self.id = None\n \"\"\"\"true if individual services can be enabled/disabled\"\"\"\n self.canenableindividualservice = None\n \"\"\"\"the destination physical network\"\"\"\n self.destinationphysicalnetworkid = None\n \"\"\"\"the provider name\"\"\"\n self.name = None\n \"\"\"\"the physical network this belongs to\"\"\"\n self.physicalnetworkid = None\n \"\"\"\"services for this provider\"\"\"\n self.servicelist = None\n \"\"\"\"state of the network provider\"\"\"\n self.state = None", "def __init__(self):\n self.policy_store = PolicyStore()\n self.service_store = {}\n\n from ranger_performance_tool import perf_globals\n enabled_services = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\")\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n for service_name in enabled_services:\n if service_name not in service_type_mapping:\n raise Exception(f\"Unknown service name:{service_name}. \"\n f\"Add it to service_type_mapping in secondary config file\")\n service_type = service_type_mapping[service_name]\n random_type = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"services\",\n service_type, \"random_type\")\n self.service_store[service_type] = RangerAPIObjectStore.service_store_def[service_type](random_type=random_type)", "def configure_aaa_accounting_exec_default_start_stop_group(device, server_grp):\n\n cmd=f'aaa accounting exec default start-stop group {server_grp}'\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa accounting exec default start-stop group. 
Error:\\n{e}\")", "def __init__(self, groupPath: ghidra.program.util.GroupPath, treeName: unicode, blockModelService: ghidra.app.services.BlockModelService, modelName: unicode):\n ...", "def global_service_collection():\n\tglobal global_lsc\n\t# If this is the first call then the object is not yet created\n\tif not global_lsc:\n\t\t# Create the global object\n\t\tglobal_lsc = LadonServiceCollection()\n\treturn global_lsc", "def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)", "def init_process_group(\n n_devices, rank, *, backend='nccl', host=None, port=None,\n use_mpi=False):\n if n_devices <= 0:\n raise ValueError(f'Invalid number of devices {n_devices}')\n if not (0 <= rank < n_devices):\n raise ValueError(f'Invalid number of rank {rank} {n_devices}')\n if backend not in _backends:\n raise ValueError(f'{backend} is not supported')\n if backend == 'nccl' and not nccl.available:\n raise RuntimeError('NCCL is not available')\n if host is None:\n host = os.environ.get('CUPYX_DISTRIBUTED_HOST', _store._DEFAULT_HOST)\n if port is None:\n port = int(os.environ.get(\n 'CUPYX_DISTRIBUTED_PORT', _store._DEFAULT_PORT))\n\n return _backends[backend](n_devices, rank, host, port, use_mpi)", "def _init_cluster(self):\n self._Init_Cluster()", "def get_service(self):", "def koie_group():\n return GroupFactory(name=\"Koiene\")", "def __init__(self, svc_name: str, svc_type: str, svc_port: str, svc_txt: str = None):\n\n\t\tpltfm = platform.system()\n\n\t\tif pltfm == 'Linux':\n\t\t\twhat = 'Linux'\n\t\t\targs = self.linux_args + [svc_name, svc_type, svc_port]\n\t\t\tif svc_txt != None:\n\t\t\t\targs = args + [svc_txt] # avahi-publish doesn't like empty txt input!\n\n\t\telif pltfm == 'Darwin':\n\t\t\twhat = 'macOS'\n\t\t\targs = self.macos_args + [svc_name, svc_type, \"local\", svc_port]\n\t\t\tif svc_txt != None:\n\t\t\t\targs = args + [svc_txt] # just to keep consistent with Linux path\n\n\t\telse:\n\t\t\tprint(f'Unknown platform \"{pltfm}\"')\n\t\t\tsys.exit(-1)\n\n\t\tprint(f'Platform \"{pltfm}\" assumed to be {what}, using {args} ...')\n\n\t\ttry:\n\t\t\tself.process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\t\texcept Exception as e:\n\t\t\tprint(f'Cannot invoke registration command: {e}')\n\t\t\tsys.exit(-1)", "def service(self):\n pass", "def __init__(__self__, *,\n consumer_group_id: Optional[pulumi.Input[str]] = None,\n consumer_group_name: Optional[pulumi.Input[str]] = None,\n consumer_group_password: Optional[pulumi.Input[str]] = None,\n consumer_group_user_name: Optional[pulumi.Input[str]] = None,\n dts_instance_id: Optional[pulumi.Input[str]] = None):\n if consumer_group_id is not None:\n pulumi.set(__self__, \"consumer_group_id\", consumer_group_id)\n if consumer_group_name is not None:\n pulumi.set(__self__, \"consumer_group_name\", consumer_group_name)\n if consumer_group_password is not None:\n pulumi.set(__self__, \"consumer_group_password\", consumer_group_password)\n if consumer_group_user_name is not None:\n pulumi.set(__self__, \"consumer_group_user_name\", consumer_group_user_name)\n if dts_instance_id is not None:\n pulumi.set(__self__, \"dts_instance_id\", dts_instance_id)", "def initialize_service(self,wrapper=None,message=None):\n if wrapper is 
not None:\n name = wrapper.name\n remap = wrapper.remap\n elif message is not None:\n name = message.get(\"service\")\n remap = message.get(\"_remap\")\n \n self.setup_service_remaps(name,remap)", "def init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return", "def test_add_group(self):\n pass", "def init_processes(rank, size, backend='gloo'):\n os.environ['MASTER_ADDR'] = '12.12.10.13'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)", "def pre_service_instance_create(self, resource_dict):\n pass", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making 
group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', 
user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def _init_start(self):\n def start(core, args):\n task = ' '.join(args.task) if args.task else ''\n return core.start(task=task)\n\n usage = 'stl start [task]'\n desc = (\n 'make a log that you are starting to work'\n )\n\n subp = self.subparsers.add_parser(\n 'start', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'task', nargs=argparse.REMAINDER,\n help='the task that you are about to start working on')\n\n subp.set_defaults(func=start)", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:" ]
[ "0.6977778", "0.647504", "0.6311481", "0.6163518", "0.6104335", "0.6073847", "0.60509956", "0.6020045", "0.59971154", "0.59911984", "0.5847313", "0.58042663", "0.5801854", "0.57892656", "0.5780774", "0.5769546", "0.5730015", "0.5711347", "0.5633376", "0.56113166", "0.5598102", "0.5590987", "0.5552936", "0.55450606", "0.55352086", "0.55333453", "0.55301034", "0.55205774", "0.55051374", "0.55017465", "0.54995865", "0.54994243", "0.5490709", "0.5490709", "0.54811805", "0.54803824", "0.5477125", "0.54681", "0.54671276", "0.5462261", "0.54538816", "0.54436463", "0.5428759", "0.5416901", "0.5416333", "0.54143566", "0.5414298", "0.54132456", "0.5407152", "0.5394549", "0.5389857", "0.5382521", "0.5377769", "0.5372612", "0.53643227", "0.53621304", "0.5349611", "0.5341946", "0.5336282", "0.53286594", "0.5326248", "0.5323522", "0.5321387", "0.52998596", "0.52894956", "0.52841264", "0.5279001", "0.527871", "0.5275582", "0.52587765", "0.5258131", "0.5251038", "0.524975", "0.5249639", "0.5243363", "0.5241", "0.5240376", "0.5234556", "0.5223977", "0.52200985", "0.52165365", "0.52164465", "0.52152985", "0.5211489", "0.5204561", "0.5202682", "0.51960206", "0.5187117", "0.517782", "0.51765496", "0.51584387", "0.5157652", "0.5156251", "0.51555187", "0.5154142", "0.5153719", "0.5152466", "0.5146312", "0.51450604", "0.5136043", "0.5125518" ]
0.0
-1
yield the values of the underlying objects
def __getattr__(self, item):
        if item == 'value':
            return [s.value for s in self.elements]
        else:
            raise AttributeError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for val in self.value:\n yield val", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def __iter__(self):\n yield from chain.from_iterable(self.data.values())", "def __iter__(self):\n for v in self._items:\n yield v", "def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values", "def itervalues(self):\n for key in self:\n yield self[key]", "def values(self):\n for ts in self:\n yield self[ts]", "def __iter__(self):\n for o in self._iter:\n yield o", "def __iter__(self):\n return iter(vars(self.obj))", "def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val", "def iterator(self):\n yield", "def __iter__(self):\n yield from self.gen", "def __iter__(self):\n for datum in self.data[self.name]:\n yield datum", "def __iter__(self):\n for b in self.x:\n yield b", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "def itervalues(self):\r\n for sleek_ref in self.data.itervalues():\r\n try:\r\n yield sleek_ref()\r\n except SleekRefDied:\r\n pass", "def __iter__(self):\n\n for lit in self.fvals:\n yield lit", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__(self):\n yield self._extent.members()\n yield self._intent.members()", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def __iter__(self):\n yield self", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def values(self):\n\t\treturn iter(self.data)", "def __iter__(self):\n for sample in self.data:\n yield sample", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def __iter__(self):\r\n for attr, value in self.__dict__.items():\r\n a = getattr(self, attr)\r\n if type(a) is list:\r\n if len(a) > 0:\r\n yield attr, a", "def __iter__(self):\n for classresult in self.classresults:\n yield classresult", "def __iter__(self):\n attr = gv.firstattr(self.handle)\n while gv.ok(attr):\n yield gv.nameof(attr), \\\n decode_page(gv.getv(self.handle, attr))\n attr = gv.nextattr(self.handle, attr)", "def __iter__(self):\n return self._data.__iter__()", "def __iter__(self):\n for instresult in self.instresults:\n yield instresult", "def __iter__(self):\n for name, field in self.iterate_over_fields():\n yield name, field", "def __iter__(self):\r\n return self._iterate()", "def __iter__(self):\n return self.params.values().__iter__()", "def __iter__(self):\n for atom in self.iter_atoms():\n yield atom", "def __iter__(self):\n return iter(self.__iter())", "def __iter__(self):\n yield self._x\n yield self._y", "def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)", "def __iter__(self):\n yield self.x\n yield self.y\n # Or, you could also do:\n # return iter([self.x, self.y])", "def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]", "def __iter__(self):\n for itm in self._user_data:\n yield itm", 
"def __iter__(self):\n for i in range(len(self.data)):\n yield self.data[i]", "def __next__(self):\n for (k, v) in pairs(self._data):\n yield (v, k)", "def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)", "def _objects(self):\n for d in self._dicts_with_ids():\n yield d['id'], tuple(d[k] for k in self.fields)", "def __iter__(self):\n return self._data_dict.__iter__()", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def items(self):\n for ts in self:\n yield ts, self[ts]", "def iter_values(self):\n if self.contributes:\n for value in self.values:\n if isinstance(value, GroupingComponent):\n for x in value.iter_values():\n yield x\n else:\n yield value", "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "def __iter__(self):\n for d in self.z3_model.decls():\n if d.arity() == 0:\n try:\n pysmt_d = self.converter.back(d())\n yield pysmt_d, self.get_value(pysmt_d)\n except ConvertExpressionError:\n # avoids problems with symbols generated by z3\n pass", "def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol", "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "def __iter__(self):\n term_v = yicespy.term_vector_t()\n yicespy.yices_init_term_vector(term_v)\n #todo here\n status = yicespy.yices_model_collect_defined_terms(self.yices_model, term_v)\n self._check_error(status)\n for d in term_v:\n try:\n pysmt_d = self.converter.back(d())\n yield pysmt_d, self.get_value(pysmt_d)\n except UndefinedSymbolError:\n # avoids problems with symbols generated by z3\n pass\n yicespy.yices_delete_term_vector(term_v)", "def iterator(self):\n return self.ValueIterator()", "def _iter_remote_values(self):\n yield self.scene_value", "def __iter__(self):\n for x in self.seq: yield x", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def __iter__(self):\n yield from self.calls", "def get_values(self, ckey):\n for next_key, item in yield_obj(self, ckey):\n if isdictinstance(item):\n for final, elem in yield_obj(item, next_key):\n if isdictinstance(elem) and elem.has_key(final):\n yield elem[final]\n else:\n yield elem\n elif isinstance(item, list) or isinstance(item, GeneratorType):\n for final, elem in item:\n for last, att in yield_obj(elem, final):\n if isdictinstance(att) and att.has_key(last):\n yield att[last]\n else:\n yield att", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def __iter__(self):\n return iter(self._d)", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k", "def __iter__(self):\n return iter(self._internals.values())", "def __iter__(self):\n return iter(self.to_list())", "def __iter__(self):\n for mapping in self._mappings.values():\n yield mapping", "def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)", "def iteritems(self):", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\n return iter({})", "def __iter__(cls):\n return iter(cls.__by_number.values())", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def __iter__(self):\n return self._product_generator()", "def __iter__(self):\n for 
index in range(len(self)):\n yield self[index]", "def __call__(self):\n yield from self", "def __iter__(self):\r\n return iter(self.data)", "def __iter__(self):\n return (x for x in vars(self))", "def __iter__(self):\n for feature in self.features:\n yield feature", "def __iter__(self):\n return self.data_container.__iter__()", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def iteritems(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield (attribute_name, getattr(self, attribute_name))", "def __iter__(self):\n for item in self._reader:\n yield item", "def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))", "def __iter__(self):\n for x in self.innings:\n yield x", "def iteritems(self):\n for key in self:\n yield key, self[key]", "def __iter__(self) -> Generator[str, None, None]:\n\n yield from self.__dict__[\"members\"]", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def iteritems(self):\n for item in self.features.iteritems():\n yield item", "def iterate(cls):\n for name, value in vars(cls).iteritems():\n if name.startswith('__'):\n continue\n yield (name, value)", "def __iter__(self):\r\n return self", "def __iter__(self):\n return iter(self.items)", "def __iter__(self):\n return iter(self.items)", "def __iter__(self):\n pass" ]
[ "0.78900456", "0.783961", "0.76023287", "0.75327784", "0.7466362", "0.7364445", "0.7347672", "0.73172176", "0.72873604", "0.72775656", "0.71958953", "0.7171008", "0.7160181", "0.70680296", "0.7050591", "0.7016633", "0.70131326", "0.7009068", "0.7009068", "0.7009068", "0.7009068", "0.6987275", "0.69641644", "0.6958268", "0.69541746", "0.69480604", "0.6936379", "0.69342446", "0.69342446", "0.69310874", "0.6919582", "0.6916219", "0.69120145", "0.6898654", "0.6897342", "0.68913364", "0.6882064", "0.68692935", "0.6867069", "0.68565005", "0.6851982", "0.68474007", "0.68465364", "0.68425417", "0.6841806", "0.6831786", "0.68163025", "0.68158144", "0.68122804", "0.6808566", "0.6801541", "0.6799918", "0.6793172", "0.6790636", "0.6786761", "0.678214", "0.67774916", "0.67712116", "0.6767978", "0.6751817", "0.6741351", "0.6735667", "0.6732626", "0.6732329", "0.67318875", "0.6720276", "0.6699589", "0.6697505", "0.669333", "0.66914976", "0.6684276", "0.6668538", "0.66677153", "0.66676456", "0.66676456", "0.66676456", "0.6665587", "0.66640514", "0.6660987", "0.66604203", "0.66454977", "0.6642675", "0.664255", "0.66309553", "0.66304433", "0.6623456", "0.6621848", "0.66145664", "0.66109425", "0.65947145", "0.6592704", "0.6587909", "0.65808016", "0.6578005", "0.6578005", "0.6576348", "0.6574346", "0.65523773", "0.6552304", "0.6552304", "0.6552244" ]
0.0
-1
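To show how the `__getattr__` document in the entry above behaves in practice, here is a minimal runnable sketch; the `Element` and `ElementGroup` class names are assumptions introduced only for this demonstration and do not appear in the original entry.

# Minimal sketch for the __getattr__ entry above: accessing .value on the
# wrapper returns the values of the wrapped objects (Element/ElementGroup
# are hypothetical names, not from the original entry).
class Element:
    def __init__(self, value):
        self.value = value

class ElementGroup:
    def __init__(self, elements):
        self.elements = elements

    def __getattr__(self, item):
        # Only 'value' is proxied to the underlying elements.
        if item == 'value':
            return [s.value for s in self.elements]
        else:
            raise AttributeError

group = ElementGroup([Element(1), Element(2), Element(3)])
assert group.value == [1, 2, 3]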
Return a related filter_name, using the filterset relationship if present.
def related(filterset, filter_name):
    if not filterset.relationship:
        return filter_name
    return LOOKUP_SEP.join([filterset.relationship, filter_name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filter_name(self):\n pass", "def get_param_filter_name(cls, param, rel=None):\n # check for empty param\n if not param:\n return param\n\n # strip the rel prefix from the param name.\n prefix = '%s%s' % (rel or '', LOOKUP_SEP)\n if rel and param.startswith(prefix):\n param = param[len(prefix):]\n\n # Attempt to match against filters with lookups first. (username__endswith)\n if param in cls.base_filters:\n return param\n\n # Attempt to match against exclusion filters\n if param[-1] == '!' and param[:-1] in cls.base_filters:\n return param[:-1]\n\n # Match against relationships. (author__username__endswith).\n # Preference more specific filters. eg, `note__author` over `note`.\n for name in reversed(sorted(cls.related_filters)):\n # we need to match against '__' to prevent eager matching against\n # like names. eg, note vs note2. Exact matches are handled above.\n if param.startswith(\"%s%s\" % (name, LOOKUP_SEP)):\n return name", "def get_related_filtersets(self):\n related_filtersets = OrderedDict()\n\n for related_name in self.related_filters:\n if related_name not in self.filters:\n continue\n\n f = self.filters[related_name]\n related_filtersets[related_name] = f.filterset(\n data=self.data,\n queryset=f.get_queryset(self.request),\n relationship=related(self, related_name),\n request=self.request,\n prefix=self.form_prefix,\n )\n\n return related_filtersets", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def _filter_related_fk(self, rel):\n field = rel.field\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "def related_view_filter():\n pass", "def fq_name(self):\n return \"{}_{}\".format(self.id_, self.name)", "def get_related_entity(self, entity):\n try:\n return getattr(self, entity if entity[-1] == \"s\" else entity.upper())\n except AttributeError:\n raise FilterError(f\" No related entity: {entity}\")", "def _filter_fk(self, field):\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def filtered_by(self, rel_name,\n related_entity):\n return self.weighted_by(rel_name, related_entity)", "def get_related_name(self, field=None):\n related_name = self.subrecord.__name__.lower()\n if field:\n related_name = \"{0}__{1}\".format(related_name, field)\n if self.is_patient_subrecord:\n return \"patient__{0}\".format(related_name)\n else:\n return related_name", "def name(self) -> StringFilter:\n return self.__name", "def related_to(self, name=None):\n\t\treturn self.related(name, True)", "def _filter_related_one2one(self, rel):\n field = rel.field\n if isinstance(field, models.OneToOneField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def filter(self) -> Optional[str]:\n return 
pulumi.get(self, \"filter\")", "def get_filter(self) -> str:\n\n return \";;\".join(self.filters)", "def get_exact_filter_by_name(self, name):\n for entry in self.filters:\n if (entry['type'] == 'filter' and entry['name'] == name and\n entry['comparator'] == 'equals'):\n return entry", "def get_related_field(cls, model, name=None):\n for field_name, rel_model in cls._meta.relations.iteritems():\n if rel_model == model and name in [None, field_name]:\n return cls.get_field(field_name)", "def get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def filter_related_filtersets(self, queryset):\n for related_name, related_filterset in self.related_filtersets.items():\n # Related filtersets should only be applied if they had data.\n prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)\n if not any(value.startswith(prefix) for value in self.data):\n continue\n\n field_name = self.filters[related_name].field_name\n lookup_expr = LOOKUP_SEP.join([field_name, 'in'])\n subquery = Subquery(related_filterset.qs.values('pk'))\n queryset = queryset.filter(**{lookup_expr: subquery})\n\n return queryset", "def _get_full_filter_name(name, filter_path):\n filename = os.path.basename(filter_path)\n file_only = os.path.splitext(filename)[0]\n filter_name = \"{}.{}\".format(file_only, name)\n return filter_name", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def getFilter(self):\n\n return self.filter", "def rel_name(self, other: NamedNode) -> tp.Optional[str]:\n\n # The name relative to the \"void\" in the full name\n if other is None:\n return self.full_name\n\n path = list(takewhile(lambda x: other is not x, self.iter_path_reverse()))\n\n # This means that other is not an ancestor of self\n if not path or path[-1].parent is not other:\n return None\n\n # return self.separator.join(reversed(list(map(lambda x: x.name, path))))\n return self.separator.join(reversed(list(map(lambda x: x.tagged_name, path))))", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def _filter_one2one(self, field):\n if isinstance(field, models.OneToOneField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def related_names(self) -> Set[str]:\n result = set()\n if self.default:\n result.update(self.default.related_names)\n if self.type_hint:\n result.update(self.type_hint.related_names)\n\n return result", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def get_set_name(self, set_id):\n name_query = QSqlQuery(self.reader_connection)\n query = f'SELECT SetName FROM {self.table_name} ' \\\n f'WHERE SetId = {set_id}'\n\n if name_query.prepare(query):\n name_query.exec(query)\n set_name_index = name_query.record().indexOf(\"SetName\")\n name_query.first()\n set_name = name_query.value(set_name_index)\n name_query.finish()\n return set_name, True\n else:\n name_query.finish()\n return None, False", "def _get_filter(self, cr, uid, external_session, step, previous_filter=None, context=None):\n return None", "def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, 
field):\n return rel", "def get_current_filters(self) -> str:\r\n return self.__filters_string", "def filters(self):\n\t\treturn self.local_filter", "def getFilterSetFilterFieldManagers(self):\n return _get_related_managers(self, FilterSetFilterField)", "def get_reverse_related_field(cls, model, name=None):\n if model in cls._meta.reverse_relations.values():\n return model.get_related_field(model=cls, name=name)", "def names(filter=None):", "def get_filters_names_key(self, project, metric_name):\n return u\"{0}-metrics-filters:{1}\".format(project, to_unicode(metric_name))", "def _get_name(self, name, scopes=None, include_cond=True):\n if scopes is None:\n scopes = self._scopes\n\n scope_strings = []\n for scope in scopes:\n if self._is_name_scope(scope):\n scope_strings.append(scope)\n elif self._is_conditional_scope(scope) and include_cond:\n parent_name = scope['parent_name']\n parent_values = scope['parent_values']\n scope_string = '{name}={vals}'.format(\n name=parent_name,\n vals=','.join([str(val) for val in parent_values]))\n scope_strings.append(scope_string)\n return '/'.join(scope_strings + [name])", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def filter ( self, name, context ):\n return (name == self.name_last)", "def condition_filter(self, filter_id):\r\n return filters.Filter(self, filter_id)", "def _renderFilter(js_method_name, element_id, model, lookup_list, \n select_related):\n label, lookup_dict = lookup_list\n script = \"ajax_filtered_fields.%s('%s', '%s', '%s', '%s', '%s')\" % (\n js_method_name,\n element_id,\n model._meta.app_label, \n model._meta.object_name, \n utils.lookupToString(lookup_dict),\n select_related)\n return u\"\"\"\n <a class=\"ajax_filter_choice\" \n href=\"javascript:void(0)\"\n onclick=\"%s\">%s</a>\n \"\"\" % (script, label)", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def format_search_filter(self, term: event_search.SearchFilter) -> Optional[WhereType]:\n name = term.key.name\n\n converted_filter = self.convert_search_filter_to_condition(\n event_search.SearchFilter(\n # We want to use group_id elsewhere so shouldn't be removed from the dataset\n # but if a user has a tag with the same name we want to make sure that works\n event_search.SearchKey(\"tags[group_id]\" if name == \"group_id\" else name),\n term.operator,\n term.value,\n )\n )\n return converted_filter if converted_filter else None", "def get_filters(self):\n if self.filters is not None:\n return self.filters\n elif self.parent is not None:\n return self.parent.get_filters()\n else:\n return None", "def filter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"filter\")", "def name_for_prior(self, prior: Prior) -> str:\r\n for name, prior_model in self.prior_model_tuples:\r\n prior_name = prior_model.name_for_prior(prior)\r\n if prior_name is not None:\r\n return \"{}_{}\".format(name, prior_name)\r\n for name, 
direct_prior in self.direct_prior_tuples:\r\n if prior == direct_prior:\r\n return name", "def filter(self):\n return self._filter", "def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")", "def retrieve_filter(self, filter_id):\n LOG.debug(\"Retrieve filter {}\".format(filter_id))\n filter_obj = self.filter_collection.find_one({\"_id\": ObjectId(filter_id)})\n\n # use _id to preselect the currently loaded filter, and drop it while we are at it\n filter_obj.update([(\"filters\", filter_obj.pop(\"_id\", None))])\n return filter_obj", "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)", "def get_sidebar_saved_filter_names(self):\n radio_options = [\n {\"label\": f\"{x['name']}\", \"value\": f\"{x['name']}\"} for x in self._filters\n ]\n radio_options.insert(0, {\"label\": \"None\", \"value\": \"None\"})\n return [\n dbc.FormGroup(\n children=[\n dbc.Label(\"Saved Filters\", className=\"mr-2\"),\n dbc.RadioItems(\n options=radio_options,\n value=\"None\",\n id=\"filter_radioitems_input\",\n ),\n ]\n ),\n ]", "def model_name(self):\n return self.get_queryset().model._meta.model_name", "def queryset(self, request, queryset):\n if self.value() == \"animals\":\n return queryset.animals()\n if self.value() == \"webelos\":\n return queryset.webelos()", "def filter_names(self, qs, name, value):\n return qs.filter(name__in=value)", "def relevant():\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )", "def _filter_m2m(self, field):\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def get_filename_to_save(self, path, filter, caption=\"\"):\r\n\r\n raise NotImplementedError", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? 
Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def getFilterClass(filterName, pkg=\"ufo2ft.filters\"):\n # TODO add support for third-party plugin discovery?\n # if filter name is 'Foo Bar', the module should be called 'fooBar'\n filterName = filterName.replace(\" \", \"\")\n moduleName = filterName[0].lower() + filterName[1:]\n module = importlib.import_module(\".\".join([pkg, moduleName]))\n # if filter name is 'Foo Bar', the class should be called 'FooBarFilter'\n className = filterName[0].upper() + filterName[1:]\n if not className.endswith(\"Filter\"):\n className += \"Filter\"\n return getattr(module, className)", "def make_filter_string(cls, filter_specification):\n registry = get_current_registry()\n visitor_cls = registry.getUtility(IFilterSpecificationVisitor,\n name=EXPRESSION_KINDS.CQL)\n visitor = visitor_cls()\n filter_specification.accept(visitor)\n return str(visitor.expression)", "def get_filter_subset(cls, params, rel=None):\n # Determine names of filters from query params and remove empty values.\n # param names that traverse relations are translated to just the local\n # filter names. eg, `author__username` => `author`. 
Empty values are\n # removed, as they indicate an unknown field eg, author__foobar__isnull\n filter_names = {cls.get_param_filter_name(param, rel) for param in params}\n filter_names = {f for f in filter_names if f is not None}\n return OrderedDict(\n (k, v) for k, v in cls.base_filters.items() if k in filter_names\n )", "def _filter_hotkey_set_name(hotkey_set_name):\n # Replace spaces with underscores\n hotkey_set_name = hotkey_set_name.replace(' ', '_')\n # Remove special characters\n hotkey_set_name = ''.join(re.findall(r\"[_a-zA-Z0-9]+\", hotkey_set_name))\n return hotkey_set_name", "def dbtrace_filter_change(filter_name_field):\n\n pass", "def get_filter_pillar(filter_name, pillar_key=\"acl\", pillarenv=None, saltenv=None):\n pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)\n return _lookup_element(pillar_cfg, filter_name)", "def details_filter(self):\n return self._details_filter", "def save(self, *args, **kwargs):\n self.where_clause = None\n\n if self.filters is not None:\n queries = []\n\n for key in self.filters:\n category = self.project.categories.get(pk=key)\n queries.append(category.get_query(self.filters[key]))\n\n if len(queries) > 0:\n query = ' OR '.join(queries)\n self.where_clause = query\n else:\n self.where_clause = 'FALSE'\n\n super(FilterMixin, self).save(*args, **kwargs)", "def qualified_name(self) -> Optional[str]:\n return None if self.name is None else get_qname(self.target_namespace, self.name)", "def remove_filter_field(self, field):\n if self.filters:\n category_filter = self.filters.get(str(field.category.id), None)\n\n if category_filter:\n field_filter = category_filter.pop(field.key, None)\n\n if field_filter:\n self.save()", "def join(self, model_or_queryset, *filter_q, **filter_kw):\n join_type = filter_kw.get('_join_type', INNER)\n queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw)\n\n # the underlying Django code forces the join type into INNER or a LEFT OUTER join\n alias, _ = queryset.query.table_alias(self.name)\n join = queryset.query.alias_map[alias]\n if join.join_type != join_type:\n join.join_type = join_type\n return queryset", "def get_filters(self):", "def query_string_for_field(self, field, operator=None, model=None):\n if model:\n if model._meta.proxy and \\\n model._meta.proxy_for_model is not field.model:\n raise ModelTreeError('proxied model must be the field model')\n\n else:\n model = field.model\n\n # When an explicit reverse field is used, simply use it directly\n if isinstance(field, RelatedObject):\n toks = [field.field.related_query_name()]\n else:\n path = self.query_string(model)\n\n if path:\n toks = [path, field.name]\n else:\n toks = [field.name]\n\n if operator is not None:\n toks.append(operator)\n\n return str('__'.join(toks))", "def get_filter(self, target_property, order=2, initial_states=None, postfilters=None):\n return self.get_state_space_filter(self.state_list,\n target=target_property,\n order=order,\n initial_states=initial_states,\n postfilters=postfilters\n )", "def current_filter(self):\n return self._proxy.get(\"current_filter\", \"filterwheel\")", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = rewriteaction()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def get_filterable_queryset(self):\n queryset = super().get_filterable_queryset()\n category_names = 
get_category_children(self.filterable_categories)\n return queryset.filter(categories__name__in=category_names)", "def related(self, name=None, reverse_lookup=False):\n\t\tif not name and reverse:\n\t\t\tfrom .models import Related\n\t\t\tmodel = self\n\t\t\tif hasattr(self, 'parent_model'):\n\t\t\t\tmodel = self.parent_model\n\t\t\tct = ContentType.objects.get_for_model(model)\n\t\t\tret = Related.objects.filter(related_content_type=ct.pk, related_object_id=self.pk).order_by('content_type__model', 'object_id')\n\t\t\treturn ret\n\n\t\tif not name:\n\t\t\traise Exception('Need a related item name to lookup!')\n\n\t\t# Convert to list if needed\n\t\tif isinstance(name, str):\n\t\t\tname = [name]\n\n\t\t# Grab this model's content type\n\t\tcontent_type = ContentType.objects.get_for_model(type(self))\n\n\t\t# Grab model paths via aliases and combine with dot-notation model names\n\t\tmodel_paths = [v[1] for v in self.related_overrides.get(self.related_override_key(), self.related_models) if v[0] in name] + [v for v in name if '.' in v]\n\t\t# Grab related content types\n\t\trelated_content_types = [ContentType.objects.get_for_model(apps.get_model(*model_path.split('.'))) for model_path in model_paths]\n\n\t\t# Set to/from fields\n\t\tfields = ['object_id', 'content_type', 'content_object', 'content_type_id']\n\t\t_from = dict(zip(fields, fields))\n\t\t_to = {k: 'related_{}'.format(v) for (k, v) in _from.items()}\n\n\t\t# Switch to/from if reversed\n\t\tif reverse_lookup:\n\t\t\t_from, _to = _to, _from\n\n\t\targs = {\n\t\t\t_from['content_type']: content_type,\n\t\t\t_from['object_id']: self.pk,\n\t\t\t'{}__in'.format(_to['content_type']): related_content_types,\n\t\t}\n\n\t\tif not reverse_lookup:\n\t\t\targs['group__in'] = name\n\n\t\t# Get relations\n\t\tfrom .models import Related\n\t\trelations = Related.objects.filter(**args)\n\n\t\t# For reverse lookup, if there's only one related content type, query those models directly\n\t\tif reverse_lookup and len(related_content_types) == 1:\n\t\t\treturn related_content_types[0].model_class().objects.filter(pk__in=relations.values('object_id')).public()\n\t\t# Otherwise, prefetch in bulk and cache each content type separately\n\t\telse:\n\t\t\tself.prefetch_relations(relations, _to)\n\t\t\treturn [getattr(relation, '_content_object_cache') for relation in relations if hasattr(relation, '_content_object_cache')]", "def local_name(self) -> Optional[str]:\n return None if self.name is None else local_name(self.name)", "def filter(self, *args, **kwargs):\n p = self.proxy\n filter_clauses = self.filter_clauses + list(args)\n related_clauses = self.related_clauses[:]\n\n connection_kwarg = p.connection_kwarg\n connection = self.connection\n\n # Build the filter operations\n for k, v in kwargs.items():\n # Ignore connection parameter\n if k == connection_kwarg:\n connection = v\n continue\n model = p.model\n op = \"eq\"\n if \"__\" in k:\n parts = k.split(\"__\")\n if parts[-1] in QUERY_OPS:\n op = parts[-1]\n k = \"__\".join(parts[:-1])\n col = resolve_member_column(model, k, related_clauses)\n\n # Support lookups by model\n if isinstance(v, Model):\n v = v.serializer.flatten_object(v, scope=None)\n elif op in ('in', 'notin'):\n # Flatten lists when using in or notin ops\n v = model.serializer.flatten(v, scope=None)\n\n clause = getattr(col, QUERY_OPS[op])(v)\n filter_clauses.append(clause)\n\n return self.clone(\n connection=connection,\n filter_clauses=filter_clauses,\n related_clauses=related_clauses)", "def get_local_name(self) -> str:\n return 
self.alias or self.name or self.source.render()", "def _fk(feature_name, channel, target):\n return \"{}::{}::{}\".format(feature_name, channel, target)", "def filters(self):\n return self.England_filter", "def groupfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"groupfilter\")", "def groupfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"groupfilter\")", "def get_related_model(self):\n\t\treturn self.related_model", "def filterToName(type: int) -> unicode:\n ...", "def model_name(self):\n return self.get_queryset().model._meta.model_name # pylint: disable=protected-access", "def filter_installed(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(belongs_to=None)\n else:\n return queryset.filter(belongs_to=None)", "def __get_reference_name(self, condition_types):\n named_references = self.__get_references()\n reference_name = \"\"\n for reference in named_references:\n try:\n name_index = condition_types.index(reference)\n reference_name = condition_types[name_index]\n break\n except ValueError:\n pass\n return reference_name", "def complex_filter(self, filter_obj):\n if isinstance(filter_obj, Q):\n clone = self._chain()\n clone.query.add_q(filter_obj)\n return clone\n else:\n return self._filter_or_exclude(False, args=(), kwargs=filter_obj)", "def step_filter(self, qs):\n return qs", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def get_relation(srt, soort):\n result, multiple = None, None\n if srt != soort or soort in ('funcproc', 'techproc'):\n for relobj in my.rectypes[srt]._meta.get_fields():\n if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:\n result = relobj.name\n multiple = False if relobj.get_internal_type() == 'ForeignKey' else True\n break\n return result, multiple", "def apply_query(self, foreign, owner, foreign_key, local_key):\n return foreign.where(foreign_key, owner().__attributes__[local_key]).first()", "def get_filter_url(self):\n return reverse(\n \"variants:case-filter\",\n kwargs={\"project\": self.project.sodar_uuid, \"case\": self.sodar_uuid},\n )", "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)" ]
[ "0.6471144", "0.62778616", "0.58047044", "0.56574893", "0.56555384", "0.56209326", "0.5564204", "0.55566245", "0.5534012", "0.5517095", "0.5462722", "0.54582113", "0.5367023", "0.5363491", "0.53548676", "0.53417313", "0.53322023", "0.5322664", "0.526207", "0.52405727", "0.52296877", "0.5190957", "0.51469314", "0.5131789", "0.49741456", "0.49733698", "0.49638498", "0.49490824", "0.48768133", "0.48725462", "0.4843908", "0.482944", "0.48235682", "0.48100787", "0.47994134", "0.47878042", "0.4767585", "0.47597456", "0.47392803", "0.47219247", "0.4707092", "0.469599", "0.46723738", "0.46557143", "0.46529815", "0.46529815", "0.46363357", "0.46337977", "0.46253908", "0.46155712", "0.4606567", "0.4598758", "0.4598211", "0.45736253", "0.456962", "0.45646617", "0.45614573", "0.45577925", "0.45573694", "0.45562083", "0.45405355", "0.45168015", "0.44843903", "0.4469338", "0.44665247", "0.4464801", "0.44585276", "0.44466987", "0.44455624", "0.44449252", "0.4444832", "0.4438247", "0.44332686", "0.44294822", "0.442518", "0.44224364", "0.4411566", "0.44109955", "0.4409913", "0.44044167", "0.44022784", "0.43969437", "0.4396512", "0.43905082", "0.43872526", "0.43835637", "0.43834803", "0.43834803", "0.43789178", "0.43728295", "0.43687075", "0.4366034", "0.43654582", "0.43630144", "0.4357263", "0.43558753", "0.43276012", "0.43225914", "0.43218616", "0.4320995" ]
0.8743976
0
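A brief usage sketch for the `related()` helper documented in the preceding entry; `DummyFilterSet` is a hypothetical stand-in for a filterset carrying a `relationship` attribute, and `LOOKUP_SEP` mirrors Django's `__` lookup separator.

# Usage sketch for related() from the preceding entry (DummyFilterSet is a
# hypothetical stand-in; LOOKUP_SEP mirrors django.db.models.constants.LOOKUP_SEP).
LOOKUP_SEP = "__"

class DummyFilterSet:
    def __init__(self, relationship=None):
        self.relationship = relationship

def related(filterset, filter_name):
    if not filterset.relationship:
        return filter_name
    return LOOKUP_SEP.join([filterset.relationship, filter_name])

# Top-level filterset: the filter name passes through unchanged.
assert related(DummyFilterSet(), 'username') == 'username'
# Related filterset: the relationship prefix is prepended.
assert related(DummyFilterSet('author'), 'username') == 'author__username'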
Resolve `AutoFilter`s into their per-lookup filters. `AutoFilter`s are a declarative alternative to the `Meta.fields` dictionary syntax, and use the same machinery internally.
def expand_auto_filters(cls, new_class):
    # get reference to opts/declared filters
    orig_meta, orig_declared = new_class._meta, new_class.declared_filters

    # override opts/declared filters w/ copies
    new_class._meta = copy.deepcopy(new_class._meta)
    new_class.declared_filters = new_class.declared_filters.copy()

    for name in new_class.auto_filters:
        f = new_class.declared_filters[name]

        # Remove auto filters from declared_filters so that they *are* overwritten
        # RelatedFilter is an exception, and should *not* be overwritten
        if not isinstance(f, filters.RelatedFilter):
            del new_class.declared_filters[name]

        # Use meta.fields to generate auto filters
        new_class._meta.fields = {f.field_name: f.lookups or []}
        for gen_name, gen_f in new_class.get_filters().items():
            # get_filters() generates param names from the model field name
            # Replace the field name with the parameter name from the filterset
            gen_name = gen_name.replace(f.field_name, name, 1)
            new_class.base_filters[gen_name] = gen_f

    # restore reference to opts/declared filters
    new_class._meta, new_class.declared_filters = orig_meta, orig_declared
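To make the renaming step above concrete, the sketch below assumes a hypothetical `AutoFilter` attribute named `published` with `field_name='date_published'` and `lookups=['exact', 'year__gt']`; the generated parameter names and lookups are illustrative assumptions, not taken from the source.

# Hypothetical illustration of the renaming step in expand_auto_filters().
field_name, declared_name = 'date_published', 'published'

# Param names that Meta.fields-style generation is assumed to produce.
generated = ['date_published', 'date_published__year__gt']

# Rebase each generated param onto the declared attribute name, as
# gen_name.replace(f.field_name, name, 1) does above.
params = [gen.replace(field_name, declared_name, 1) for gen in generated]
assert params == ['published', 'published__year__gt']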
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def _populate_model_filters_to_resolved_values(\n manifest_specs_cached_values: Dict[str, Any],\n model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues],\n model_filters: Operator,\n) -> None:\n for model_filter in model_filters:\n if model_filter.key in manifest_specs_cached_values:\n cached_model_value = manifest_specs_cached_values[model_filter.key]\n evaluated_expression: BooleanValues = evaluate_filter_expression(\n model_filter, cached_model_value\n )\n model_filters_to_resolved_values[model_filter] = evaluated_expression", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def _resolveAutos(cls, fields: Sequence[str]) -> List[Tuple[str, int]]:\n # There is some opportunity for code re-use between this and the metaclass...\n resolved = []\n for field in fields:\n while cls._autoAt in cls._valuesTaken:\n cls._autoAt *= 2\n value = cls._autoAt\n resolved.append((field, value))\n cls._autoAt *= 2\n return resolved", "def addAutoSaveFilter(filter):", 
"def filter(self, filters):", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .container_filter import ContainerFilter\n from .group_filter import GroupFilter\n from .object_mapping import ObjectMapping\n from .string_key_string_value_pair import StringKeyStringValuePair\n\n from .container_filter import ContainerFilter\n from .group_filter import GroupFilter\n from .object_mapping import ObjectMapping\n from .string_key_string_value_pair import StringKeyStringValuePair\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"containerFilter\": lambda n : setattr(self, 'container_filter', n.get_object_value(ContainerFilter)),\n \"editable\": lambda n : setattr(self, 'editable', n.get_bool_value()),\n \"groupFilter\": lambda n : setattr(self, 'group_filter', n.get_object_value(GroupFilter)),\n \"id\": lambda n : setattr(self, 'id', n.get_str_value()),\n \"metadata\": lambda n : setattr(self, 'metadata', n.get_collection_of_object_values(StringKeyStringValuePair)),\n \"name\": lambda n : setattr(self, 'name', n.get_str_value()),\n \"objectMappings\": lambda n : setattr(self, 'object_mappings', n.get_collection_of_object_values(ObjectMapping)),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"priority\": lambda n : setattr(self, 'priority', n.get_int_value()),\n \"sourceDirectoryName\": lambda n : setattr(self, 'source_directory_name', n.get_str_value()),\n \"targetDirectoryName\": lambda n : setattr(self, 'target_directory_name', n.get_str_value()),\n }\n return fields", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def get_filters(self):", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', 
flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def set_restriction_filters(self):\n self.restriction_filters[\"pk__exact\"] = self.request.user.pk", "def removeAutoSaveFilter(filter):", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' 
in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def load_all_filters(self, interp=True, lamb=None):\n raise NotImplementedError", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(AreaResource, self).build_filters(filters)\n \n if \"level\" in filters:\n orm_filters[\"layout__level\"] = int(filters[\"level\"])\n \n return orm_filters", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def normalize_prefetch_lookups(lookups, prefix=None):\n ret = []\n for lookup in lookups:\n if not isinstance(lookup, Prefetch):\n lookup = Prefetch(lookup)\n if prefix:\n lookup.add_prefix(prefix)\n ret.append(lookup)\n return ret", "def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return 
data_filtered", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def lookup_filter(self, pbc, name=None, flags={}):\n d = []\n uplookup = None\n updesc = None\n for desc in pbc.descriptions:\n # pick methods but ignore already-bound methods, which can come\n # from an instance attribute\n if (isinstance(desc, description.MethodDesc)\n and desc.selfclassdef is None):\n methclassdef = desc.originclassdef\n if methclassdef is not self and methclassdef.issubclass(self):\n pass # subclasses methods are always candidates\n elif self.issubclass(methclassdef):\n # upward consider only the best match\n if uplookup is None or methclassdef.issubclass(uplookup):\n uplookup = methclassdef\n updesc = desc\n continue\n # for clsdef1 >= clsdef2, we guarantee that\n # clsdef1.lookup_filter(pbc) includes\n # clsdef2.lookup_filter(pbc) (see formal proof...)\n else:\n continue # not matching\n # bind the method by giving it a selfclassdef. Use the\n # more precise subclass that it's coming from.\n desc = desc.bind_self(methclassdef, flags)\n d.append(desc)\n if uplookup is not None:\n d.append(updesc.bind_self(self, flags))\n\n if d:\n return SomePBC(d, can_be_None=pbc.can_be_None)\n elif pbc.can_be_None:\n return s_None\n else:\n return s_ImpossibleValue", "def get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def RegisterCommonFilters(filtermap):\n\n # General casing for output naming\n filtermap['camelcase'] = stringcase.camelcase\n filtermap['capitalcase'] = stringcase.capitalcase\n filtermap['constcase'] = stringcase.constcase\n filtermap['pascalcase'] = stringcase.pascalcase\n 
filtermap['snakecase'] = stringcase.snakecase\n filtermap['spinalcase'] = stringcase.spinalcase", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def _get_rbac_field_filters(self, request_context):\n return [\n {'term': {'tenant_id': request_context.owner}},\n ]", "def _set_attr_name_map(self):\n self.attr_name_map = {}\n for object_query in self.query:\n object_name = object_query[\"object_name\"]\n object_class = self.object_map[object_name]\n aliases = AttributeInfo.gather_aliases(object_class)\n self.attr_name_map[object_class] = {}\n for key, value in aliases.items():\n filter_by = None\n if isinstance(value, dict):\n filter_name = value.get(\"filter_by\", None)\n if filter_name is not None:\n filter_by = getattr(object_class, filter_name, None)\n value = value[\"display_name\"]\n if value:\n self.attr_name_map[object_class][value.lower()] = (key.lower(),\n filter_by)\n custom_attrs = AttributeInfo.get_custom_attr_definitions(\n object_class)\n for key, definition in custom_attrs.items():\n if not key.startswith(\"__custom__:\") or \\\n \"display_name\" not in definition:\n continue\n try:\n # Global custom attribute definition can only have a single id on\n # their name, so it is safe for that. 
Currently the filters do not\n # work with object level custom attributes.\n attr_id = definition[\"definition_ids\"][0]\n except KeyError:\n continue\n filter_by = CustomAttributeValue.mk_filter_by_custom(object_class,\n attr_id)\n name = definition[\"display_name\"].lower()\n self.attr_name_map[object_class][name] = (name, filter_by)", "def get_filter_config(\n platform,\n filter_name,\n filter_options=None,\n terms=None,\n prepend=True,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format=\"%Y/%m/%d\",\n):\n if not filter_options:\n filter_options = []\n if not terms:\n terms = []\n if merge_pillar and not only_lower_merge:\n acl_pillar_cfg = _get_pillar_cfg(\n pillar_key, saltenv=saltenv, pillarenv=pillarenv\n )\n filter_pillar_cfg = _lookup_element(acl_pillar_cfg, filter_name)\n filter_options = filter_options or filter_pillar_cfg.pop(\"options\", None)\n if filter_pillar_cfg:\n # Only when it was able to find the filter in the ACL config\n pillar_terms = filter_pillar_cfg.get(\n \"terms\", []\n ) # No problem if empty in the pillar\n terms = _merge_list_of_dict(terms, pillar_terms, prepend=prepend)\n # merge the passed variable with the pillar data\n # any filter term not defined here, will be appended from the pillar\n # new terms won't be removed\n filters = []\n filters.append(\n {\n filter_name: {\n \"options\": _make_it_list({}, filter_name, filter_options),\n \"terms\": terms,\n }\n }\n )\n return get_policy_config(\n platform,\n filters=filters,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n only_lower_merge=True,\n revision_id=revision_id,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format,\n )", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def filters(self, filters):\n\n self._filters = filters", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def _put_resolved_booleans_into_filter(\n filter_operator: Operator, model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues]\n) -> None:\n for operator in _model_filter_in_operator_generator(filter_operator):\n model_filter = operator.unresolved_value\n operator.resolved_value = model_filters_to_resolved_values.get(\n model_filter, BooleanValues.UNKNOWN\n )", "def build_filters(self, filters=None):\n filters.pop('username')\n return super(UserResource, self).build_filters(filters)", "def filter_config(resource, first_pub_pair=None, last_pub_pair=None):\n if resource in constants.ALLOWS_FILTER:\n # Process eventual filter parameters:\n if first_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_FIRST, *first_pub_pair)\n elif last_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_LAST, *last_pub_pair)\n else: # Default is 'empty' filter\n a_filter = query_client.Filter()\n else:\n a_filter = None\n\n return {'a_filter': a_filter}", "def find_filters_by_fields(self, **kwargs):\n return [\n filter_.filter_id\n for filter_ in self\n if all(\n (getattr(filter_, key) == value for key, value in kwargs.iteritems())\n )\n 
]", "def display_representation(self, filters=None):\n attrs = OrderedDict()\n for field in get_fields(type(self)):\n name = field.field_name\n value = getattr(self, name)\n attrs[name] = value\n \n if filters is None:\n return attrs\n elif isinstance(filters, list):\n filter_attrs = OrderedDict()\n for attr in filters:\n if attr in attrs:\n filter_attrs[attr] = attrs[attr]\n return filter_attrs", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def filters(self):\n\t\treturn self.local_filter", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_management_configuration import AppManagementConfiguration\n from .policy_base import PolicyBase\n\n from .app_management_configuration import AppManagementConfiguration\n from .policy_base import PolicyBase\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"applicationRestrictions\": lambda n : setattr(self, 'application_restrictions', n.get_object_value(AppManagementConfiguration)),\n \"isEnabled\": lambda n : setattr(self, 'is_enabled', n.get_bool_value()),\n \"servicePrincipalRestrictions\": lambda n : setattr(self, 'service_principal_restrictions', n.get_object_value(AppManagementConfiguration)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def _filter(\n self,\n data: List[dict],\n filters: List[Tuple[str, SupportedFilterType]],\n state_dataclass: StateSchema,\n detail: bool,\n ) -> List[dict]:\n filters = _convert_filters_type(filters, state_dataclass)\n result = []\n for datum in data:\n match = True\n for filter_column, 
filter_predicate, filter_value in filters:\n filterable_columns = state_dataclass.filterable_columns()\n filter_column = filter_column.lower()\n if filter_column not in filterable_columns:\n raise ValueError(\n f\"The given filter column {filter_column} is not supported. \"\n f\"Supported filter columns: {filterable_columns}\"\n )\n\n if filter_column not in datum:\n match = False\n elif filter_predicate == \"=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n # Case insensitive match for string filter values.\n match = datum[filter_column].lower() == filter_value.lower()\n else:\n match = datum[filter_column] == filter_value\n elif filter_predicate == \"!=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n match = datum[filter_column].lower() != filter_value.lower()\n else:\n match = datum[filter_column] != filter_value\n else:\n raise ValueError(\n f\"Unsupported filter predicate {filter_predicate} is given. \"\n \"Available predicates: =, !=.\"\n )\n\n if not match:\n break\n\n if match:\n result.append(filter_fields(datum, state_dataclass, detail))\n return result", "def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def process_filters(self, filters, queryset, view):\n return filters", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def get_filters_params(self, 
params=None):\n if not params:\n params = self.params\n lookup_params = params.copy() # a dictionary of the query string {'rede__id__exact':'1'}\n # Remove all the parameters that are globally and systematically\n # ignored.\n for ignored in IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params", "def get_filters_params(self, params=None):\n if not params:\n params = self.params\n lookup_params = params.copy() # a dictionary of the query string {'rede__id__exact':'1'}\n # Remove all the parameters that are globally and systematically\n # ignored.\n for ignored in IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params", "def convert_fields(fields, _fields):\n mapper = {\n \"id\": \"local_id\",\n \"local_id\": \"id\"\n }\n fields = deepcopy(fields)\n for field in fields:\n if field['name'] in _fields:\n field['name'] = mapper[field['name']]\n return fields", "def extend(cls, fields: Dict[str, Union[int, auto]]):\n # add explicit values first, so that autos know about them\n for field, value in ((f, v) for f, v in fields.items() if isinstance(v, int)):\n cls._registerField(field, value)\n toResolve = [field for field, val in fields.items() if isinstance(val, auto)]\n resolved = cls._resolveAutos(toResolve)\n for field, value in resolved:\n cls._registerField(field, value)", "def get_default_filters(self, **resources):\r\n return dict((k, (v, False)) for k, v in resources.items()\r\n if k in self._meta.fields)", "def _make_functions(namespace):\n for fil in registry.filters:\n func_name = camel2enthought(fil.id)\n class_name = fil.id\n if func_name.endswith('_filter'):\n func_name = func_name[:-7]\n class_name = class_name[:-6]\n class_name = class_name + 'Factory'\n\n # Don't create any that are already defined.\n if class_name in namespace:\n continue\n\n # The class to wrap.\n klass = new.classobj(class_name, \n (_AutomaticFilterFactory,),\n {'__doc__': fil.help,}\n )\n klass._metadata = fil\n\n # The mlab helper function.\n func = make_function(klass)\n\n # Inject class/function into the namespace and __all__.\n namespace[class_name] = klass\n namespace[func_name] = func\n __all__.append(func_name)", "def _split_filters(self, filters):\n # specifying ancestor_location returns an ANDFilter and does not have a column name\n # assume that it should go into inner filters\n complex_filters = [f for f in filters if not hasattr(f, 'column_name')]\n simple_filters = [f for f in filters if hasattr(f, 'column_name')]\n inner_filters = [f for f in simple_filters if f.column_name not in self.AGGREGATE_FILTERS]\n outer_filters = [f for f in simple_filters if f.column_name in self.AGGREGATE_FILTERS]\n return {\n 'inner': inner_filters + complex_filters,\n 'outer': outer_filters,\n }", "def prepare_advanced_filter(filter_options: str) -> dict:\n import ast\n import json\n\n if filter_options:\n if os.path.isfile(filter_options):\n with open(filter_options, 'r') as f:\n filter_options = json.load(f)\n # advanced filter do not specify collections!\n if 'collections' in filter_options:\n del filter_options['collections']\n else:\n filter_options = ast.literal_eval(filter_options)\n return filter_options\n return None", "def get_filters(self, request, **resources):\r\n filters = dict()\r\n\r\n if not self._meta.fields:\r\n return filters\r\n\r\n for field in request.GET.iterkeys():\r\n tokens = field.split(LOOKUP_SEP)\r\n field_name = tokens[0]\r\n\r\n if not field_name in self._meta.fields:\r\n continue\r\n\r\n exclude 
= False\r\n if tokens[-1] == 'not':\r\n exclude = True\r\n tokens.pop()\r\n\r\n converter = self._meta.model._meta.get_field(\r\n field_name).to_python if len(tokens) == 1 else lambda v: v\r\n value = map(converter, request.GET.getlist(field))\r\n\r\n if len(value) > 1:\r\n tokens.append('in')\r\n else:\r\n value = value.pop()\r\n\r\n filters[LOOKUP_SEP.join(tokens)] = (value, exclude)\r\n\r\n return filters", "def addAutoSaveRestoreFilter(filter):", "def get_filters(**params):\n filters = {}\n\n for column, data in params.items():\n if data is not None:\n if isinstance(data, six.string_types):\n f_type, value = _extract_filter_type_and_value(data)\n\n create_or_update_filter(column, value, f_type, filters)\n else:\n create_or_update_filter(column, data, _filter=filters)\n\n return filters", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def filter_queryset(self,queryset):\n filters = {}\n for backend in list(self.filter_backends):\n backendobj = backend()\n queryset = backendobj.filter_queryset(self.request, queryset, self)\n if hasattr(backendobj,'get_applied_filters'):\n filters.update(backendobj.get_applied_filters())\n self. applied_filters = OrderedDict()\n for key,value in filters.items():\n if isinstance(value,datetime.datetime):\n self.applied_filters[key]=value\n del filters[key]\n self.applied_filters.update(sorted(filters.items(),key=itemgetter(1),reverse=True))\n return queryset", "def addAutoSaveDeleteFilter(filter):", "def post_process(klass, results, unsafe_filters):\n logging.info(u'DatastoreModel.post_process() handled unsafe filters:')\n logging.info(u'{}'.format(unsafe_filters))\n all_matching_sets = []\n for k, v in unsafe_filters.items():\n matches = set([e for e in results if getattr(e, k) in v])\n all_matching_sets.append(matches)\n return set.intersection(*all_matching_sets)", "def dbtrace_filter_change(filter_name_field):\n\n pass", "def init_filters(model: Model, settings: Model) -> None:\n filters = [\n {\"name\": \"Project\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Attachments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Priority\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Resolved\", \"filtration_type\": \"date\"},\n {\"name\": \"Labels\", \"filtration_type\": \"string\"},\n {\"name\": \"Created\", \"filtration_type\": \"date\"},\n {\"name\": \"Comments\", \"filtration_type\": \"numeric\"},\n {\"name\": \"Status\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Key\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Summary\", \"filtration_type\": \"string\"},\n {\"name\": \"Resolution\", \"filtration_type\": \"drop-down\"},\n {\"name\": \"Description\", \"filtration_type\": \"string\"},\n {\"name\": \"Components\", \"filtration_type\": \"string\"},\n ]\n for filter_ in filters:\n model.objects.create(\n name=filter_[\"name\"],\n filtration_type=filter_[\"filtration_type\"],\n settings=settings,\n )", "def fieldToFilter(fields):\n if len(fields) == 0:\n return dict()\n return dict(\n pdir_keywords=dict(\n query=[\"%s:%s\" % (k, v) for (k, v) in fields.items()],\n operator=\"and\",\n )\n )", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in 
s.content]\n return(filters)", "def get_object_from_filter(obj, components):\n\n components = components[:]\n while len(components) > 2:\n obj = getattr(obj, components.pop(0))\n if len(components) == 2:\n if components[-1] != \"regex\":\n if not hasattr(obj, f\"__{components[-1]}__\"):\n obj = getattr(obj, components[0])\n return obj", "def set_fields(self, upstream_obj, nonparam_fields=None):\n default_data = upstream_obj.default_data(start_year=self.start_year,\n metadata=True)\n\n if self.raw_input_fields is None:\n self.raw_input_fields = {}\n for field in self._meta.fields:\n if (getattr(self, field.attname, None) and\n field.name not in nonparam_fields):\n raw_val = getattr(self, field.attname)\n if field.name.endswith(\"cpi\") and isinstance(raw_val, bool):\n raw_val = str(raw_val)\n self.raw_input_fields[field.name] = raw_val\n\n input_fields, failed_lookups = param_formatters.parse_fields(\n self.raw_input_fields,\n default_data\n )\n\n if failed_lookups:\n # distinct elements\n potential_failed_lookups = set(failed_lookups)\n # only keep parameters that used to be in the upstream package\n set_failed_lookups = potential_failed_lookups - nonparam_fields\n if self.deprecated_fields is None:\n self.deprecated_fields = []\n # drop parameters that we already know are deprecated\n set_failed_lookups.difference_update(self.deprecated_fields)\n self.deprecated_fields += list(set_failed_lookups)\n\n self.input_fields = input_fields", "def test_filter_settings(self):\n self.es.register_filter(foo='bar')\n self.assertTrue(callable(self.es.filter['all'][0]))\n self.es.register_filter(bar='baz')\n self.assertLength(self.es.filter['all'], 2)", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def conforms_to_template_filter(self, template_filter):\n\n if not isinstance(template_filter, self.__class__):\n raise TypeError(\"AttributeFilter can only check conformance against \\\n another template filter, %s provided\" % (template_filter.__class__.__name__))\n\n #:\n #: Keys from the template\n #:\n template_filter_keys = template_filter.keys()\n # Keys from the object itself\n this_filter_keys = self.keys()\n\n #:\n #: 1. Check to see if the client has provided unwanted keys\n #:\n unwanted_keys = set(this_filter_keys) - set(template_filter_keys)\n if len(unwanted_keys) > 0:\n raise prestans.exception.AttributeFilterDiffers(list(unwanted_keys))\n\n #:\n #: 2. Make a attribute_filter that we send back\n #:\n evaluated_attribute_filter = AttributeFilter()\n\n #:\n #: 3. 
Evaluate the differences between the two, with template_filter as the standard\n #:\n for template_key in template_filter_keys:\n\n if template_key in this_filter_keys:\n\n value = getattr(self, template_key)\n\n #:\n #: If sub filter and boolean provided with of true, create default filter\n #: with value of true\n #:\n if isinstance(value, bool) and \\\n value is True and \\\n isinstance(getattr(template_filter, template_key), AttributeFilter):\n setattr(evaluated_attribute_filter, template_key, \\\n getattr(template_filter, template_key))\n elif isinstance(value, bool):\n setattr(evaluated_attribute_filter, template_key, value)\n elif isinstance(value, self.__class__):\n # Attribute lists sort themselves out, to produce sub Attribute Filters\n template_sub_list = getattr(template_filter, template_key)\n this_sub_list = getattr(self, template_key)\n setattr(evaluated_attribute_filter, template_key, \\\n this_sub_list.conforms_to_template_filter(template_sub_list))\n else:\n setattr(evaluated_attribute_filter, template_key, \\\n getattr(template_filter, template_key))\n\n return evaluated_attribute_filter", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def _unwrap_filter(self, filter_dict, interface_dict=None):\n if not interface_dict:\n interface_dict = dict((new, old) for old, new in self._interface)\n\n translated = {}\n for k, v in filter_dict.items():\n tran_k = interface_dict[k]\n if tran_k != None:\n translated[tran_k] = v\n else:\n if v != self._missing:\n raise _FilterValueError('Missing column can only be '\n 'filtered to missing value.')\n return translated", "def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types", "def pre_filter(self, qs):\n return qs", "def filter_data(f):\n @functools.wraps(f, assigned=[])\n def wrapper(*args, **kwds):\n out = f(*args, **kwds)\n\n def _filter(obj):\n if isinstance(obj, list):\n new_list = []\n for o in obj:\n new_list.append(_filter(o))\n obj = new_list\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(k, str):\n obj[k.lower()] = _filter(v)\n return obj\n return _filter(out)\n return wrapper", "def office_prefilter_data(parser, args, params):\n local_args = parser.parse_known_args(args)\n \n control.prefilter_data(params)", "def handle_filters(filters: Optional[List[str]]) -> List[Dict[str, Any]]:\n filters_to_send = []\n for filter_ in filters or []:\n split_filter = filter_.split('=')\n if len(split_filter) != 2 or not split_filter[0] or not split_filter[1]:\n raise DemistoException('Filters should be in the format of \"filtername1=filtervalue1,filtername2=filtervalue2\". 
'\n f'The filter \"{filter_}\" doesn\\'t meet this requirement.')\n filters_to_send.append({'name': split_filter[0],\n 'operator': '=',\n 'value': split_filter[1]})\n return filters_to_send", "def removeAutoSaveDeleteFilter(filter):", "def __format_conditional_filters(field: dict) -> dict:\n if \"conditionalFormats\" in field:\n for cf in field[\"conditionalFormats\"]:\n if \"filter\" in cf and isinstance(\n cf[\"filter\"], QueryFilter\n ): # Supports one QueryFilter without list form\n cf[\"filter\"] = encode_conditional_format_filter(cf[\"filter\"])\n\n elif \"filter\" in cf and isinstance(cf[\"filter\"], list): # Supports list of QueryFilters\n filters = []\n for query_filter in cf[\"filter\"]:\n filters.append(encode_conditional_format_filter(query_filter))\n if len(filters) > 2:\n raise Exception(\"Too many QueryFilters given for one conditional format.\")\n cf[\"filter\"] = filters[0] + \"&\" + filters[1] if len(filters) == 2 else filters[0]\n\n return field", "def _fields_to_cache(self):\r\n scope_map = defaultdict(set)\r\n for descriptor in self.descriptors:\r\n for field in descriptor.fields.values():\r\n scope_map[field.scope].add(field)\r\n return scope_map", "def filter_by(action_fields, target):\n if action_fields == 'action_type':\n return target.action_type\n elif action_fields == 'delay':\n return target.delay\n elif action_fields == 'queue':\n return target.queue\n elif action_fields == 'units':\n return target.units\n elif action_fields == 'target_unit':\n return target.target_unit\n elif action_fields == 'target_location':\n return target.target_location", "def get_v1_filters(args: Dict[str, Any]) -> List[str]:\n filters = []\n args_name_to_filter_name = {\n 'alert-status': 'alert.status',\n 'policy-name': 'policy.name',\n 'policy-label': 'policy.label',\n 'policy-compliance-standard': 'policy.complianceStandard',\n 'cloud-account': 'cloud.account',\n 'cloud-account-id': 'cloud.accountId',\n 'cloud-region': 'cloud.region',\n 'alert-rule-name': 'alertRule.name',\n 'resource-id': 'resource.id',\n 'resource-name': 'resource.name',\n 'resource-type': 'resource.type',\n 'alert-id': 'alert.id',\n 'cloud-type': 'cloud.type',\n 'policy-type': 'policy.type',\n 'policy-severity': 'policy.severity',\n }\n for arg_name, filter_name in args_name_to_filter_name.items():\n if arg_value := args.get(arg_name):\n filters.append(f'{filter_name}={arg_value}')\n\n return filters", "def doFiltering(self, searchfunc, filters=None):\n F=[]\n for f in self.filters:\n F.append(f.getFilter())\n #print F\n sets = []\n for f in F:\n col, val, op, boolean = f\n names = searchfunc(col, val, op)\n sets.append((set(names), boolean))\n names = sets[0][0]\n for s in sets[1:]:\n b=s[1]\n if b == 'AND':\n names = names & s[0]\n elif b == 'OR':\n names = names | s[0]\n elif b == 'NOT':\n names = names - s[0]\n names = list(names)\n self.updateResults(len(names))\n return names", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def _convertFilterToSQLite(self, mongofilter: t.Mapping[t.Text, t.Any]\n ) -> t.Mapping[t.Text, t.Any]:\n if any(field[0] == \"$\" for field in mongofilter):\n invalid_fields = [f for f in mongofilter if f[0] == \"$\"]\n warnings.warn(RuntimeWarning(\n f\"You have a top level mongo query operator {invalid_fields} \"\n \"in your filter. 
This should work, as long as you don't try \"\n \"querying any non-sqlite primitive types, ie array/object.\"))\n return mongofilter\n\n if not any(op[0] == \"$\" for val in mongofilter.values()\n if isinstance(val, dict) # val might be just a str\n for op in val\n ):\n # check if any column filters have a mongodb query operator\n # ie $eq, $lte, etc.\n # if they don't, we can easily convert the given row to sql\n sql_filter, = self._convertDataToSQLite((mongofilter, ))\n return sql_filter\n\n sql_filter = dict()\n prim_types = {\n TDX_TYPE.BOOLEAN, TDX_TYPE.DATE, TDX_TYPE.STRING, TDX_TYPE.NUMBER}\n banned_types = {TDX_TYPE.NDARRAY}\n\n # convert ugly TDX Data Schema to Map of Column to Type.\n # might need to be changed in the future for new TDX dataschema schema\n dataschema: t.Dict[t.Text, TDX_TYPE] = {}\n for column, column_type in self.tdx_data_schema.items():\n if isinstance(column_type, collections.abc.Mapping):\n dataschema[column] = TDX_TYPE(\n column_type.get(\"__tdxType\", [TDX_TYPE.OBJECT])[0])\n elif isinstance(column_type, collections.abc.Sequence):\n dataschema[column] = TDX_TYPE.ARRAY\n\n for field, val in mongofilter.items():\n tdx_type = dataschema[field]\n if tdx_type in prim_types: # no need for conversion\n sql_filter[field] = val\n continue\n\n if tdx_type in banned_types: # cannot query\n raise TypeError(\n f\"All queries are banned on tdx_type {tdx_type}. \"\n \"Given item was {field}.\")\n\n if not isinstance(val, dict) or all(op[0] != \"$\" for op in val):\n # val is array/or dict with NO mongo query ops\n # can convert normally\n con_row, = self._convertDataToSQLite([{field: val}])\n sql_filter[field] = con_row[field]\n continue\n\n raise TypeError(\n \"MongoDB Style Queries are only supported on items \"\n f\"with TDX Type values of {prim_types}. Given \"\n \"item was {field} with type {tdx_type}. \"\n f\"Mongo Op given was {next(op for op in val if op[0] == '$')}\")\n return sql_filter", "def _pre_filter_decode(self):\n\n pass", "def field_filter(paths, fields, catchall='rois'):\n filt_paths = {} \n\n for p in paths:\n captured = False\n for (name, filts) in fields.items():\n ps = p.strip('/')\n elts = ps.split('/')\n goes_in = False\n for i in range(len(filts)):\n if len(filts[i]):\n if len(elts) > i:\n if elts[i] in filts[i]:\n goes_in = True\n else:\n captured = True\n break\n \n if goes_in:\n filt_paths.setdefault(name, []).append(p)\n captured = True\n\n # If a path was not explicitly captured by a filter rule, and the\n # catchall didn't have any filters, we include it in the catchall\n if not captured and not fields.setdefault(catchall, []):\n filt_paths.setdefault(catchall, []).append(p)\n\n return filt_paths" ]
[ "0.5492552", "0.5431763", "0.5252897", "0.52475154", "0.5196505", "0.5192317", "0.5170377", "0.5147656", "0.5139516", "0.50391585", "0.4980402", "0.49512303", "0.49480322", "0.49272498", "0.49130383", "0.49125457", "0.489388", "0.48330957", "0.48206162", "0.4809472", "0.4801828", "0.47809687", "0.47597542", "0.47553548", "0.47464135", "0.4741792", "0.4739589", "0.47164077", "0.4695261", "0.46817344", "0.46817327", "0.46631557", "0.46538416", "0.46497232", "0.46414563", "0.46387616", "0.46281302", "0.46274495", "0.46142182", "0.46082225", "0.46067002", "0.45753047", "0.4570341", "0.4570297", "0.45677108", "0.45628208", "0.45614752", "0.45531416", "0.45491114", "0.45397598", "0.45228758", "0.45184618", "0.45164278", "0.44990388", "0.44965318", "0.44943342", "0.44917253", "0.4490707", "0.4490707", "0.4482824", "0.4468967", "0.44643223", "0.4459135", "0.4451359", "0.4446426", "0.44428754", "0.44412595", "0.44376436", "0.4433227", "0.44196197", "0.4414154", "0.44083792", "0.44021127", "0.4401214", "0.43911654", "0.43874413", "0.43862975", "0.4372658", "0.43717", "0.43691134", "0.43676046", "0.43619576", "0.4359566", "0.43589586", "0.4356316", "0.4354946", "0.4353596", "0.43528387", "0.43473297", "0.43416682", "0.43407476", "0.43394756", "0.43334818", "0.43307364", "0.4325505", "0.4324119", "0.4321109", "0.43174103", "0.43154854", "0.43144837" ]
0.5948139
0
Returns the subset of filters that should be initialized by the FilterSet, dependent on the requested `params`. This helps minimize the cost of initialization by reducing the number of deepcopy ops. The `rel` argument is used for related filtersets to strip the param of its relationship prefix. See `.get_param_filter_name()` for info.
def get_filter_subset(cls, params, rel=None): # Determine names of filters from query params and remove empty values. # param names that traverse relations are translated to just the local # filter names. eg, `author__username` => `author`. Empty values are # removed, as they indicate an unknown field eg, author__foobar__isnull filter_names = {cls.get_param_filter_name(param, rel) for param in params} filter_names = {f for f in filter_names if f is not None} return OrderedDict( (k, v) for k, v in cls.base_filters.items() if k in filter_names )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' % filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def get_filters_params(self, params=None):\n if not params:\n params = self.params\n lookup_params = params.copy() # a dictionary of the query string {'rede__id__exact':'1'}\n # Remove all the parameters that are globally and systematically\n # ignored.\n for ignored in IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params", "def get_filters_params(self, params=None):\n if not params:\n params = self.params\n lookup_params = params.copy() # a dictionary of the query string {'rede__id__exact':'1'}\n # Remove all the parameters that are globally and systematically\n # ignored.\n for ignored in IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params", "def get_related_filtersets(self):\n related_filtersets = OrderedDict()\n\n for related_name in self.related_filters:\n if related_name not in self.filters:\n continue\n\n f = self.filters[related_name]\n related_filtersets[related_name] = f.filterset(\n data=self.data,\n queryset=f.get_queryset(self.request),\n relationship=related(self, related_name),\n request=self.request,\n prefix=self.form_prefix,\n )\n\n return related_filtersets", "def get_param_filter_name(cls, param, rel=None):\n # check for empty param\n if not param:\n return param\n\n # strip the rel prefix from the param name.\n prefix = '%s%s' % (rel or '', LOOKUP_SEP)\n if rel and param.startswith(prefix):\n param = param[len(prefix):]\n\n # Attempt to match against filters with lookups first. (username__endswith)\n if param in cls.base_filters:\n return param\n\n # Attempt to match against exclusion filters\n if param[-1] == '!' and param[:-1] in cls.base_filters:\n return param[:-1]\n\n # Match against relationships. (author__username__endswith).\n # Preference more specific filters. eg, `note__author` over `note`.\n for name in reversed(sorted(cls.related_filters)):\n # we need to match against '__' to prevent eager matching against\n # like names. eg, note vs note2. 
Exact matches are handled above.\n if param.startswith(\"%s%s\" % (name, LOOKUP_SEP)):\n return name", "def get_params(self):\n params = super(JoinFilterQuery, self).get_params()\n params.append(('fq', self._query))\n\n return params", "def filters(self):\n\t\treturn self.local_filter", "def condition_filters(self):\r\n return filters.Filters(self)", "def filter_related_filtersets(self, queryset):\n for related_name, related_filterset in self.related_filtersets.items():\n # Related filtersets should only be applied if they had data.\n prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)\n if not any(value.startswith(prefix) for value in self.data):\n continue\n\n field_name = self.filters[related_name].field_name\n lookup_expr = LOOKUP_SEP.join([field_name, 'in'])\n subquery = Subquery(related_filterset.qs.values('pk'))\n queryset = queryset.filter(**{lookup_expr: subquery})\n\n return queryset", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def get_filters(**params):\n filters = {}\n\n for column, data in params.items():\n if data is not None:\n if isinstance(data, six.string_types):\n f_type, value = _extract_filter_type_and_value(data)\n\n create_or_update_filter(column, value, f_type, filters)\n else:\n create_or_update_filter(column, data, _filter=filters)\n\n return filters", "def get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def filter_by_param(self, pars):\n\n assert(type(pars) == dict)\n for par, vals in pars.items():\n vals = [type(self.sim_confs[0][par])(v) for v in vals]\n self.sim_confs = [conf for conf in self.sim_confs if conf[par] in vals]\n groups = []\n for group in self.sim_groups:\n filt_group = [conf for conf in group if conf[par] in vals]\n groups.append(filt_group)\n self.sim_groups = groups\n assert(len(self.sim_confs) >= 1)\n return self.sim_confs, self.sim_groups", "def filters(self):\n return self.__filters", "def _prune_parameters(self,\n graph,\n scope,\n params,\n ratios,\n place,\n lazy=False,\n only_graph=False,\n param_backup=None,\n param_shape_backup=None):\n _logger.debug('\\n################################')\n _logger.debug('# pruning parameters #')\n _logger.debug('################################\\n')\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n 
_logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format('parameter', 'axis',\n 'from', 'to'))\n assert len(params) == len(ratios)\n self.pruned_list = [[], []]\n for param, ratio in zip(params, ratios):\n assert isinstance(param, str) or isinstance(param, unicode)\n param = graph.var(param)\n self._forward_pruning_ralated_params(\n graph,\n scope,\n param,\n place,\n ratio=ratio,\n lazy=lazy,\n only_graph=only_graph,\n param_backup=param_backup,\n param_shape_backup=param_shape_backup)\n ops = param.outputs()\n for op in ops:\n if op.type() == 'conv2d':\n brother_ops = self._search_brother_ops(graph, op)\n for broher in brother_ops:\n for p in graph.get_param_by_op(broher):\n self._forward_pruning_ralated_params(\n graph,\n scope,\n p,\n place,\n ratio=ratio,\n lazy=lazy,\n only_graph=only_graph,\n param_backup=param_backup,\n param_shape_backup=param_shape_backup)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )", "def filters(self):\n return self._filters", "def get_filter_parameters(self):\n if not self.should_filter():\n return []\n\n fields = []\n for filter_backend in self.view.filter_backends:\n fields += self.get_filter_backend_parameters(filter_backend())\n\n return fields", "def find_filters_by_fields(self, **kwargs):\n return [\n filter_.filter_id\n for filter_ in self\n if all(\n (getattr(filter_, key) == value for key, value in kwargs.iteritems())\n )\n ]", "def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def compute_param_slices(self):\r\n self.param_slices = []\r\n count = 0\r\n for p in self.parts:\r\n self.param_slices.append(slice(count, count + p.num_params))\r\n count += p.num_params", "def get_filters(self):\n if self.filters is not None:\n return self.filters\n elif self.parent is not None:\n return self.parent.get_filters()\n else:\n return None", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def filters(self, **kwargs):\n return config.filters(self._host, 
self._session, **kwargs)", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def filter_empty_subparams(self, param_name):\n param = self.module.params.get(param_name)\n filtered = []\n if isinstance(param, list):\n for subparam in param:\n if isinstance(subparam, dict):\n filtered.append(\n dict(\n (key, value)\n for key, value in subparam.items()\n if value is not None\n )\n )\n else:\n filtered = param\n return filtered", "def _filter_params(self, params):\n\n # INPUT_STATES:\n try:\n input_states_spec = params[INPUT_STATES]\n except KeyError:\n pass\n else:\n # Convert input_states_spec to list if it is not one\n if not isinstance(input_states_spec, list):\n input_states_spec = [input_states_spec]\n # Get inputStates specified in paramClassDefaults\n default_input_states = self.paramClassDefaults[INPUT_STATES].copy()\n # Convert inputStates from paramClassDeafults to a list if it is not one\n if not isinstance(default_input_states, list):\n default_input_states = [default_input_states]\n # Add inputState specified in params to those in paramClassDefaults\n # Note: order is important here; new ones should be last, as paramClassDefaults defines the\n # the primary inputState which must remain first for the inputStates OrderedDictionary\n default_input_states.extend(input_states_spec)\n # Assign full set back to params_arg\n params[INPUT_STATES] = default_input_states\n\n # OUTPUT_STATES:\n try:\n output_states_spec = params[OUTPUT_STATES]\n except KeyError:\n pass\n else:\n # Convert output_states_spec to list if it is not one\n if not isinstance(output_states_spec, list):\n output_states_spec = [output_states_spec]\n # Get outputStates specified in paramClassDefaults\n default_output_states = self.paramClassDefaults[OUTPUT_STATES].copy()\n # Convert outputStates from paramClassDeafults to a list if it is not one\n if not isinstance(default_output_states, list):\n default_output_states = [default_output_states]\n # Add outputState specified in params to those in paramClassDefaults\n # Note: order is important here; new ones should be last, as paramClassDefaults defines the\n # the primary outputState which must remain first for the outputStates OrderedDictionary\n default_output_states.extend(output_states_spec)\n # Assign full set back to params_arg\n params[OUTPUT_STATES] = default_output_states", "def _init_optimizer_params(self):\n order = [\n [Peaking.__name__, True, True], # Peaking\n [LowShelf.__name__, True, True], # Low shelfs\n [HighShelf.__name__, True, True], # High shelfs\n [Peaking.__name__, True, False], # Peaking with fixed q\n [LowShelf.__name__, True, False], # Low shelfs with fixed q\n [HighShelf.__name__, True, False], # High shelfs with fixed 
q\n [Peaking.__name__, False, True], # Peaking with fixed fc\n [LowShelf.__name__, False, True], # Low shelfs with fixed fc\n [HighShelf.__name__, False, True], # High shelfs with fixed fc\n [Peaking.__name__, False, False], # Peaking with fixed fc and q\n [LowShelf.__name__, False, False], # Low shelfs with fixed fc and q\n [HighShelf.__name__, False, False], # High shelfs with fixed fc and q\n ]\n\n def init_order(filter_ix):\n filt = self.filters[filter_ix]\n ix = order.index([filt.__class__.__name__, filt.optimize_fc, filt.optimize_q])\n val = ix * 100\n if filt.optimize_fc:\n val += 1 / np.log2(filt.max_fc / filt.min_fc)\n return val\n\n # Initialize filter params as list of empty lists, one per filter\n filter_params = [[]] * len(self.filters)\n # Indexes to self.filters sorted by filter init order\n filter_argsort = sorted(list(range(len(self.filters))), key=init_order, reverse=True)\n remaining_target = self.target.copy()\n for ix in filter_argsort: # Iterate sorted filter indexes\n filt = self.filters[ix] # Get filter\n filter_params[ix] = filt.init(remaining_target) # Init filter and place params to list of lists\n remaining_target -= filt.fr # Adjust target\n filter_params = np.concatenate(filter_params).flatten() # Flatten params list\n return filter_params", "def get_filters(self):", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def filters(self, filters):\n\n self._filters = filters", "def pre_filter(self, qs):\n return qs", "def dependency_filter(dependencies,start=0,end=-1,filter_val=None,filter_vals=[],field=None,filter_range='dependent'):\n return [getattr(i, field) if field else i for i in dependencies if \n (start == 0 or getattr(i, filter_range).idx >= start) and \n (end == -1 or getattr(i, filter_range).idx < end) and \n ((filter_val == None and not filter_vals) or i.type in filter_vals + [filter_val] or (filter_val[-1]=='*' and i.type.startswith(filter_val[0:-1])))\n ]", "def filter(self, filters):", "def get_filters(self) -> dict:\n return self._filters", "def filter(self, *args, **kwargs):\n p = self.proxy\n filter_clauses = self.filter_clauses + list(args)\n related_clauses = self.related_clauses[:]\n\n connection_kwarg = p.connection_kwarg\n connection = self.connection\n\n # Build the filter operations\n for k, v in kwargs.items():\n # Ignore connection parameter\n if k == connection_kwarg:\n connection = v\n continue\n model = p.model\n op = \"eq\"\n if \"__\" in k:\n parts = k.split(\"__\")\n if parts[-1] in QUERY_OPS:\n op = parts[-1]\n k = \"__\".join(parts[:-1])\n col = resolve_member_column(model, k, related_clauses)\n\n # Support lookups by model\n if isinstance(v, Model):\n v = v.serializer.flatten_object(v, scope=None)\n elif op in ('in', 'notin'):\n # Flatten lists when using in or notin ops\n v = model.serializer.flatten(v, scope=None)\n\n clause = getattr(col, QUERY_OPS[op])(v)\n filter_clauses.append(clause)\n\n return self.clone(\n 
connection=connection,\n filter_clauses=filter_clauses,\n related_clauses=related_clauses)", "def initFromParams(self, oDisp): # type: (WuiDispatcherBase) -> self\n\n for oCriterion in self.aCriteria:\n self._initFromParamsWorker(oDisp, oCriterion);\n return self;", "def _split_filters(self, filters):\n # specifying ancestor_location returns an ANDFilter and does not have a column name\n # assume that it should go into inner filters\n complex_filters = [f for f in filters if not hasattr(f, 'column_name')]\n simple_filters = [f for f in filters if hasattr(f, 'column_name')]\n inner_filters = [f for f in simple_filters if f.column_name not in self.AGGREGATE_FILTERS]\n outer_filters = [f for f in simple_filters if f.column_name in self.AGGREGATE_FILTERS]\n return {\n 'inner': inner_filters + complex_filters,\n 'outer': outer_filters,\n }", "def _prune_filters_by_ratio(self,\n scope,\n params,\n ratio,\n place,\n lazy=False,\n only_graph=False,\n param_shape_backup=None,\n param_backup=None):\n if params[0].name() in self.pruned_list[0]:\n return\n param_t = scope.find_var(params[0].name()).get_tensor()\n pruned_idx = self.pruner.cal_pruned_idx(\n params[0].name(), np.array(param_t), ratio, axis=0)\n for param in params:\n assert isinstance(param, VarWrapper)\n param_t = scope.find_var(param.name()).get_tensor()\n if param_backup is not None and (param.name() not in param_backup):\n param_backup[param.name()] = copy.deepcopy(np.array(param_t))\n pruned_param = self.pruner.prune_tensor(\n np.array(param_t), pruned_idx, pruned_axis=0, lazy=lazy)\n if not only_graph:\n param_t.set(pruned_param, place)\n ori_shape = param.shape()\n if param_shape_backup is not None and (\n param.name() not in param_shape_backup):\n param_shape_backup[param.name()] = copy.deepcopy(param.shape())\n new_shape = list(param.shape())\n new_shape[0] = pruned_param.shape[0]\n param.set_shape(new_shape)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format(\n str(param.name()),\n str(ratio), str(ori_shape), str(param.shape())))\n self.pruned_list[0].append(param.name())\n return pruned_idx", "def get_filters(self, request, **resources):\r\n filters = dict()\r\n\r\n if not self._meta.fields:\r\n return filters\r\n\r\n for field in request.GET.iterkeys():\r\n tokens = field.split(LOOKUP_SEP)\r\n field_name = tokens[0]\r\n\r\n if not field_name in self._meta.fields:\r\n continue\r\n\r\n exclude = False\r\n if tokens[-1] == 'not':\r\n exclude = True\r\n tokens.pop()\r\n\r\n converter = self._meta.model._meta.get_field(\r\n field_name).to_python if len(tokens) == 1 else lambda v: v\r\n value = map(converter, request.GET.getlist(field))\r\n\r\n if len(value) > 1:\r\n tokens.append('in')\r\n else:\r\n value = value.pop()\r\n\r\n filters[LOOKUP_SEP.join(tokens)] = (value, exclude)\r\n\r\n return filters", "def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n 
runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()", "def _build_param_request(self):\n search_params = []\n for param in self.params:\n # print(param)\n if self.params[param] is not None:\n search_params.append(param + '={}'.format(self.params[param]))\n search_params = '&' + '&'.join(search_params)\n return search_params", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def load_all_filters(self, interp=True, lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def _get_orig_params(\n self,\n module: nn.Module,\n ignored_params: Set[nn.Parameter],\n ) -> Iterator[nn.Parameter]:\n param_gen = module.parameters()\n try:\n while True:\n param = next(param_gen)\n if param not in ignored_params and not _is_fsdp_flattened(param):\n yield param\n except StopIteration:\n pass", "def from_param(cls, param, require_all=False, filter_param=False):\n self = cls(**param)\n self.execute()\n return self", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n 
isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def _filter_params(params):\n return dict((k, v) for k, v in params.items() if v is not None)", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def getQuery(self):\n # Get a list of object managers, each of which containing the\n # corresponding view and filter set filter field objects of all\n # available filter set classes.\n filter_field_managers = [\n manager for manager in self.getFilterSetFilterFieldManagers()\n ] + [\n manager for manager in self.view.getViewFilterFieldManagers()\n ]\n\n # Create an OR query for all filter fields of the same class\n or_queries = []\n for manager in filter_field_managers:\n filter_fields = manager.all()\n if filter_fields:\n or_queries.append(\n reduce(lambda x, y: x | y, [\n filter_field.getRecordFilter()\n for filter_field in filter_fields\n ]\n )\n )\n\n # If there are different filter field OR queries, combine those\n # queries as one AND query\n if or_queries:\n return reduce(lambda x, y: x & y, [\n or_query for or_query in or_queries\n ]\n )\n # If the filter set does not have any filter fields, we return an empty\n # query, which is equivalent to querying all objects, e.g.:\n # `View.objects.all() == View.objects.filter(Q())`\n else:\n return Q()", "def GetFilters(self, filt_defs):\n # The artifact isn't actually used for anything, it's just required to\n # initialize handlers.\n probe = rdfvalue.Probe(artifact=\"Data\", filters=filt_defs)\n return probe.filters", "def _build_filters(self, criteria: Q):\n composed_query = query.Q()\n\n if criteria.connector == criteria.AND:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query & self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query & ~lookup.as_expression()\n else:\n composed_query = composed_query & lookup.as_expression()\n else:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query | self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query | ~lookup.as_expression()\n else:\n composed_query = composed_query | lookup.as_expression()\n\n return composed_query", "def _filter_params(self):\n default_params = self.get_default_params()\n complete_params = dict(self.get_default_params())\n complete_params.update(self.params)\n\n return utils.format_dictionary(complete_params)", "def get_parameters(self):\n params = []\n query_params = self.build_query_parameters()\n pagination_params = self.build_pagination_parameters()\n query_params.extend(self.build_query_params_from_default_backends())\n\n if django_filters is not None:\n query_params.extend(self.build_query_parameters_from_django_filters())\n\n if query_params:\n params += query_params\n\n if pagination_params:\n params += pagination_params\n\n return params", "def load_filters(self, names, interp=True, lamb=None, 
filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def _mock_chain(self, filters, new_filter):\n if filters is None:\n # There are no active filters.\n filters = []\n if isinstance(filters, elasticsearch_dsl_query):\n # An initial filter was passed in. Convert it to a list.\n filters = [filters]\n filters.append(new_filter)\n return filters", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n quote = request.GET.get('quote', None)\n added_by = request.GET.get('added_by', None)\n customer = request.GET.get('customer', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if customer:\n items = items.filter(customer=customer).distinct()\n if quote:\n items = items.filter(quote=quote).distinct()\n if added_by:\n items = items.filter(added_by=added_by).distinct()\n\n return items", "def parameters(self, requires_grad_only=True):\n filter_cond = lambda param: param.requires_grad if requires_grad_only else True\n return (param for param in super().parameters() if filter_cond(param))", "def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' 
in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def filter_config(resource, first_pub_pair=None, last_pub_pair=None):\n if resource in constants.ALLOWS_FILTER:\n # Process eventual filter parameters:\n if first_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_FIRST, *first_pub_pair)\n elif last_pub_pair:\n a_filter = query_client.TemporalFilter(\n query_client.PUBLISHED_LAST, *last_pub_pair)\n else: # Default is 'empty' filter\n a_filter = query_client.Filter()\n else:\n a_filter = None\n\n return {'a_filter': a_filter}", "def getQueryParams(self, local_params={}, REQUEST=None):\n mn_props = getToolByName(self, \"portal_properties\")['metnav_properties']\n portal = getToolByName(self, \"portal_url\").getPortalObject()\n\n nb_limit = local_params.get('NB_LIMIT', 0)\n if int(nb_limit) <= 0:\n limit = \"\"\n else:\n limit = \", %u\" % int(nb_limit)\n\n\n header_dict = { 'HEAD_SUP':local_params.get('HEAD_SUP', mn_props.getProperty('HEAD_SUP','')),\n 'COLLATION':local_params.get('COLLATION', mn_props.getProperty('DEFAULT_COLLATION','')),\n 'xquery_version': self.getXQueryVersion(),\n }\n\n tail_dict = {'XSL': self.getXSLURL(output=local_params.get('XSL', '')),\n 'XSL_PARAMS': self.getXSLParams(params=local_params.get('XSL_PARAMS', {})),\n 'NB_LIMIT': limit,\n 'START': int(local_params.get('START',0))+1,}\n\n dict_collect = self.getCollection(REQUEST)\n base_dict = {'COLLECTION': dict_collect['collection'],\n 'CONDITION_BASE':mn_props.getProperty('CONDITION_BASE', ''),\n 'HEADER':str(portal.xq_header) % header_dict,\n 'TAIL':str(portal.xq_tail) % tail_dict,\n 'CONDITION_SUP':local_params.get('CONDITION_SUP',''),\n 'CLASSIFICATION_NAME':local_params.get('CLASSIFICATION_NAME', dict_collect['name']),\n 'CLASSIFICATION_URI':local_params.get('CLASSIFICATION_URI', dict_collect['classif']),\n 'XSL_PARAMS': self.getXSLParams(params=local_params.get('XSL_PARAMS', {})),\n 'xquery_version': self.getXQueryVersion(),\n }\n\n\n base_dict.update(local_params)\n return base_dict", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if 'conv' in n:\n 
continue\n init_with_uniform(n, p, param_init)", "def GetFilters(self, bulk=False):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n\n filters = self._SendRequest(HTTP_GET, \"/%s/filters\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return filters\n else:\n return [f[\"uuid\"] for f in filters]", "def get_filter_args(self):\n return []", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n customer = request.GET.get('customer', None)\n quote = request.GET.get('quote', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if customer:\n items = items.filter(customer=customer).distinct()\n if quote:\n items = items.filter(quote=quote).distinct()\n\n return items", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def get_params(self, deep=True):\n return super().get_params(deep=deep)", "def param_quantizers(self):\n return self._param_quantizers", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def get_filters() -> dict:\n if environment is None or not hasattr(environment, 'loader'):\n return {}\n return environment.filters", "def get_test_params(cls, parameter_set=\"default\"):\n features_to_calc = [\n \"dim_0__quantile__q_0.6\",\n \"dim_0__longest_strike_above_mean\",\n \"dim_0__variance\",\n ]\n\n return [\n {\n \"disable_progressbar\": True,\n \"show_warnings\": False,\n \"default_fc_parameters\": \"minimal\",\n },\n {\n \"disable_progressbar\": True,\n \"show_warnings\": False,\n \"kind_to_fc_parameters\": features_to_calc,\n },\n ]", "def get_resource_params():\n return Parameter.list()", "def get_list_filters(self):\n # look in session for the saved search...\n filters = ListFilter()\n filters.get_list_filter(self.table)\n return filters", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def get_test_params(cls, parameter_set=\"default\"):\n params1 = {\n \"n_clusters\": 3,\n \"n_init\": 2,\n \"max_iter\": 2,\n \"tol\": 1e-3,\n \"verbose\": False,\n \"random_state\": 2,\n }\n params2 = {\n \"n_clusters\": 2,\n \"init_algorithm\": \"random\",\n \"n_init\": 1,\n \"max_iter\": 1,\n \"tol\": 1e-4,\n \"verbose\": False,\n \"random_state\": 1,\n }\n return [params1, params2]", "def initialize_filter(self):\n shape = self.filter_size + (self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def get_params(self, deep=True):\n return {p: getattr(self, p) for p in self.params}", "def create_filters(date=None, start_date=None, end_date=None,\n distance_min=None, distance_max=None,\n velocity_min=None, velocity_max=None,\n diameter_min=None, diameter_max=None,\n hazardous=None):\n # Using Operator functions to compare the input parameters\n # with passed values.\n filters = []\n if date:\n filters.append(DateFilter(operator.eq, date))\n\n if start_date:\n filters.append(DateFilter(operator.ge, start_date))\n\n if end_date:\n 
filters.append(DateFilter(operator.le, end_date))\n\n if diameter_min:\n filters.append(DiameterFilter(operator.ge, float(diameter_min)))\n\n if diameter_max:\n filters.append(DiameterFilter(operator.le, diameter_max))\n\n if hazardous is not None:\n filters.append(HazardousFilter(operator.eq, hazardous))\n\n if distance_min:\n filters.append(DistanecFilter(operator.ge, distance_min))\n\n if distance_max:\n filters.append(DistanecFilter(operator.le, distance_max))\n\n if velocity_min:\n filters.append(VelocityFilter(operator.ge, velocity_min))\n\n if velocity_max:\n filters.append(VelocityFilter(operator.le, velocity_max))\n\n return filters", "def setFilters(self, filters):\n self.__filters = filters", "def getFilterSetFilterFieldManagers(self):\n return _get_related_managers(self, FilterSetFilterField)", "def get_queryset(self):\n\n fl = self.request.QUERY_PARAMS.get('fl', None)\n win = self.request.QUERY_PARAMS.get('win', None)\n win_filter = Q(orderstatus__in=['Shipped', 'Unshipped', 'Processing'], fulfillmentchannel='MFN')\n #win_filter = Q(orderstatus__in=['Unshipped', 'Processing'], fulfillmentchannel='MFN')\n\n queryset = None\n if fl is not None and fl.isdigit():\n logger.info(\"Got filter id: %s\", fl)\n try:\n filter = Filter.objects.get(pk=int(fl))\n if filter:\n ancestor_logic = Q() #Create Q object to hold other query\n #If filter is only root node\n if filter.is_root_node():\n ancestor_logic = pickle.loads(filter.logic) #Deserilize the filter logic\n logger.info(\"Filter has only root node, Logic: %s\", ancestor_logic)\n\n #If filter has parents\n else:\n for filter_data in filter.get_ancestors(False, True): #Get all parents including self\n filter_logic = pickle.loads(filter_data.logic) #Deserilize the filter logic\n if ancestor_logic.__len__()==0:\n ancestor_logic = filter_logic\n else:\n ancestor_logic = ancestor_logic & filter_logic\n logger.info(\"Filter has parents, Logic: %s\", ancestor_logic)\n\n if ancestor_logic:\n queryset = AmazonOrders.objects.filter(ancestor_logic & win_filter) #pass the query object to filter\n logger.info(\"Filter query, Query: %s\", queryset.query)\n\n except Exception as e:\n logger.error(\"In queryset exception : %s\",e)\n elif win is not None:\n logic = None\n if win == 'AFN':\n logic = Q(fulfillmentchannel=win, orderstatus='Shipped')\n elif win in ['Pending', 'Canceled']:\n logic = Q(orderstatus=win)\n elif win == 'Unshipped':\n logic = Q(orderstatus=win)\n\n if logic:\n queryset = AmazonOrders.objects.filter(logic)\n logger.info(\"Win query, Query: %s\", queryset.query)\n\n else:\n #queryset = AmazonOrders.objects.all()\n queryset = AmazonOrders.objects.filter(win_filter)\n logger.info(\"Filter not passed, Processing full Query: %s\", queryset.query)\n\n return queryset", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_default_filters(self, **resources):\r\n return dict((k, (v, False)) for k, v in resources.items()\r\n if k in self._meta.fields)", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= 
\"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def broadbandfilters(self):\n all = self.allbroadbandfilters\n return [all[layer-1] for layer in self.__layers]", "def get_test_params(cls, parameter_set=\"default\"):\n from sktime.forecasting.exp_smoothing import ExponentialSmoothing\n from sktime.forecasting.trend import TrendForecaster\n from sktime.utils.validation._dependencies import _check_estimator_deps\n\n # accessing a nested parameter\n params = [\n {\n \"forecaster\": TrendForecaster(),\n \"param_names\": [\"regressor__intercept\"],\n }\n ]\n\n # ExponentialSmoothing requires statsmodels\n if _check_estimator_deps(ExponentialSmoothing, severity=\"none\"):\n # accessing a top level parameter\n params = params + [\n {\n \"forecaster\": ExponentialSmoothing(),\n \"param_names\": [\"initial_level\"],\n }\n ]\n\n return params", "def get_queryset(self):\n # Check if the parameter assigned_only is on the request\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n # Make copy of queryset so we do not modify the original\n queryset = self.queryset\n # If the parameter was passed filter on the book not\n # being specified\n if assigned_only:\n queryset = queryset.filter(book__isnull=False)\n\n # Remove duplicates\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()", "def parameter_combinations(cls, raw=False):\r\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\r\n 1.0, 1.5, 2.0],\r\n 'n_neighbors': [3, 5, 7]}\r\n return cls.generate_parameter_combinations(parameter_combinations, raw)" ]
[ "0.6619125", "0.6226987", "0.6226987", "0.60220295", "0.56563276", "0.557124", "0.5495559", "0.5387023", "0.537955", "0.5361835", "0.5359296", "0.53389424", "0.53347725", "0.5314394", "0.53115654", "0.52742255", "0.5228215", "0.5208534", "0.51974314", "0.5195309", "0.5190603", "0.5171711", "0.5150364", "0.51412547", "0.51395464", "0.5133434", "0.5131028", "0.5098084", "0.50757784", "0.5050293", "0.50388575", "0.5034495", "0.50221926", "0.5003744", "0.49902967", "0.4972976", "0.49688527", "0.4966257", "0.49643832", "0.49620396", "0.49569193", "0.49549103", "0.49548286", "0.49417958", "0.49324444", "0.49181673", "0.4894317", "0.4894317", "0.48812363", "0.48794597", "0.48664513", "0.485488", "0.48425236", "0.48401543", "0.4838348", "0.48302767", "0.48241743", "0.4815987", "0.48054525", "0.48017493", "0.47911468", "0.4788105", "0.47648588", "0.47599316", "0.47590238", "0.47458774", "0.47451437", "0.4738173", "0.4728178", "0.47266215", "0.4723175", "0.47201964", "0.47153935", "0.4707261", "0.4705869", "0.4703054", "0.4700122", "0.46939945", "0.46911547", "0.46906036", "0.46893162", "0.4686629", "0.46840543", "0.4684051", "0.46708792", "0.46683303", "0.4668084", "0.4664631", "0.46606326", "0.4660543", "0.46592486", "0.4655625", "0.46554217", "0.46554217", "0.46455213", "0.46350756", "0.46289366", "0.4618831", "0.46163946", "0.4612389" ]
0.8191257
0
Disable filter subsetting, allowing the form to render the filterset. Note that this decreases performance and should only be used when rendering a form, such as with DRF's browsable API.
def disable_subset(cls, *, depth=0):
    if not issubclass(cls, SubsetDisabledMixin):
        cls = type('SubsetDisabled%s' % cls.__name__, (SubsetDisabledMixin, cls), {})

    # recursively disable subset for related filtersets
    if depth > 0:
        # shallow copy to prevent modifying original `base_filters`
        cls.base_filters = cls.base_filters.copy()

        # deepcopy RelateFilter to prevent modifying original `.filterset`
        for name in cls.related_filters:
            f = copy.deepcopy(cls.base_filters[name])
            f.filterset = f.filterset.disable_subset(depth=depth - 1)
            cls.base_filters[name] = f

    return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allow_filtering(self):\r\n clone = copy.deepcopy(self)\r\n clone._allow_filtering = True\r\n return clone", "def non_hidden(self):\n return self.filter(hidden=False)", "def non_hidden(self):\n return self.filter(hidden=False)", "def __disableSearchEdit(self):\n self.__searchEdit.setEnabled(False)", "def exclude(self, *args, **kwargs):\n return self.filter(~F(*args, **kwargs))", "def __disableControls(self):\n self.ignoreAll()", "def reset_instances_filter(self):\n page_instances = self.page_instances()\n page_instances.field_filter_instances.value = ''\n page_instances.button_filter_instances.click()", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def filter_queryset(self, queryset, view=None):\n queryset = super().filter_queryset(queryset.only(\"id\", \"shared\"))\n form_pk = self.kwargs.get(self.lookup_field)\n\n if form_pk:\n try:\n int(form_pk)\n except ValueError as e:\n if form_pk == self.public_data_endpoint:\n queryset = self._get_public_forms_queryset()\n else:\n raise ParseError(_(f\"Invalid pk {form_pk}\")) from e\n else:\n queryset = self._filtered_or_shared_queryset(queryset, form_pk)\n else:\n tags = self.request.query_params.get(\"tags\")\n not_tagged = self.request.query_params.get(\"not_tagged\")\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n queryset = queryset.filter(tags__name__in=tags)\n if not_tagged and isinstance(not_tagged, six.string_types):\n not_tagged = not_tagged.split(\",\")\n queryset = queryset.exclude(tags__name__in=not_tagged)\n\n return queryset", "def filter_tracked(self, queryset, name, value):\n q_batch = Q(batch=None) | Q(batch='')\n q_serial = Q(serial=None) | Q(serial='')\n\n if str2bool(value):\n return queryset.exclude(q_batch & q_serial)\n else:\n return queryset.filter(q_batch & q_serial)", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def filter_not(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QCombination)):\n self.filter(QNot(arguments[0]))\n else:\n self.filter(QNot(Q(**kwargs)))\n\n return self", "def filter_queryset(self, request, queryset, view):\n\n if view.action == \"list\":\n # Return widgets from xform user has perms to\n return self._xform_filter_queryset(request, queryset, view, \"object_id\")\n\n return super().filter_queryset(request, queryset, view)", "def filter_queryset(self, request, queryset, view):\n if request and request.user.is_anonymous:\n return queryset.filter(shared=True)\n\n return queryset", "def disable_vae_slicing(self):\n self.vae.disable_slicing()", "def filter_serialized(self, queryset, name, value):\n q = Q(serial=None) | Q(serial='')\n\n if str2bool(value):\n return queryset.exclude(q)\n else:\n return queryset.filter(q)", "def disable_traceback_filtering():\n global _ENABLE_TRACEBACK_FILTERING\n _ENABLE_TRACEBACK_FILTERING.value = False", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n if applicable_filters:\n queryset = queryset.filter(applicable_filters)\n if 
applicable_exclusions:\n queryset = queryset.exclude(applicable_exclusions)\n return queryset", "def disable(self):\n return self.enable(False)", "def __filterIndices(self, indexFilter):\n if '*' in indexFilter:\n self.__index.filterIndices(indexFilter, indexFilter)\n else:\n self.__index.filterIndices(indexFilter)", "def filter_allowed_fields(self):\n allowed_fields = super().filter_allowed_fields\n # Remove assignment_id\n allowed_fields.remove('assignment_id')\n return allowed_fields", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = [IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def remove_crds_filter(self, filter):\n if filter in self.filters:\n self.filters.remove(filter)", "def filter_has_batch(self, queryset, name, value):\n q = Q(batch=None) | Q(batch='')\n\n if str2bool(value):\n return queryset.exclude(q)\n else:\n return queryset.filter(q)", "def filter_queryset(self, request, queryset, view):\n if str(request.query_params.get(\"orgs\")).lower() == \"false\":\n organization_user_ids = OrganizationProfile.objects.values_list(\n \"user__id\", flat=True\n )\n queryset = queryset.exclude(id__in=organization_user_ids)\n\n return queryset", "def __invert__(self):\n not_filter = proto.FilterExpression()\n not_filter.filter_not.filter_expression.MergeFrom(self.filter)\n self.filter = not_filter\n return self", "def get_queryset(self):\n q_kwargs = {\n \"awardsearch__latest_transaction__isnull\": True,\n \"date_signed__isnull\": True,\n \"total_obligation__isnull\": True,\n }\n\n return super(AwardManager, self).get_queryset().filter(~Q(**q_kwargs))", "def filter_queryset(self, request, queryset, view):\n if request.user.is_anonymous:\n return queryset.filter(Q(shared_data=True))\n return queryset", "def __editDeselectAll(self):\n self.activeWindow().selectAll(False)", "def eventFilter(self, o: QObject, e: QEvent):\n if e.type() in InteractionEventFilter.filter:\n e.ignore()\n return True\n return super().eventFilter(o, e)", "def unblock_widgets(self):\n\n for element in self.widget_elements:\n element.setDisabled(False)\n\n self.listWidget.setFocus()", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n if common.settings.stock_expiry_enabled():\n\n # Filter by 'expiry date'\n expired_date_lte = params.get('expiry_date_lte', None)\n if expired_date_lte is not None:\n try:\n date_lte = datetime.fromisoformat(expired_date_lte)\n queryset = queryset.filter(expiry_date__lte=date_lte)\n except (ValueError, TypeError):\n pass\n\n expiry_date_gte = params.get('expiry_date_gte', None)\n if expiry_date_gte is not None:\n try:\n date_gte = datetime.fromisoformat(expiry_date_gte)\n queryset = queryset.filter(expiry_date__gte=date_gte)\n except (ValueError, TypeError):\n pass\n\n # Filter by 'stale' status\n stale = params.get('stale', None)\n\n if stale is not None:\n stale = str2bool(stale)\n\n # How many days to account for \"staleness\"?\n stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')\n\n if stale_days > 0:\n stale_date = datetime.now().date() + timedelta(days=stale_days)\n\n stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)\n\n if stale:\n queryset = queryset.filter(stale_filter)\n else:\n queryset = queryset.exclude(stale_filter)\n\n # Exclude stock item tree\n exclude_tree = params.get('exclude_tree', 
None)\n\n if exclude_tree is not None:\n try:\n item = StockItem.objects.get(pk=exclude_tree)\n\n queryset = queryset.exclude(\n pk__in=[it.pk for it in item.get_descendants(include_self=True)]\n )\n\n except (ValueError, StockItem.DoesNotExist):\n pass\n\n # Filter by \"part tree\" - only allow parts within a given variant tree\n part_tree = params.get('part_tree', None)\n\n if part_tree is not None:\n try:\n part = Part.objects.get(pk=part_tree)\n\n if part.tree_id is not None:\n queryset = queryset.filter(part__tree_id=part.tree_id)\n except Exception:\n pass\n\n # Exclude StockItems which are already allocated to a particular SalesOrder\n exclude_so_allocation = params.get('exclude_so_allocation', None)\n\n if exclude_so_allocation is not None:\n\n try:\n order = SalesOrder.objects.get(pk=exclude_so_allocation)\n\n # Grab all the active SalesOrderAllocations for this order\n allocations = SalesOrderAllocation.objects.filter(\n line__pk__in=[\n line.pk for line in order.lines.all()\n ]\n )\n\n # Exclude any stock item which is already allocated to the sales order\n queryset = queryset.exclude(\n pk__in=[\n a.item.pk for a in allocations\n ]\n )\n\n except (ValueError, SalesOrder.DoesNotExist):\n pass\n\n # Does the client wish to filter by the Part ID?\n part_id = params.get('part', None)\n\n if part_id:\n try:\n part = Part.objects.get(pk=part_id)\n\n # Do we wish to filter *just* for this part, or also for parts *under* this one?\n include_variants = str2bool(params.get('include_variants', True))\n\n if include_variants:\n # Filter by any parts \"under\" the given part\n parts = part.get_descendants(include_self=True)\n\n queryset = queryset.filter(part__in=parts)\n\n else:\n queryset = queryset.filter(part=part)\n\n except (ValueError, Part.DoesNotExist):\n raise ValidationError({\"part\": \"Invalid Part ID specified\"})\n\n # Does the client wish to filter by stock location?\n loc_id = params.get('location', None)\n\n cascade = str2bool(params.get('cascade', True))\n\n if loc_id is not None:\n\n # Filter by 'null' location (i.e. 
top-level items)\n if isNull(loc_id):\n if not cascade:\n queryset = queryset.filter(location=None)\n else:\n try:\n # If '?cascade=true' then include items which exist in sub-locations\n if cascade:\n location = StockLocation.objects.get(pk=loc_id)\n queryset = queryset.filter(location__in=location.getUniqueChildren())\n else:\n queryset = queryset.filter(location=loc_id)\n\n except (ValueError, StockLocation.DoesNotExist):\n pass\n\n # Does the client wish to filter by part category?\n cat_id = params.get('category', None)\n\n if cat_id:\n try:\n category = PartCategory.objects.get(pk=cat_id)\n queryset = queryset.filter(part__category__in=category.getUniqueChildren())\n\n except (ValueError, PartCategory.DoesNotExist):\n raise ValidationError({\"category\": \"Invalid category id specified\"})\n\n # Does the client wish to filter by BomItem\n bom_item_id = params.get('bom_item', None)\n\n if bom_item_id is not None:\n try:\n bom_item = BomItem.objects.get(pk=bom_item_id)\n\n queryset = queryset.filter(bom_item.get_stock_filter())\n\n except (ValueError, BomItem.DoesNotExist):\n pass\n\n # Filter by company (either manufacturer or supplier)\n company = params.get('company', None)\n\n if company is not None:\n queryset = queryset.filter(Q(supplier_part__supplier=company) | Q(supplier_part__manufacturer_part__manufacturer=company))\n\n return queryset", "def disable(self):\n self.enabled = False", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def filter(self):\n return self._filter", "def setDefaultFilter(self):\n self.logsItem.setDefaultFilter()", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.exclude(moyen_id=24)\n if self.value() == '0':\n return queryset.filter(moyen_id=24)", "def default_filters(self, query) -> object:\n user = self.request.user\n model = self.model\n custom_filter = self.request.params.get('_custom_filter')\n if 'g:professionals' in user.groups and custom_filter == 'pool':\n # disable security for this custom filter\n self.enable_security = False\n professional = Professional.get(user.id)\n pool_ids = [item.id for item in professional.pools]\n query = query.filter(\n model.pool_id.in_(pool_ids),\n model.state == 'published'\n )\n elif custom_filter == 'late_first_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.scheduled_datetime <= date_limit,\n model.state == 'awaiting_assets',\n model.last_approval_date.is_(None)\n )\n elif custom_filter == 'late_re_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.last_approval_date <= date_limit,\n model.state == 'awaiting_assets',\n model.submission_path.isnot(None),\n )\n return query", "def 
negated(self):\n op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or\n return QueryCompound(*self.__queries, op=op)", "def filter_sent_to_customer(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(customer=None)\n else:\n return queryset.filter(customer=None)", "def no_filter(blast_subject_entry):\r\n return True", "def _disable(self):\n self.enabled = False", "def filter(self, *q, **kwargs):\n return self._filter_or_exclude(*q, **kwargs)", "def exclude_for_sale(cls, qs):\n book_model = get_model('books', 'Book')\n books_for_sale = book_model.objects.filter(book_type=book_model.TO_SELL)\n return qs.exclude(book__pk__in=books_for_sale)", "def disable(self):\n super().disable()", "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, self)\n return queryset", "def exclude_persons_without_registrations(request, filtered_queryset, statuses_that_disallow_empty_registrations=['A']):\n status_disallows_people_with_no_registrations = \\\n request.GET.get('status') in statuses_that_disallow_empty_registrations \n is_filtered_by_bbox = request.GET.get('sw_long') != None \\\n and request.GET.get('sw_lat') != None \\\n and request.GET.get('ne_long') != None\\\n and request.GET.get('ne_lat') != None\n is_filtered_by_city = request.GET.get('city', \"\") != \"\"\n if is_filtered_by_bbox or is_filtered_by_city or \\\n status_disallows_people_with_no_registrations:\n filtered_queryset = [f for f in filtered_queryset if len(f.registrations.all()) > 0]\n return filtered_queryset", "def disable(self):\n self._enabled = False", "def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)", "def exclude_list(self):\n pass", "def disable(self):\n for val in data:\n val.disable()\n self.enabled = False", "def filter_selection_set(info: GraphQLResolveInfo):\n from graphql import Location\n from .pyutils import unfreeze\n\n excluded_field_nodes = []\n\n def _should_include(field_node: FieldNode):\n if not field_node.name:\n # Unknown field_node type\n return True\n if field_node.name.value == \"subscription_id\":\n return True\n\n # Location is a highly nested AST type\n excluded_field_nodes.append(unfreeze(field_node, ignore_types=[Location]))\n return False\n\n info.field_nodes[0].selection_set.selections = [\n x for x in info.field_nodes[0].selection_set.selections if _should_include(x)]\n\n return excluded_field_nodes", "def set_restriction_filters(self):\n self.restriction_filters[\"pk__exact\"] = self.request.user.pk", "def exclude(self):\n\n self.eod.value = 0\n self.public.value = 0", "def disable():\n Plotter.enable = False", "def filter(self, target_model):\n # return filter_dict_to_target_model(self._axl_data, target_model)\n super().__setattr__('_axl_data', filter_dict_to_target_model(self._axl_data, target_model))\n return self", "def filter_search(self, request, search, view):\n raise NotImplementedError(\".filter_search() must be overridden.\")", "def filter_queryset(self, request, queryset, view):\n applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))\n return self.apply_filters(\n queryset=queryset,\n applicable_filters=self.process_filters(applicable_filters, queryset, view),\n applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)\n )", 
"def filter_non_traffic_charges_grid(self, column_name, filter_item_text):\n self.grid_filter_with_textbox(self.non_traffic_charges_grid_div_id, column_name, filter_item_text)", "def disable(self):\n self.SetInteractive(0)", "def filter_clear(client, args):\n client.context.set_query([])", "def filter(self, *args, **kwargs):\n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)", "def disable(self) -> None:", "def disable(self):\r\n self.update(enabled=False)", "def condition_filters(self):\r\n return filters.Filters(self)", "def queryset(self, request, queryset):\n if self.value() == 'Y':\n return queryset.exclude(cwr_exports__count=0)\n elif self.value() == 'N':\n return queryset.filter(cwr_exports__count=0)", "def _filter_in_request(self):\n pass", "def filter_depleted(self, queryset, name, value):\n if str2bool(value):\n return queryset.filter(quantity__lte=0)\n else:\n return queryset.exclude(quantity__lte=0)", "def filter(self, filters):", "def filters_active(self):\n if self.is_valid():\n return bool(\n {\n k: v\n for k, v in self.cleaned_data.items()\n if k not in [\"q\", \"sort\"] and bool(v)\n }\n )\n return False", "def queryset(self, request, queryset):\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)\n if self.value() == 'active':\n return queryset.filter(ignored_at=None)", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def remove_filter_field(self, field):\n if self.filters:\n category_filter = self.filters.get(str(field.category.id), None)\n\n if category_filter:\n field_filter = category_filter.pop(field.key, None)\n\n if field_filter:\n self.save()", "def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))", "def filter(self, filter_dict):\n pass", "def __enableSearchEdit(self):\n self.__searchEdit.setEnabled(True)\n self.__filterIndices(self.__searchEdit.text())", "def event_ball_search_disable(self, **kwargs):\n del kwargs\n self.ball_search.disable()", "def filter_queryset(self, request, queryset, view):\n form_id = view.kwargs.get(view.lookup_field, view.kwargs.get(\"xform_pk\"))\n lookup_field = view.lookup_field\n\n queryset = queryset.filter(deleted_at=None)\n if request.user.is_anonymous:\n return queryset\n\n if form_id:\n if lookup_field == \"pk\":\n int_or_parse_error(\n form_id, \"Invalid form ID. 
It must be a positive integer\"\n )\n\n try:\n if lookup_field == \"uuid\":\n form_id = UUID(form_id)\n form = queryset.get(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))\n else:\n xform_kwargs = {lookup_field: form_id}\n form = queryset.get(**xform_kwargs)\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n # Check if form is public and return it\n if form.shared:\n if lookup_field == \"uuid\":\n return queryset.filter(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))\n return queryset.filter(Q(**xform_kwargs))\n\n return super().filter_queryset(request, queryset, view)", "def disable(self):\n pass", "def disable(self):", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def removeAutoSaveFilter(filter):", "def disable_depth_of_field(self):\n self._render_passes.disable_depth_of_field_pass()", "def ensure_hidden(self):\n self.set_visible(False)", "def should_filter_by_semester(self):\n return self.kwargs.get('filter_by_semester', True)", "def filter_clear(self, samples=None, subset=None):\n if samples is not None:\n subset = self.make_subset(samples)\n elif not hasattr(self, 'subsets'):\n self.make_subset()\n\n if subset is None:\n samples = self.subsets['All_Analyses']\n else:\n try:\n samples = self.subsets[subset]\n except:\n raise ValueError((\"Subset '{:s}' does not .\".format(subset) +\n \"exist.\\nRun 'make_subset' to create a\" +\n \"subset.\"))\n\n for s in samples:\n self.data_dict[s].filt.clear()", "def set_exclusive(self, exclusive):\n self.widget.setExclusive(exclusive)", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def hide(self):\n self.set_visible(False)", "def condition_group_options(self):\n if \"no-groups\" in self.options and self.options[\"no-groups\"]:\n self.options[\"groups\"] = []\n if \"exclude-groups\" in self.options:\n del self.options[\"exclude-groups\"]\n\n return\n\n super().condition_group_options()", "def filter(self, *args, **kwargs):\n self._expand_pk(kwargs)\n return self._filter_or_exclude(False, *args, **kwargs)", "def Hide(self):\r\n \r\n return self.SetFlag(self.optionHidden, True)", "def toggle_culling(self):\n self.view['cull'] = not self.view['cull']\n self.update_flags()", "def filter_excluded_paths(self, filter_excluded_paths: ConfigNodePropertyArray):\n\n self._filter_excluded_paths = filter_excluded_paths", "def exclude(self, *q, **kwargs):\n return self._filter_or_exclude(*q, _inverse=True, **kwargs)", "def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)", "def exclude(self, **query):\n\n if self._query != '':\n query = '(%s) AND NOT (%s)' % (self._query, self._build_query(**query))\n else:\n query = 'NOT (%s)' % self._build_query(**query)\n\n return QueryList(self.model,\n query,\n order_by=self._order_by,\n fields=self._fields,\n limit=self._limit,\n offset=self._offset,\n links_to_names=self._links_to_names)", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)" ]
[ "0.6328865", "0.61500955", "0.61500955", "0.61072856", "0.5808935", "0.57838905", "0.57728505", "0.5675368", "0.56208265", "0.5614127", "0.55958295", "0.55693483", "0.5554618", "0.5534325", "0.5480112", "0.5472331", "0.5463367", "0.5458596", "0.5456048", "0.54518133", "0.54490405", "0.5386114", "0.53682685", "0.5360309", "0.5353469", "0.53527874", "0.5349543", "0.53329515", "0.531803", "0.5315922", "0.5315135", "0.53138167", "0.5312335", "0.53037727", "0.52957064", "0.529209", "0.5289526", "0.52830744", "0.5268418", "0.52646804", "0.5254396", "0.52514946", "0.52434963", "0.52371943", "0.5231139", "0.5225057", "0.52235633", "0.5199626", "0.51988924", "0.5196256", "0.51947206", "0.51927984", "0.51815444", "0.5178158", "0.51686233", "0.5167894", "0.5161679", "0.51586986", "0.5147554", "0.5147105", "0.5146268", "0.51451635", "0.5143927", "0.5129824", "0.51260746", "0.51256454", "0.5123588", "0.5118968", "0.5117554", "0.5110237", "0.51082", "0.5103661", "0.5093493", "0.50905037", "0.5089235", "0.50815266", "0.50753313", "0.50744456", "0.5071877", "0.5067305", "0.5062562", "0.5062528", "0.50587785", "0.5054697", "0.50317734", "0.5026347", "0.5024456", "0.5022943", "0.5022639", "0.5021033", "0.5018757", "0.50151086", "0.5012226", "0.50075954", "0.49983823", "0.4987504", "0.4986567", "0.4986213", "0.49832743", "0.4982414" ]
0.6411095
0
Get the filter name for the request data parameter.
def get_param_filter_name(cls, param, rel=None):
    # check for empty param
    if not param:
        return param

    # strip the rel prefix from the param name.
    prefix = '%s%s' % (rel or '', LOOKUP_SEP)
    if rel and param.startswith(prefix):
        param = param[len(prefix):]

    # Attempt to match against filters with lookups first. (username__endswith)
    if param in cls.base_filters:
        return param

    # Attempt to match against exclusion filters
    if param[-1] == '!' and param[:-1] in cls.base_filters:
        return param[:-1]

    # Match against relationships. (author__username__endswith).
    # Preference more specific filters. eg, `note__author` over `note`.
    for name in reversed(sorted(cls.related_filters)):
        # we need to match against '__' to prevent eager matching against
        # like names. eg, note vs note2. Exact matches are handled above.
        if param.startswith("%s%s" % (name, LOOKUP_SEP)):
            return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filter_name(self):\n pass", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def name(self) -> StringFilter:\n return self.__name", "def filter(self) -> Optional[str]:\n return pulumi.get(self, \"filter\")", "def get_json_callback_parameter_name(self):\n return self.request.GET.get(self.get_json_callback_parameter(), '')", "def filter(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"filter\")", "def set_FilterName(self, value):\n super(GetCallbackDataInputSet, self)._set_input('FilterName', value)", "def filter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"filter\")", "def param_name(self):\n return self._param_name", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def getFilter(self):\n\n return self.filter", "def get_filters_names_key(self, project, metric_name):\n return u\"{0}-metrics-filters:{1}\".format(project, to_unicode(metric_name))", "def getFilterNameFromInt(cls, num):\n return cls.SUPPORTED_FILTERS[num]", "def get_name(self, ):\n return self.get_parameter('name')", "def get_current_filters(self) -> str:\r\n return self.__filters_string", "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name == \"search\":\n return \"%24search\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name == \"search\":\n return \"%24search\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "def _get_full_filter_name(name, filter_path):\n filename = os.path.basename(filter_path)\n file_only = os.path.splitext(filename)[0]\n filter_name = \"{}.{}\".format(file_only, name)\n return filter_name", "def get_filter_values(self): # pylint: disable=too-many-return-statements\n if self.filter_mhr_number:\n return MHR_NUMBER_PARAM, self.filter_mhr_number\n if self.filter_registration_type:\n return REG_TYPE_PARAM, self.filter_registration_type\n if self.filter_reg_start_date:\n return START_TS_PARAM, self.filter_reg_start_date\n if self.filter_status_type:\n return STATUS_PARAM, self.filter_status_type\n if self.filter_client_reference_id:\n return CLIENT_REF_PARAM, self.filter_client_reference_id\n if self.filter_submitting_name:\n return SUBMITTING_NAME_PARAM, self.filter_submitting_name\n if self.filter_username:\n return USER_NAME_PARAM, self.filter_username\n return None, None", "def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # 
this parameter does not need dynamic completion\n # this parameter does not need validation\n return name", "def userfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"userfilter\")", "def userfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"userfilter\")", "def _get_param_from_request(self, nombreParametro):\n\t\treturn request.args.get(nombreParametro)", "def manifest_filter(self, name):\n if not name:\n return self._data.index\n else:\n name = self._verify_filter_name(name, None)\n if not self.is_filter(name):\n raise KeyError('{} is no valid filter-variable.'.format(name))\n return self.take({name: 0})", "def current_filter(self):\n return self._proxy.get(\"current_filter\", \"filterwheel\")", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def parameterName(self):\n return self.name()", "def filter(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"filter\")", "def getName(self):\n return _libsbml.Parameter_getName(self)", "def get_filter(self) -> str:\n\n return \";;\".join(self.filters)", "def getparameters(filter,title = \"\"):\n vardic = filter.__dict__\n for i in list(vardic.keys()):\n if vardic[i] is not None:\n title += \" \"+i+\": \"\n title += str(vardic[i])+\",\"\n return title[:-1]", "def get_name(self, request, *args, **kwargs):\n raise NotImplementedError", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def _get_name(self):\n name_string = '%s_%s' % (\n self.parameter_type,\n self.parameter_value_type)\n if self.paneltemplateparametermarker_set.count() > 0:\n marker_string = \"_\".join(sorted([m.marker.marker_abbreviation for m in self.paneltemplateparametermarker_set.all()]))\n name_string += '_' + marker_string\n if self.fluorochrome:\n name_string += '_' + self.fluorochrome.fluorochrome_abbreviation\n return name_string", "def _get_filter_args(self, filter):\n kwargs = {}\n for arg in filter.args[1:]:\n kwargs[arg] = input(f'Type the {arg.replace(\"_\", \" \").title()}: ')\n return kwargs", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def _name(self):\n return self._arguments[0].split('(')[0]", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def get_hst_filter(header):\n if 'FILTER' in header:\n return header['FILTER'].upper()\n \n if header['INSTRUME'].strip() == 'ACS':\n for i in [1,2]:\n filter_i = header['FILTER{0:d}'.format(i)]\n if 'CLEAR' in filter_i:\n continue\n else:\n filter = filter_i\n \n elif header['INSTRUME'] == 'WFPC2':\n filter = header['FILTNAM1']\n else:\n raise KeyError ('Filter keyword not found for instrument {0}'.format(header['INSTRUME']))\n \n return filter.upper()", "def _name(self):\n return self.arguments[0].split('(')[0]", "def groupfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"groupfilter\")", "def groupfilter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"groupfilter\")", "def getFilter(self, type: int) -> int:\n ...", "def name(self):\n return self._data.get('name')", "def name(self):\n 
return self._data.get('name')", "def get_filter_operator(self):\n return self.instrument.get_filter_operator()", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def get_filter_parameters(self):\n if not self.should_filter():\n return []\n\n fields = []\n for filter_backend in self.view.filter_backends:\n fields += self.get_filter_backend_parameters(filter_backend())\n\n return fields", "def get_filter_pillar(filter_name, pillar_key=\"acl\", pillarenv=None, saltenv=None):\n pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)\n return _lookup_element(pillar_cfg, filter_name)", "def filterToName(type: int) -> unicode:\n ...", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, 
\"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name\")", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def name(self):\r\n return self.data[\"name\"]", "def get_filters(self, name=False):\n filtfile_list = self.get_value(\"FILTER_LIST\").split(\",\")\n if not name:\n return filtfile_list\n return [io.filterfile_to_filtername(filt) for filt in filtfile_list]", "def fieldName(self):\n return self._field.name", "def _get_name_from_url(self, request):\n\n format = request.GET.get('format', None)\n if not format:\n match = self._format_query_pattern.match(request.path)\n if match and match.group('format'):\n format = match.group('format')\n return format", "def filter_name(self, name):\n return self.form.set_value(\"generating station search\", name)", "def name(self):\n return self.raw_resource[\"name\"]", "def resource_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_name\")" ]
[ "0.79879385", "0.73243344", "0.675019", "0.65612745", "0.6520124", "0.6505116", "0.64178914", "0.6189231", "0.6183338", "0.61544156", "0.6082367", "0.60678583", "0.6023081", "0.60075915", "0.6005047", "0.59691584", "0.59691584", "0.587701", "0.5817631", "0.5786401", "0.5765724", "0.5765724", "0.5761649", "0.57418895", "0.56908816", "0.5627673", "0.5609342", "0.5592728", "0.55813324", "0.554437", "0.55350256", "0.5532371", "0.55322784", "0.55139995", "0.5497845", "0.5473963", "0.5472612", "0.5469973", "0.5469973", "0.5447275", "0.54416734", "0.54359174", "0.54359174", "0.54305613", "0.5412308", "0.5412308", "0.5366126", "0.53659606", "0.535821", "0.5338131", "0.5334175", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.53267664", "0.5316877", "0.5307435", "0.5303865", "0.53007704", "0.5297264", "0.52960736", "0.52817285", "0.5281525" ]
0.6619432
3
Build a set of filters based on the request data. This currently includes only filter exclusion/negation.
def get_request_filters(self):
    # build the compiled set of all filters
    requested_filters = OrderedDict()
    for filter_name, f in self.filters.items():
        requested_filters[filter_name] = f

        # exclusion params
        exclude_name = '%s!' % filter_name
        if related(self, exclude_name) in self.data:
            # deepcopy the *base* filter to prevent copying of model & parent
            f_copy = copy.deepcopy(self.base_filters[filter_name])
            f_copy.parent = f.parent
            f_copy.model = f.model
            f_copy.exclude = not f.exclude

            requested_filters[exclude_name] = f_copy

    return requested_filters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters", "def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters", "def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' 
in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters", "def get_filters(self, request, **resources):\r\n filters = dict()\r\n\r\n if not self._meta.fields:\r\n return filters\r\n\r\n for field in request.GET.iterkeys():\r\n tokens = field.split(LOOKUP_SEP)\r\n field_name = tokens[0]\r\n\r\n if not field_name in self._meta.fields:\r\n continue\r\n\r\n exclude = False\r\n if tokens[-1] == 'not':\r\n exclude = True\r\n tokens.pop()\r\n\r\n converter = self._meta.model._meta.get_field(\r\n field_name).to_python if len(tokens) == 1 else lambda v: v\r\n value = map(converter, request.GET.getlist(field))\r\n\r\n if len(value) > 1:\r\n tokens.append('in')\r\n else:\r\n value = value.pop()\r\n\r\n filters[LOOKUP_SEP.join(tokens)] = (value, exclude)\r\n\r\n return filters", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = 
[CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", "def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def _build_filters(self, criteria: Q):\n composed_query = query.Q()\n\n if criteria.connector == criteria.AND:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query & self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query & ~lookup.as_expression()\n else:\n composed_query = composed_query & lookup.as_expression()\n else:\n for child in criteria.children:\n if isinstance(child, Q):\n composed_query = composed_query | self._build_filters(child)\n else:\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n lookup = lookup_class(stripped_key, child[1])\n if criteria.negated:\n composed_query = composed_query | ~lookup.as_expression()\n else:\n composed_query = composed_query | lookup.as_expression()\n\n return composed_query", "def filter(self, filters):", "def create_filters(date=None, start_date=None, end_date=None,\n distance_min=None, distance_max=None,\n velocity_min=None, velocity_max=None,\n diameter_min=None, diameter_max=None,\n hazardous=None):\n # Using Operator functions to compare the input parameters\n # with passed values.\n filters = []\n if date:\n filters.append(DateFilter(operator.eq, date))\n\n if start_date:\n filters.append(DateFilter(operator.ge, start_date))\n\n if end_date:\n filters.append(DateFilter(operator.le, end_date))\n\n if diameter_min:\n filters.append(DiameterFilter(operator.ge, float(diameter_min)))\n\n if diameter_max:\n filters.append(DiameterFilter(operator.le, diameter_max))\n\n if hazardous is not None:\n filters.append(HazardousFilter(operator.eq, hazardous))\n\n if distance_min:\n filters.append(DistanecFilter(operator.ge, distance_min))\n\n if distance_max:\n 
filters.append(DistanecFilter(operator.le, distance_max))\n\n if velocity_min:\n filters.append(VelocityFilter(operator.ge, velocity_min))\n\n if velocity_max:\n filters.append(VelocityFilter(operator.le, velocity_max))\n\n return filters", "def _build_query_filters(self, query: dict, filters: list) -> dict:\n\n for filter_tuple in filters:\n if not isinstance(filter_tuple, tuple) or len(filter_tuple) != 3:\n LOG.error(\"polling_filters tuple %s : invalid format or does not contain 3 elements - skipping this filter\", filter_tuple)\n continue\n if isinstance(filter_tuple[2], list) :\n # If \"value\" is a list of values then create a rule (json object) for each \n # value and use \"OR\" condition.\n condition = {'condition': \"OR\",\n 'rules': []}\n for value in filter_tuple[2]:\n rule = {}\n # Prepend fieldname with \"table.\" string\n rule['field'] = f\"table.{filter_tuple[0]}\"\n rule['operator'] = filter_tuple[1]\n rule['value'] = value\n condition['rules'].append(rule)\n query['rules'].append(condition)\n else:\n # Create a single rule for this tuple\n rule = {}\n field_name = f\"table.{filter_tuple[0]}\"\n rule['field'] = field_name\n rule['operator'] = filter_tuple[1]\n rule['value'] = filter_tuple[2]\n query['rules'].append(rule)\n return query", "def std_filters():\n kwargs = {\n \"sentence_filters\":[punctuation_filter],\n \"word_filters\":[small_word_filter, stopword_filter, stemming_filter]\n }\n return kwargs", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def _split_filters(self, filters):\n # specifying ancestor_location returns an ANDFilter and does not have a column name\n # assume that it should go into inner filters\n complex_filters = [f for f in filters if not hasattr(f, 'column_name')]\n simple_filters = [f for f in filters if hasattr(f, 'column_name')]\n inner_filters = [f for f in simple_filters if f.column_name not in self.AGGREGATE_FILTERS]\n outer_filters = [f for f in simple_filters if f.column_name in self.AGGREGATE_FILTERS]\n return {\n 'inner': inner_filters + complex_filters,\n 'outer': outer_filters,\n }", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def _filter(\n self,\n data: List[dict],\n filters: List[Tuple[str, SupportedFilterType]],\n state_dataclass: StateSchema,\n detail: bool,\n ) -> List[dict]:\n filters = _convert_filters_type(filters, state_dataclass)\n result = []\n for datum in data:\n match = True\n for filter_column, filter_predicate, filter_value in filters:\n filterable_columns = state_dataclass.filterable_columns()\n filter_column = filter_column.lower()\n if filter_column not in filterable_columns:\n raise ValueError(\n f\"The given filter column {filter_column} is not supported. 
\"\n f\"Supported filter columns: {filterable_columns}\"\n )\n\n if filter_column not in datum:\n match = False\n elif filter_predicate == \"=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n # Case insensitive match for string filter values.\n match = datum[filter_column].lower() == filter_value.lower()\n else:\n match = datum[filter_column] == filter_value\n elif filter_predicate == \"!=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n match = datum[filter_column].lower() != filter_value.lower()\n else:\n match = datum[filter_column] != filter_value\n else:\n raise ValueError(\n f\"Unsupported filter predicate {filter_predicate} is given. \"\n \"Available predicates: =, !=.\"\n )\n\n if not match:\n break\n\n if match:\n result.append(filter_fields(datum, state_dataclass, detail))\n return result", "def process_filters(self, filters, queryset, view):\n return filters", "def build_filters(self, view, filters=None):\n query_builder = self.get_query_builder(backend=self, view=view)\n return query_builder.build_query(**(filters if filters else {}))", "def get_filters(self):", "def condition_filters(self):\r\n return filters.Filters(self)", "def create_filters(date=None, start_date=None, end_date=None,\n distance_min=None, distance_max=None,\n velocity_min=None, velocity_max=None,\n diameter_min=None, diameter_max=None,\n hazardous=None):\n filters = list()\n\n if date is not None:\n filters.append(DateFilter(operator.eq, date))\n if start_date is not None:\n filters.append(DateFilter(operator.ge, start_date))\n if end_date is not None:\n filters.append(DateFilter(operator.le, end_date))\n if distance_min is not None:\n filters.append(DistanceFilter(operator.ge, distance_min))\n if distance_max is not None:\n filters.append(DistanceFilter(operator.le, distance_max))\n if velocity_min is not None:\n filters.append(VelocityFilter(operator.ge, velocity_min))\n if velocity_max is not None:\n filters.append(VelocityFilter(operator.le, velocity_max))\n if diameter_min is not None:\n filters.append(DiameterFilter(operator.ge, diameter_min))\n if diameter_max is not None:\n filters.append(DiameterFilter(operator.le, diameter_max))\n if hazardous is not None:\n filters.append(HazardousFilter(operator.eq, hazardous))\n\n return tuple(filters)", "def build_filters(self, filters = None):\n if filters is None:\n filters = {}\n \n orm_filters = super(UserResource, self).build_filters(filters)\n \n if \"area\" in filters:\n area_id = filters['area']\n area = Area.objects.get(id = area_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentArea = area)]\n \n elif \"environment\" in filters:\n environment_id = filters['environment']\n environment = Environment.objects.get(id = environment_id)\n \n #checked_in_user_profiles = [user_ctx.user for user_ctx in UserContext.objects.filter(currentArea = area)]\n orm_filters[\"pk__in\"] = [user_ctx.user.pk \n for user_ctx in UserContext.objects.filter(currentEnvironment = environment)]\n \n return orm_filters", "def filter_queryset(self, request, queryset, view):\n applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))\n return self.apply_filters(\n queryset=queryset,\n applicable_filters=self.process_filters(applicable_filters, queryset, view),\n 
applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)\n )", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def get_filters(self):\n location_id = self.cleaned_data.get('location_id')\n if (\n location_id\n and user_can_access_location_id(self.domain, self.user, location_id)\n ):\n location_ids = [location_id]\n else:\n location_ids = []\n\n filters = {\n 'location_ids': location_ids,\n 'selected_location_only': self.cleaned_data.get('selected_location_only', False)\n }\n location_status_active = self.cleaned_data.get('location_status_active', None)\n\n if location_status_active is not None:\n filters['is_archived'] = (not location_status_active)\n\n return filters", "def filter(self, filters):\r\n # because http.Request needs params to be a dict of strings to strings\r\n # (roughly) and since BitBucket wants repeated parameters to express\r\n # OR, we'll do the quoting by hand ourselves\r\n def flatten_conditions(filters):\r\n for key, val in filters.items():\r\n if isinstance(val, (list, tuple)):\r\n for v in val:\r\n yield (port.to_b(key), port.to_b(v))\r\n else:\r\n yield (port.to_b(key), port.to_b(val))\r\n\r\n to_encode = tuple(flatten_conditions(filters))\r\n qs = port.urlencode(to_encode)\r\n\r\n url = '{0}/?{1}'.format(self.get_url(), qs)\r\n return http.Request('GET', url), parsers.parse_json", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def get_filters(**params):\n filters = {}\n\n for column, data in params.items():\n if data is not None:\n if isinstance(data, six.string_types):\n f_type, value = _extract_filter_type_and_value(data)\n\n create_or_update_filter(column, value, f_type, filters)\n else:\n create_or_update_filter(column, data, _filter=filters)\n\n return filters", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n if applicable_filters:\n queryset = queryset.filter(applicable_filters)\n if applicable_exclusions:\n queryset = queryset.exclude(applicable_exclusions)\n return queryset", "def group_filters(self, per_page=None, page=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'filters')\r\n return http.Request('GET', url, params), parsers.parse_json", "def get_filter(cls, filter, odata=False):\n\n if filter:\n #www.odata.org/libraries\n if odata:\n lst_filter = []\n if 'and' in filter:\n tmp_filters = filter.split('and')\n else:\n tmp_filters = [filter, ]\n for tmp_filter in tmp_filters:\n if 'eq' in tmp_filter:\n tmp_filter = 
tmp_filter.replace('eq', '=')\n elif 'gt' in tmp_filter:\n tmp_filter = tmp_filter.raplace('gt', '>')\n elif 'lt' in tmp_filter:\n tmp_filter = tmp_filter.replace('lt', '>')\n lst_filter.append(tmp_filter.split())\n return lst_filter\n else:\n dict_filter = {}\n for lst_attribut in filter.split(','):\n attribut = lst_attribut.split(':')\n if \"/\" in attribut[1]:\n dict_filter[attribut[0]] = attribut[1].split('/')\n else:\n if attribut[1] == 'false':\n dict_filter[attribut[0]] = False\n elif attribut[1] == 'true':\n dict_filter[attribut[0]] = True\n else:\n dict_filter[attribut[0]] = attribut[1]\n return dict_filter\n return False", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def _create_filter_object(form_data: Dict) -> Q:\n filter_object = Q(title__icontains=form_data[\"title\"])\n filter_object &= Q(author__icontains=form_data[\"author\"])\n filter_object &= Q(\n publication_language__icontains=form_data[\"publication_language\"]\n )\n if form_data[\"publication_date_start\"]:\n filter_object &= Q(\n publication_date__gte=form_data[\"publication_date_start\"]\n )\n if form_data[\"publication_date_end\"]:\n filter_object &= Q(publication_date__lte=form_data[\"publication_date_end\"])\n return filter_object", "def build_query_filters(**kwargs):\n queryTarget = kwargs.get(\"queryTarget\", None)\n targetSubtreeClass = kwargs.get(\"targetSubtreeClass\", None)\n queryTargetFilter = kwargs.get(\"queryTargetFilter\", None)\n rspSubtree = kwargs.get(\"rspSubtree\", None)\n rspSubtreeClass = kwargs.get(\"rspSubtreeClass\", None)\n rspSubtreeInclude = kwargs.get(\"rspSubtreeInclude\", None)\n rspPropInclude = kwargs.get(\"rspPropInclude\", None)\n orderBy = kwargs.get(\"orderBy\", None)\n opts = \"\"\n if queryTarget is not None:\n opts+= \"&query-target=%s\" % queryTarget\n if targetSubtreeClass is not None:\n opts+= \"&target-subtree-class=%s\" % targetSubtreeClass\n if queryTargetFilter is not None:\n opts+= \"&query-target-filter=%s\" % queryTargetFilter\n if rspSubtree is not None:\n opts+= \"&rsp-subtree=%s\" % rspSubtree\n if rspSubtreeClass is not None:\n opts+= \"&rsp-subtree-class=%s\" % rspSubtreeClass\n if rspSubtreeInclude is not None:\n opts+= \"&rsp-subtree-include=%s\" % rspSubtreeInclude\n if rspPropInclude is not None:\n opts+= \"&rsp-prop-include=%s\" % rspPropInclude\n if orderBy is not None:\n opts+= \"&order-by=%s\" % orderBy\n\n if len(opts)>0: opts = \"?%s\" % opts.strip(\"&\")\n return opts", "def get_filters():\n \n \"\"\"\"\"\"\"\"\n \n \"\"\"Messeges to genrate filters\"\"\"\n\tnote_messege = 'In this project, we make use of Python to explore data related 
to bike share systems for three major cities in the United States\\n'\n welcome_messege = 'Hello! Let\\'s explore some US bikeshare data!\\n'\n enter_city_name_messege = 'Which city would you like to filter by? Chicago, New York City or Washington? '\n filter_definition_messege = '\\nWould you like to filter the data by - \\n1. Month\\n2. Day\\n3. Both\\n4. No Filter\\n\\nPlease choose the appropriate filter name.\\nNote: Incorrect filter name will result as \\'no filter selected\\' by the user.\\n'\n enter_filter_messege = 'Desired filter (e.g: Month, Day, Both or No Filter): '\n enter_month_name_messege = 'Enter month name (e.g: january, february, march, april, may or june): '\n enter_day_name_messege = 'Enter day of the week (e.g: monday, tuesday, wednesday, thursday, friday, saturday, sunday): '\n exception_messege = '\\nWarning! That is not a valid input.\\n'\n warning_city_name_messege = '\\nWarning! Invalid city name. Select city name from the following cities only - Chicago, New York City or Washington.' \n warning_month_name_messege = '\\nWarning! Invalid month name. Select month name from the following months only - january, february, march, april, may or june'\n warning_day_name_messege = '\\nWarning! Invalid day name. Select day name from the following days only - monday, tuesday, wednesday, thursday, friday, saturday, sunday'\n \"\"\"\"\"\"\"\"\n \n \"\"\"City, Month and Day List\"\"\"\n city_list = ['chicago', 'new york city', 'washington']\n month_list = ['january', 'february', 'march', 'april', 'may', 'june']\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n \"\"\"\"\"\"\"\"\n \n\tprint(note_messege)\n print(welcome_messege)\n \n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs \n while True:\n try:\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n while city.lower() not in city_list:\n while True:\n try: \n print(warning_city_name_messege)\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n print(filter_definition_messege)\n while True:\n try:\n filter_choice = input(enter_filter_messege)\n break\n except:\n print(exception_messege)\n while True: \n if filter_choice.lower() == 'month':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n day = 'all'\n break\n \n elif filter_choice.lower() == 'day':\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) \n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n month = 'all'\n break\n \n elif filter_choice.lower() == 'both':\n # TO DO: get user input for month (all, january, february, ... 
, june)\n while True:\n try:\n month = input(enter_month_name_messege)\n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n break\n \n else:\n month = 'all'\n day = 'all'\n break\n \n\n print('-'*40)\n return city.lower(), month.lower(), day.lower()", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def filter(self, request):\n try:\n columns = dict(request.data.iterlists())['columns']\n except AttributeError:\n columns = request.data['columns']\n return self._get_filtered_results(request, columns=columns)", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query 
= \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]", "def _get_filter(self, args):\n\n # Create the filters list\n filter_list = []\n \n # If we want to record all requests, add the file logger filter\n if args.record:\n filter_list.append(filters.StoreLoggerFilter(args.url))\n\n # Add the whitelist filter\n wl_filter = filters.WhitelistedSiteFilter(args.url)\n filter_list.append(wl_filter)\n\n # Create the ACL filter that filters all requests from devices\n acl_filter = filters.DeviceACLFilter(filter_list, args.url)\n\n return acl_filter", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def buildReport(cls, queryList):\n boxList = list()\n for dslString,filterList in queryList:\n data = cls.__dataRequest(dslString[0])\n if data != '{}':\n for filter in filterList:\n try:\n if filter:\n filterObj = filter()\n filterObj.loadData(data)\n boxList.extend(filterObj.createBoxList())\n except Exception as e:\n devLogger.error(\"Could not create Filter object: \" + str(e))\n return boxList", "def parse_queryset_filters(filters):\n OP_MAP = [\n (\">=\", \"__gte\"),\n (\"=>\", \"__gte\"),\n (\">\", \"__gt\"),\n (\"<=\", \"__lte\"),\n (\"=<\", \"__lte\"),\n (\"<\", \"__lt\"),\n (\"=\", \"\"),\n ]\n\n filter_dict = {}\n exclude_dict = {}\n for filter_str in filters.split(\",\"):\n if \"!=\" in filter_str:\n key, val = filter_str.split(\"!=\")\n exclude_dict[key] = parse_bool(val, strict=False)\n continue\n for op, new_op in OP_MAP:\n if op in filter_str:\n key, val = filter_str.split(op)\n filter_dict[key + new_op] = parse_bool(val, strict=False)\n break\n else:\n raise ValueError(\"Unknown filter expression: %s\" % filter_str)\n\n return (filter_dict, exclude_dict)", "def filters(self):\n return self.__filters", "def handle_filters(filters: Optional[List[str]]) -> List[Dict[str, Any]]:\n filters_to_send = []\n for filter_ in filters or []:\n split_filter = filter_.split('=')\n if len(split_filter) != 2 or not split_filter[0] or not split_filter[1]:\n raise DemistoException('Filters should be in the format of \"filtername1=filtervalue1,filtername2=filtervalue2\". 
'\n f'The filter \"{filter_}\" doesn\\'t meet this requirement.')\n filters_to_send.append({'name': split_filter[0],\n 'operator': '=',\n 'value': split_filter[1]})\n return filters_to_send", "def create_filter_query(self, collection_name: str, field: str, filter_type: str, filter_values: Union[List[str], str]=None):\n if filter_type == 'contains':\n # return [{'field' : field, 'filter_type' : 'contains', \"condition\":\"==\", \"condition_value\": filter_values}]\n return [{'field': field, 'filter_type': 'regexp', 'condition': '==', 'condition_value': '.*' + str(filter_values) + '.*'}]\n if filter_type == 'exact_match':\n return [{'field' : field, 'filter_type' : 'exact_match', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'categories':\n return [{'field' : field, 'filter_type' : 'categories', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'exists':\n if filter_values is None or filter_values == '==':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"==\", \"condition_value\":\" \"}]\n elif filter_values == '!=':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"!=\", \"condition_value\":\" \"}]\n if filter_type == '<=' or filter_type == '>=' or filter_type == '>' or filter_type == '<' or filter_type == '==':\n if self.collection_schema(collection_name)[field] == 'date':\n return [{'field' : field, 'filter_type' : 'date', \"condition\":filter_type, \"condition_value\": filter_values}]\n elif self.collection_schema(collection_name)[field] == 'numeric':\n return [{'field' : field, 'filter_type' : 'numeric', \"condition\":filter_type, \"condition_value\":filter_values}]\n else:\n raise ValueError(f\"{filter_type} has not been defined. Please choose one of contains/exact_match/exists/categories/>=/<=/>/<.\")", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset", "def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def default_filters(self, query) -> object:\n user = self.request.user\n model = self.model\n custom_filter = self.request.params.get('_custom_filter')\n if 'g:professionals' in user.groups and custom_filter == 'pool':\n # disable security for this custom filter\n self.enable_security = False\n professional = Professional.get(user.id)\n pool_ids = [item.id for item in professional.pools]\n query = query.filter(\n model.pool_id.in_(pool_ids),\n model.state == 'published'\n )\n elif 
custom_filter == 'late_first_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.scheduled_datetime <= date_limit,\n model.state == 'awaiting_assets',\n model.last_approval_date.is_(None)\n )\n elif custom_filter == 'late_re_submission':\n config_delta = timedelta(seconds=int(LATE_SUBMISSION_SECONDS))\n date_limit = datetime_utcnow() - config_delta\n query = query.filter(\n model.last_approval_date <= date_limit,\n model.state == 'awaiting_assets',\n model.submission_path.isnot(None),\n )\n return query", "def filters(self):\n return self._filters", "def filter_by_query_params(self, request):\n items = self\n company = request.GET.get('company', None)\n main_contractor = request.GET.get('main_contractor', None)\n main_sub_contractor = request.GET.get('main_sub_contractor', None)\n client = request.GET.get('client', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n # filter\n if main_contractor:\n items = items.filter(main_contractor=main_contractor).distinct()\n if main_sub_contractor:\n items = items.filter(main_sub_contractor=main_sub_contractor).distinct()\n if client:\n items = items.filter(client=client).distinct()\n if company:\n items = items.filter(companies_linked__in=[company]).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(name__icontains=str)).distinct()\n return items", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def GetFilters(self, filt_defs):\n # The artifact isn't actually used for anything, it's just required to\n # initialize handlers.\n probe = rdfvalue.Probe(artifact=\"Data\", filters=filt_defs)\n return probe.filters", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def filters():\n states = list(storage.all('State').values())\n states.sort(key=lambda state: state.name)\n cities = list(storage.all('City').values())\n cities.sort(key=lambda city: city.name)\n amenities = list(storage.all('Amenity').values())\n amenities.sort(key=lambda amenity: amenity.name)\n\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)", "def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n 
customer = request.GET.get('customer', None)\n company = request.GET.get('company', None)\n engineer = request.GET.get('engineer', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if engineer:\n items = items.filter(lead__sales_engineer=engineer).distinct()\n if customer:\n items = items.filter(lead__customer=customer).distinct()\n if company:\n items = items.filter(company=company).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(erp_reference__icontains=str)).distinct()\n return items", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n quote = request.GET.get('quote', None)\n added_by = request.GET.get('added_by', None)\n customer = request.GET.get('customer', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if customer:\n items = items.filter(customer=customer).distinct()\n if quote:\n items = items.filter(quote=quote).distinct()\n if added_by:\n items = items.filter(added_by=added_by).distinct()\n\n return items", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n customer = request.GET.get('customer', None)\n quote = request.GET.get('quote', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if customer:\n items = items.filter(customer=customer).distinct()\n if quote:\n items = items.filter(quote=quote).distinct()\n\n return items", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n 
except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def filters(self):\n\t\treturn self.local_filter", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def get_filters(self) -> dict:\n return self._filters", "def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n 
filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def get_filters(config, all_statuses, all_projects):\n filtered_statuses = [status['id'] for status in all_statuses if status['name'] in\n config['status_filter']]\n status_id_filter_str = '!' + '|'.join(str(status_id) for status_id in filtered_statuses)\n \n # filter by project\n filtered_projects = [project['id'] for project in all_projects if project['name'] in\n config['project_filter']]\n \n # transform list of dicts format\n # {'Remote': '0'} -> {'name': 'Remote', 'value': '0'}\n # this is an inclusive filter\n included_custom_fields = list()\n for custom_field in config['custom_field_filter']:\n for key in custom_field.keys():\n included_custom_fields.append({'name': key, 'value': custom_field[key]})\n\n return {'status_id_filter_str': status_id_filter_str,\n 'filtered_projects': filtered_projects,\n 'included_custom_fields': included_custom_fields}", "def _build_query(self, types=(), paths=(), depth=None, query=None, filterPermissions=True, globFilters=None):\n available_indexes = self.model_catalog.searcher.get_indexes()\n not_indexed_user_filters = {} # Filters that use not indexed fields\n\n user_filters_query = None\n types_query = None\n paths_query = None\n permissions_query = None\n\n partial_queries = []\n\n if query:\n \"\"\"\n # if query is a dict, we convert it to AdvancedQuery\n # @TODO We should make the default query something other than AdvancedQuery\n subqueries = []\n if isinstance(query, dict):\n for attr, value in query.iteritems():\n if isinstance(value, str) and '*' in value:\n subqueries.append(MatchGlob(attr, value))\n else:\n subqueries.append(Eq(attr, value))\n query = And(*subqueries)\n partial_queries.append(query)\n \"\"\"\n partial_queries.append(self._parse_user_query(query))\n\n # Build query from filters passed by user\n if globFilters:\n for key, value in globFilters.iteritems():\n if key in available_indexes:\n if user_filters_query:\n user_filters_query = And(query, MatchRegexp(key, '*%s*' % value))\n else:\n user_filters_query = MatchRegexp(key, '*%s*' % value)\n else:\n not_indexed_user_filters[key] = value\n\n if user_filters_query:\n partial_queries.append(user_filters_query)\n\n # Build the objectImplements query\n if not isinstance(types, (tuple, list)):\n types = (types,)\n types_query_list = [ Eq('objectImplements', dottedname(t)) for t in types ]\n if types_query_list:\n if len(types_query_list) > 1:\n types_query = Or(*types_query_list)\n else:\n types_query = types_query_list[0]\n\n partial_queries.append(types_query)\n\n # Build query for paths\n if paths is not False: # When paths is False we dont add any path condition\n if not paths:\n paths = ('/'.join(self.context.getPhysicalPath()) + '*', )\n elif isinstance(paths, basestring):\n paths = (paths,)\n\n \"\"\" OLD CODE. Why this instead of In? 
What do we need depth for?\n q = {'query':paths}\n if depth is not None:\n q['depth'] = depth\n paths_query = Generic('path', q)\n \"\"\"\n paths_query = In('path', paths)\n partial_queries.append(paths_query)\n\n # filter based on permissions\n if filterPermissions and allowedRolesAndGroups(self.context):\n permissions_query = In('allowedRolesAndUsers', allowedRolesAndGroups(self.context))\n partial_queries.append(permissions_query)\n\n # Put together all queries\n search_query = And(*partial_queries)\n return (search_query, not_indexed_user_filters)", "def get_timeline_filters(self, req):", "def get_queryset(self):\n\n fl = self.request.QUERY_PARAMS.get('fl', None)\n win = self.request.QUERY_PARAMS.get('win', None)\n win_filter = Q(orderstatus__in=['Shipped', 'Unshipped', 'Processing'], fulfillmentchannel='MFN')\n #win_filter = Q(orderstatus__in=['Unshipped', 'Processing'], fulfillmentchannel='MFN')\n\n queryset = None\n if fl is not None and fl.isdigit():\n logger.info(\"Got filter id: %s\", fl)\n try:\n filter = Filter.objects.get(pk=int(fl))\n if filter:\n ancestor_logic = Q() #Create Q object to hold other query\n #If filter is only root node\n if filter.is_root_node():\n ancestor_logic = pickle.loads(filter.logic) #Deserilize the filter logic\n logger.info(\"Filter has only root node, Logic: %s\", ancestor_logic)\n\n #If filter has parents\n else:\n for filter_data in filter.get_ancestors(False, True): #Get all parents including self\n filter_logic = pickle.loads(filter_data.logic) #Deserilize the filter logic\n if ancestor_logic.__len__()==0:\n ancestor_logic = filter_logic\n else:\n ancestor_logic = ancestor_logic & filter_logic\n logger.info(\"Filter has parents, Logic: %s\", ancestor_logic)\n\n if ancestor_logic:\n queryset = AmazonOrders.objects.filter(ancestor_logic & win_filter) #pass the query object to filter\n logger.info(\"Filter query, Query: %s\", queryset.query)\n\n except Exception as e:\n logger.error(\"In queryset exception : %s\",e)\n elif win is not None:\n logic = None\n if win == 'AFN':\n logic = Q(fulfillmentchannel=win, orderstatus='Shipped')\n elif win in ['Pending', 'Canceled']:\n logic = Q(orderstatus=win)\n elif win == 'Unshipped':\n logic = Q(orderstatus=win)\n\n if logic:\n queryset = AmazonOrders.objects.filter(logic)\n logger.info(\"Win query, Query: %s\", queryset.query)\n\n else:\n #queryset = AmazonOrders.objects.all()\n queryset = AmazonOrders.objects.filter(win_filter)\n logger.info(\"Filter not passed, Processing full Query: %s\", queryset.query)\n\n return queryset", "def get_filter_args(filters, disallow_if_not_in_search=True):\n filters.clear_filters()\n request_args = set(request.args)\n for arg in request_args:\n re_match = re.findall(r\"_flt_(\\d)_(.*)\", arg)\n if not re_match:\n continue\n filter_index = int(re_match[0][0])\n filter_column = re_match[0][1]\n if (\n filter_column not in filters.get_search_filters().keys()\n and disallow_if_not_in_search\n ):\n log.warning(\"Filter column not allowed\")\n continue\n filters.add_filter_index(filter_column, filter_index, request.args.getlist(arg))", "def build_ip_filters(self):\n if is_list_empty(self.data['ipfilter']['myfilter']) is False:\n for item in self.data['ipfilter']['myfilter']:\n self.cidr_filter_list.append(item)\n else:\n logger.warning(\n \"my filter field is empty in the given input file , rules for the same will not be created in \"\n \"Nginx configuration\")", "def filter_queryset(self,queryset):\n filters = {}\n for backend in list(self.filter_backends):\n backendobj = 
backend()\n queryset = backendobj.filter_queryset(self.request, queryset, self)\n if hasattr(backendobj,'get_applied_filters'):\n filters.update(backendobj.get_applied_filters())\n self. applied_filters = OrderedDict()\n for key,value in filters.items():\n if isinstance(value,datetime.datetime):\n self.applied_filters[key]=value\n del filters[key]\n self.applied_filters.update(sorted(filters.items(),key=itemgetter(1),reverse=True))\n return queryset", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def FS_filter(self, at_data, *args, **kwargs) -> dict:\n\n b_status : bool = True\n l_file : list = []\n l_dirHits : list = []\n l_dir : list = []\n str_path : str = at_data[0]\n al_file : list = at_data[1]\n\n if len(self.args['fileFilter']):\n if self.args['fileFilterLogic'].upper() == 'OR':\n al_file = [x \\\n for y in self.args['fileFilter'].split(',') \\\n for x in al_file if y in x]\n else:\n for y in self.args['fileFilter'].split(','):\n al_file = [x for x in al_file if y in x]\n\n if len(self.args['dirFilter']):\n l_dirHits = [str_path \\\n for y in self.args['dirFilter'].split(',') \\\n if y in str_path]\n if self.args['dirFilterLogic'].upper() == 'AND':\n if len(l_dirHits) == len(self.args['dirFilter'].split(',')):\n for y in self.args['dirFilter'].split(','):\n l_dirHits = [x for x in l_dirHits if y in x]\n else:\n l_dirHits = []\n if len(l_dirHits):\n # Remove any duplicates in the l_dirHits: duplicates can occur\n # if the tokens in the filter expression map more than once\n # into the leaf node in the <str_path>, as a path that is\n #\n # /some/dir/in/the/space/1234567\n #\n # and a search filter on the dirspace of \"123,567\"\n [l_dir.append(x) for x in l_dirHits if x not in l_dir]\n else:\n # If no dir hits for this dir, then we zero out the\n # file filter\n al_file = []\n\n if len(al_file):\n al_file.sort()\n l_file = al_file\n b_status = True\n else:\n self.dp.qprint( \"No valid files to analyze found in path %s!\" %\n str_path, comms = 'warn', level = 5)\n l_file = None\n b_status = False\n return {\n 'status': b_status,\n 'l_file': l_file\n }", "def _getQuery(self, request):\n q = Conference.query()\n inequality_filter, filters = self._formatFilters(request.filters)\n\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Conference.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Conference.name)\n\n for filtr in filters:\n if filtr[\"field\"] in [\"month\", \"maxAttendees\"]:\n filtr[\"value\"] = int(filtr[\"value\"])\n formatted_query = ndb.query.FilterNode(filtr[\"field\"], filtr[\"operator\"], filtr[\"value\"])\n q = q.filter(formatted_query)\n return q", "def get_filters(self, saving):\n self.filter_entry_dict.clear()\n\n for entry, var in self.filter_entries_list:\n if (entry.get() != \"\") and (var.get() != \"\") and (not saving):\n self.filter_entry_dict[var.get()] = entry.get()\n elif saving and var.get() != \"\":\n self.filter_entry_dict[var.get()] = entry.get()", "def curate_filter_info(self):\n filter_list = [\n self.sample_name, self.final_id, self.all_variant_count,\n self.filter_min_depth_count, self.filter_max_depth_count,\n self.filter_common_var_count, self.log_mut_count,\n self.cosmic_variant_counts, self.unknown_maf_count\n ]\n return filter_list", "def get_contract_filters(*contracts):\n return [generate_filter(filter_text) for filter_text in contracts]", "def apply_queryset_rules(self, qs):\n clauses = {\n 
'filter': [],\n 'exclude': []}\n\n for rule in self.drip_model.queryset_rules.all():\n\n clause = clauses.get(rule.method_type, clauses['filter'])\n\n kwargs = rule.filter_kwargs(qs, now=self.now)\n clause.append(Q(**kwargs))\n\n qs = rule.apply_any_annotation(qs)\n\n if clauses['exclude']:\n qs = qs.exclude(functools.reduce(operator.or_, clauses['exclude']))\n qs = qs.filter(*clauses['filter'])\n\n return qs", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "async def getQueryFilters(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getQueryFilters()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/query-options/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True", "def create_filters(id=None, title=None, category=None, priority=None,\n status=None, place=None, description=None, name=None):\n\n filters = {}\n if id:\n filters['id'] = id\n if title:\n filters['title'] = title\n if category:\n 
filters['category'] = category\n if priority:\n filters['priority'] = priority\n if status:\n filters['status'] = status\n if place:\n filters['place'] = place\n if description:\n filters['description'] = description\n if name:\n filters['name'] = name\n return filters", "def _getQuery(self, request):\n q = Conference.query()\n inequality_filter, filters = self._formatFilters(request.filters)\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Conference.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Conference.name)\n for filtr in filters:\n if filtr[\"field\"] in [\"month\", \"maxAttendees\"]:\n try:\n filtr[\"value\"] = int(filtr[\"value\"])\n except ValueError:\n raise endpoints.BadRequestException(\n \"Non-integer in integer field.\")\n formatted_query = ndb.query.FilterNode(\n filtr[\"field\"], filtr[\"operator\"], filtr[\"value\"])\n q = q.filter(formatted_query)\n return q", "def apply_search_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params['term'] = k\n params['query'] = demisto.getArg(k)\n break\n return params", "def _build_filter_set(self, column_config_name, service_name=None, **filters):\n\n if not service_name:\n service_name = column_config_name\n\n if not self._column_configs.get(service_name):\n self._get_col_config(service_name, fetch_name=column_config_name)\n\n caomColConfig = self._column_configs[service_name]\n\n mashupFilters = []\n for colname, value in filters.items():\n\n # make sure value is a list-like thing\n if np.isscalar(value,):\n value = [value]\n\n # Get the column type and separator\n colInfo = caomColConfig.get(colname)\n if not colInfo:\n warnings.warn(\"Filter {} does not exist. This filter will be skipped.\".format(colname), InputWarning)\n continue\n\n colType = \"discrete\"\n if (colInfo.get(\"vot.datatype\", colInfo.get(\"type\")) in (\"double\", \"float\", \"numeric\")) \\\n or colInfo.get(\"treatNumeric\"):\n colType = \"continuous\"\n\n separator = colInfo.get(\"separator\")\n freeText = None\n\n # validate user input\n if colType == \"continuous\":\n if len(value) < 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Not enough values provided, skipping...\"\n warnings.warn(warningString, InputWarning)\n continue\n elif len(value) > 2:\n warningString = \"{} is continuous, \".format(colname) + \\\n \"and filters based on min and max values.\\n\" + \\\n \"Too many values provided, the first two will be \" + \\\n \"assumed to be the min and max values.\"\n warnings.warn(warningString, InputWarning)\n else: # coltype is discrete, all values should be represented as strings, even if numerical\n value = [str(x) for x in value]\n\n # check for wildcards\n\n for i, val in enumerate(value):\n if ('*' in val) or ('%' in val):\n if freeText: # freeText is already set cannot set again\n warningString = \"Only one wildcarded value may be used per filter, \" + \\\n \"all others must be exact.\\n\" + \\\n \"Skipping {}...\".format(val)\n warnings.warn(warningString, InputWarning)\n else:\n freeText = val.replace('*', '%')\n value.pop(i)\n\n # craft mashup filter entry\n entry = {}\n entry[\"paramName\"] = colname\n if separator:\n entry[\"separator\"] = separator\n if colType == \"continuous\":\n entry[\"values\"] = [{\"min\": value[0], \"max\":value[1]}]\n else:\n entry[\"values\"] = value\n if freeText:\n entry[\"freeText\"] = freeText\n\n 
mashupFilters.append(entry)\n\n return mashupFilters", "def query_filter_builder(cls, user_attribute: str, value: Any) -> List[Q]:\n attributes = re.compile(r\"Or|And|OR|AND\").split(user_attribute)\n query_builder = []\n for attr in attributes:\n attr = attr.strip().lower()\n cond = {f\"{attr}__icontains\": value}\n if user_attribute.split(attr)[0].lower().endswith(\"or\"):\n last_query = query_builder.pop()\n query_builder.append(Q(last_query, Q(**cond), join_type=\"OR\"))\n elif attr != \"\":\n query_builder = [*query_builder, Q(**cond)]\n return query_builder", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)" ]
[ "0.7212947", "0.70940274", "0.7063467", "0.70396554", "0.70150787", "0.7010616", "0.68755764", "0.67024094", "0.67008877", "0.6694236", "0.6660822", "0.6646979", "0.65543395", "0.65318733", "0.64761287", "0.64659613", "0.6453026", "0.64314073", "0.6378158", "0.63564056", "0.6350748", "0.6342837", "0.6340055", "0.6304544", "0.63007", "0.62936974", "0.629182", "0.6277413", "0.6258392", "0.6253238", "0.6230658", "0.61981726", "0.61981726", "0.6163718", "0.61385757", "0.6117649", "0.6089614", "0.60769033", "0.60478383", "0.6046397", "0.6035758", "0.6022503", "0.6016799", "0.60117894", "0.60063046", "0.5994791", "0.5985719", "0.59758973", "0.5972178", "0.59599", "0.59532803", "0.59432614", "0.59296954", "0.5924079", "0.5909814", "0.5909506", "0.59081644", "0.5896761", "0.5892159", "0.5888201", "0.587619", "0.58704543", "0.586078", "0.58461314", "0.5844234", "0.5831105", "0.5827569", "0.58187354", "0.5811178", "0.5800361", "0.57946914", "0.57891107", "0.57891107", "0.5786299", "0.57672244", "0.5761322", "0.57481736", "0.57427007", "0.57388616", "0.57336056", "0.5728447", "0.57254034", "0.5724551", "0.5717672", "0.5712907", "0.57060117", "0.5689324", "0.5688375", "0.56859577", "0.56744224", "0.5667527", "0.56634617", "0.56630635", "0.5662163", "0.56613594", "0.56565917", "0.5655649", "0.5655154", "0.5654543", "0.564838" ]
0.7715377
0
Get the related filterset instances for all related filters.
def get_related_filtersets(self): related_filtersets = OrderedDict() for related_name in self.related_filters: if related_name not in self.filters: continue f = self.filters[related_name] related_filtersets[related_name] = f.filterset( data=self.data, queryset=f.get_queryset(self.request), relationship=related(self, related_name), request=self.request, prefix=self.form_prefix, ) return related_filtersets
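The get_related_filtersets document above builds one nested filterset per declared related filter, binding each to the shared request data under a per-relationship prefix. A minimal sketch of how such related filters might be declared, assuming the django-rest-framework-filters RelatedFilter API (the Author/Book models and the myapp module are hypothetical, not part of the record above):

# Hypothetical models; assumes the django-rest-framework-filters package API.
import rest_framework_filters as filters
from myapp.models import Author, Book  # hypothetical

class AuthorFilter(filters.FilterSet):
    class Meta:
        model = Author
        fields = {'username': ['exact', 'icontains']}

class BookFilter(filters.FilterSet):
    # Each RelatedFilter is picked up via `related_filters`; query params prefixed
    # with 'author__' are handed to a prefixed AuthorFilter by get_related_filtersets().
    author = filters.RelatedFilter(AuthorFilter, field_name='author',
                                   queryset=Author.objects.all())

    class Meta:
        model = Book
        fields = []

With such a declaration, a request like ?author__username__icontains=smith would cause BookFilter to instantiate an AuthorFilter bound to that prefixed parameter.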
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_related_filtersets(self, queryset):\n for related_name, related_filterset in self.related_filtersets.items():\n # Related filtersets should only be applied if they had data.\n prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP)\n if not any(value.startswith(prefix) for value in self.data):\n continue\n\n field_name = self.filters[related_name].field_name\n lookup_expr = LOOKUP_SEP.join([field_name, 'in'])\n subquery = Subquery(related_filterset.qs.values('pk'))\n queryset = queryset.filter(**{lookup_expr: subquery})\n\n return queryset", "def getFilterSetFilterFieldManagers(self):\n return _get_related_managers(self, FilterSetFilterField)", "def get_request_filters(self):\n # build the compiled set of all filters\n requested_filters = OrderedDict()\n for filter_name, f in self.filters.items():\n requested_filters[filter_name] = f\n\n # exclusion params\n exclude_name = '%s!' % filter_name\n if related(self, exclude_name) in self.data:\n # deepcopy the *base* filter to prevent copying of model & parent\n f_copy = copy.deepcopy(self.base_filters[filter_name])\n f_copy.parent = f.parent\n f_copy.model = f.model\n f_copy.exclude = not f.exclude\n\n requested_filters[exclude_name] = f_copy\n\n return requested_filters", "def filters(self):\n return self._filters", "def filters(self):\n return self.__filters", "def get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, self)\n return queryset", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def get_filter_subset(cls, params, rel=None):\n # Determine names of filters from query params and remove empty values.\n # param names that traverse relations are translated to just the local\n # filter names. eg, `author__username` => `author`. Empty values are\n # removed, as they indicate an unknown field eg, author__foobar__isnull\n filter_names = {cls.get_param_filter_name(param, rel) for param in params}\n filter_names = {f for f in filter_names if f is not None}\n return OrderedDict(\n (k, v) for k, v in cls.base_filters.items() if k in filter_names\n )", "def get_filters(self):\n if self.filters is not None:\n return self.filters\n elif self.parent is not None:\n return self.parent.get_filters()\n else:\n return None", "def all(self, filter_deleted=False):\n objects = self.matching_objects(filter_deleted=filter_deleted)\n return objects", "def get_filters(self):\n objects = javabridge.get_env().get_object_array_elements(\n javabridge.call(self.jobject, \"getFilters\", \"()[Lweka/filters/Filter;\"))\n result = []\n for obj in objects:\n result.append(Filter(jobject=obj))\n return result", "def qs(self) -> MIZQuerySet:\n if isinstance(self, type):\n raise TypeError(\n f\"Calling qs() from class level is prohibited. 
Use {self.__name__}.objects instead.\"\n )\n # noinspection PyUnresolvedReferences\n return self._meta.model.objects.filter(pk=self.pk)", "def filters(self):\n\t\treturn self.local_filter", "def subresources(self):\n return self._get_related_resources(True)", "def sets(self):\n return self._sets", "def load_all_queryset(self):\n return self.get_model()._default_manager.all()", "def getViewFilterFieldManagers(self):\n return _get_related_managers(self, ViewFilterField)", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def get_filterable_queryset(self):\n site = self.get_site()\n\n if not site:\n return self.get_model_class().objects.none()\n\n queryset = self.get_model_class().objects.in_site(site).live()\n\n filterable_list_block = self.get_filterable_list_wagtail_block()\n if filterable_list_block is None:\n return queryset\n\n if filterable_list_block.value['filter_children']:\n queryset = queryset.child_of(self)\n elif filterable_list_block.value['filter_siblings']:\n queryset = queryset.sibling_of(self)\n\n return queryset", "def get_list_filters(self):\n # look in session for the saved search...\n filters = ListFilter()\n filters.get_list_filter(self.table)\n return filters", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset", "def get_filters(self) -> dict:\n return self._filters", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = 
get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters", "def filter_all(cls, **kwargs):\n return cls.query.filter_by(**kwargs).all()", "def get_collections(self):\n\n from authentication.models import Collection\n\n return (\n Collection.objects\n .filter(models.Q(admin=self) | models.Q(members=self) | models.Q(guests=self))\n .distinct()\n .order_by('title')\n )", "def condition_filters(self):\r\n return filters.Filters(self)", "def get_queryset(self):\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors')\n\n filter_form = self.form\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results).distinct()\n return ordered_results", "def find(cls, **filters):\n return cls.query.filter_by(**filters).all()", "def get_collection(self, request, **resources):\r\n\r\n if self._meta.queryset is None:\r\n return []\r\n\r\n # Filter collection\r\n filters = self.get_filters(request, **resources)\r\n filters.update(self.get_default_filters(**resources))\r\n qs = self._meta.queryset\r\n for key, (value, exclude) in filters.items():\r\n try:\r\n if exclude:\r\n qs = qs.exclude(**{key: value})\r\n\r\n else:\r\n qs = qs.filter(**{key: value})\r\n except FieldError, e:\r\n logger.warning(e)\r\n\r\n sorting = self.get_sorting(request, **resources)\r\n if sorting:\r\n qs = qs.order_by(*sorting)\r\n\r\n return qs", "def GetFilters(self, bulk=False):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n\n filters = self._SendRequest(HTTP_GET, \"/%s/filters\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return filters\n else:\n return [f[\"uuid\"] for f in filters]", "def getQuery(self):\n # Get a list of object managers, each of which containing the\n # corresponding view and filter set filter field objects of all\n # available filter set classes.\n filter_field_managers = [\n manager for manager in self.getFilterSetFilterFieldManagers()\n ] + [\n manager for manager in self.view.getViewFilterFieldManagers()\n ]\n\n # Create an OR query for all filter fields of the same class\n or_queries = []\n for manager in filter_field_managers:\n filter_fields = manager.all()\n if filter_fields:\n or_queries.append(\n reduce(lambda x, y: x | y, [\n filter_field.getRecordFilter()\n for filter_field in filter_fields\n ]\n )\n )\n\n # If there are different filter field OR queries, combine those\n # queries as one AND query\n if or_queries:\n return reduce(lambda x, y: x & y, [\n or_query for or_query in or_queries\n ]\n )\n # If the filter set does not have any filter fields, we return an empty\n # query, which is equivalent to querying all objects, e.g.:\n # `View.objects.all() == View.objects.filter(Q())`\n else:\n return Q()", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def sets(self):\n\n return self._collection.distinct('set')", "def load_all_filters(self, interp=True, 
lamb=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in s.content]\n return(filters)", "def load_all_filters(self, interp=True, lamb=None):\n return [self._load_filter(k, interp=interp, lamb=lamb)\n for k in self.content]", "def process_filters(self, filters, queryset, view):\n return filters", "def get_queryset(self):\n category_qs = Category.objects \\\n .select_related('theme') \\\n .order_by('theme__name', 'name')\n\n base_qs = Aid.objects \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors') \\\n .prefetch_related(Prefetch('categories', queryset=category_qs))\n\n user = self.request.user\n if user.is_authenticated and user.is_superuser:\n qs = base_qs\n elif user.is_authenticated:\n q_published = Q(status='published')\n q_is_author = Q(author=user)\n qs = base_qs.filter(q_published | q_is_author)\n else:\n qs = base_qs.published()\n\n return qs", "def filter(cls, *args, **kwargs) -> models.QuerySet:\n return cls.objects.filter(*args, **kwargs)", "def get_queryset(self):\n if self.queryset is None:\n raise ImproperlyConfigured(\"%(cls)s is missing a QuerySet.\" % {\n 'cls': self.__class__.__name__\n })\n\n return self.queryset.all()", "def get_all_associations(self):\n return", "def get_queryset(self):\n # Get tags from the request if it was specified\n tags = self.request.query_params.get('tags')\n # Get authors from the request if it was specified\n authors = self.request.query_params.get('authors')\n # Make copy of queryset as to not modify the original queryset\n queryset = self.queryset\n if tags:\n # Get list of ids specified\n tag_ids = self._params_to_ints(tags)\n # Filter on the foreign key object with tags__id__in\n queryset = queryset.filter(tags__id__in=tag_ids)\n if authors:\n # Get list of ids specified\n author_ids = self._params_to_ints(authors)\n # Filter by the author\n queryset = queryset.filter(authors__id__in=author_ids)\n\n return queryset.filter(user=self.request.user)", "def filter(self, *args, **kwargs):\n p = self.proxy\n filter_clauses = self.filter_clauses + list(args)\n related_clauses = self.related_clauses[:]\n\n connection_kwarg = p.connection_kwarg\n connection = self.connection\n\n # Build the filter operations\n for k, v in kwargs.items():\n # Ignore connection parameter\n if k == connection_kwarg:\n connection = v\n continue\n model = p.model\n op = \"eq\"\n if \"__\" in k:\n parts = k.split(\"__\")\n if parts[-1] in QUERY_OPS:\n op = parts[-1]\n k = \"__\".join(parts[:-1])\n col = resolve_member_column(model, k, related_clauses)\n\n # Support lookups by model\n if isinstance(v, Model):\n v = v.serializer.flatten_object(v, scope=None)\n elif op in ('in', 'notin'):\n # Flatten lists when using in or notin ops\n v = model.serializer.flatten(v, scope=None)\n\n clause = getattr(col, QUERY_OPS[op])(v)\n filter_clauses.append(clause)\n\n return self.clone(\n connection=connection,\n filter_clauses=filter_clauses,\n related_clauses=related_clauses)", "def get_related_trackers(self):\n\n return Tracker.objects.filter(product=self.pk)", "def all(self):\n return (self.__objects)", "def filters(self, **kwargs):\n return config.filters(self._host, self._session, **kwargs)", "def filter_edges(self, edges, cache_match=False):\n for filter_feature in self:\n edges = filter_feature.filter_edges(edges, cache_match)\n return edges", "def getTableRecords(self):\n # Combine non-empty filter queries from all filter sets of this view\n query = reduce(lambda x, y: x | y, 
[fs.getQuery()\n for fs in self.filterset_set.all()])\n\n return Record.objects.filter(query).distinct().order_by(\n \"report__date_range_begin\")", "def filter(self, filters):", "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def all(cls):\n return cls.where()", "def associated_objects(self):\n return self._associated_objects", "def references(self):\n return self._get_related_resources(False)", "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "def get_all(self):\n return ReadingSet(self._set)", "def get_filters(self):", "def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources", "def get_queryset(self):\n return self.queryset().select_related(\n 'image'\n ).prefetch_related(\n 'authors',\n 'categories',\n )", "def get_all(cls, **filters) -> List[dict]:\n limit = filters.pop(\"limit\", 0) or 0\n offset = filters.pop(\"offset\", 0) or 0\n errors = cls.validate_query(filters)\n if errors:\n raise ValidationFailed(filters, errors)\n\n cls.deserialize_query(filters)\n\n if cls.logger.isEnabledFor(logging.DEBUG):\n if filters:\n cls.logger.debug(f\"Query documents matching {filters}...\")\n else:\n cls.logger.debug(f\"Query all documents...\")\n documents = cls.__collection__.find(filters, skip=offset, limit=limit)\n if cls.logger.isEnabledFor(logging.DEBUG):\n nb_documents = (\n cls.__collection__.count_documents(filters, skip=offset, limit=limit)\n if limit\n else cls.__collection__.count_documents(filters, skip=offset)\n )\n cls.logger.debug(\n f'{nb_documents if nb_documents else \"No corresponding\"} documents retrieved.'\n )\n return [cls.serialize(document) for document in documents]", "def get_related_categories(self):\n if not hasattr(self, '_BasePublication__related_categories_cache'):\n self.__related_categories_cache = self.related_categories.select_related('category').all()\n return self.__related_categories_cache", "def queryset(cls):\n return cls.model._default_manager.all()", "def all(self):\n\n return self.__model__.query.all()", "def get_queryset(self):\n\n fl = self.request.QUERY_PARAMS.get('fl', None)\n win = self.request.QUERY_PARAMS.get('win', None)\n win_filter = Q(orderstatus__in=['Shipped', 'Unshipped', 'Processing'], fulfillmentchannel='MFN')\n #win_filter = Q(orderstatus__in=['Unshipped', 'Processing'], fulfillmentchannel='MFN')\n\n queryset = None\n if fl is not None and fl.isdigit():\n logger.info(\"Got filter id: %s\", fl)\n try:\n filter = Filter.objects.get(pk=int(fl))\n if filter:\n ancestor_logic = Q() #Create Q object to hold other query\n #If filter is only root node\n if filter.is_root_node():\n ancestor_logic = pickle.loads(filter.logic) #Deserilize the filter logic\n logger.info(\"Filter has only root node, Logic: %s\", ancestor_logic)\n\n #If filter has parents\n else:\n for filter_data in filter.get_ancestors(False, True): #Get all parents including self\n filter_logic = pickle.loads(filter_data.logic) #Deserilize the filter logic\n if 
ancestor_logic.__len__()==0:\n ancestor_logic = filter_logic\n else:\n ancestor_logic = ancestor_logic & filter_logic\n logger.info(\"Filter has parents, Logic: %s\", ancestor_logic)\n\n if ancestor_logic:\n queryset = AmazonOrders.objects.filter(ancestor_logic & win_filter) #pass the query object to filter\n logger.info(\"Filter query, Query: %s\", queryset.query)\n\n except Exception as e:\n logger.error(\"In queryset exception : %s\",e)\n elif win is not None:\n logic = None\n if win == 'AFN':\n logic = Q(fulfillmentchannel=win, orderstatus='Shipped')\n elif win in ['Pending', 'Canceled']:\n logic = Q(orderstatus=win)\n elif win == 'Unshipped':\n logic = Q(orderstatus=win)\n\n if logic:\n queryset = AmazonOrders.objects.filter(logic)\n logger.info(\"Win query, Query: %s\", queryset.query)\n\n else:\n #queryset = AmazonOrders.objects.all()\n queryset = AmazonOrders.objects.filter(win_filter)\n logger.info(\"Filter not passed, Processing full Query: %s\", queryset.query)\n\n return queryset", "def all(self):\n return self._summarize(lambda c: c.all)", "def get_filterable_queryset(self):\n queryset = super().get_filterable_queryset()\n category_names = get_category_children(self.filterable_categories)\n return queryset.filter(categories__name__in=category_names)", "def filters(self, filters):\n\n self._filters = filters", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n if applicable_filters:\n queryset = queryset.filter(applicable_filters)\n if applicable_exclusions:\n queryset = queryset.exclude(applicable_exclusions)\n return queryset", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def get_all(self):\n return [_ for _ in self]", "def get_relations(self):\n if not hasattr(self, '_BasePublication__relations_cache'):\n tree_opts = Rubric._mptt_meta\n self.__relations_cache = self.forward_relations.select_related('rubric', 'to_publication').order_by(\n 'rubric__%s' % tree_opts.tree_id_attr, 'rubric__%s' % tree_opts.left_attr)\n return self.__relations_cache", "def find_set(self):\n return self._set_set(self._find_set())", "def siblings(self):\n return type(self)._default_manager.filter(category=self.category)", "def GetFilters(self, filt_defs):\n # The artifact isn't actually used for anything, it's just required to\n # initialize handlers.\n probe = rdfvalue.Probe(artifact=\"Data\", filters=filt_defs)\n return probe.filters", "def filter_children(self, queryset, filter_dict):\n return queryset.filter(**filter_dict)", "def get_queryset(self):\n reviews = Review.objects \\\n .filter(reviewer=self.request.user) \\\n .filter(closed_on=None) \\\n .order_by('due_date') \\\n .select_related()\n\n reviews = self.step_filter(reviews)\n\n self.search_form = self.get_search_form(reviews)\n reviews = self.search_form.filter_reviews()\n\n return reviews", "def prefetch(self, queryset: models.QuerySet) -> models.QuerySet:\n subquery = self.model.objects.all()\n \n if self.filters:\n q = reduce(operator.and_, [f.get() for f in self.filters])\n subquery = subquery.filter(q)\n \n if self.sort:\n subquery = subquery.order_by(*self.sort)\n \n subquery = subquery.select_related(\n *[f for f in self._one_fields if f not in self.joins.keys()]\n )\n subquery = subquery.prefetch_related(\n *[f for f in self._many_fields if f not in self.joins.keys()]\n )\n \n new = queryset.prefetch_related(models.Prefetch(self.field, queryset=subquery))\n \n # Recursively 
prefetch inner joins\n for j in self.joins.values():\n new = j.prefetch(new)\n \n return new", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def configuration_sets(self):\n return self._configuration_sets", "def get_suppliers(self):\n return Product.objects.get(id=self.id).supplier_set.all()", "def get_feature_sets(self, exclude=None):\n\n # Create list containing features per dataframe as sets\n feat_sets = [set(df) for df in self]\n # exclude unwanted features\n if exclude:\n feat_sets = [df_set.difference(exclude) for df_set in feat_sets]\n\n return feat_sets", "def related_objects(self, related_model, related_fields, objs):\n predicate = reduce(operator.or_, (\n query_utils.Q(**{'%s__in' % related_field.name: objs})\n for related_field in related_fields\n ))\n return related_model._default_manager.using(self.using).filter(\n predicate\n )", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def resources(self):\n\n return self.FIXTURE.resources_collection(self)", "def associatedObjects (self):\n return self.__associatedObjects", "def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def get_queryset(self):\n if getattr(self, 'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()" ]
[ "0.7343028", "0.67154413", "0.61035365", "0.60354567", "0.5970218", "0.59653723", "0.5931315", "0.58990955", "0.5755272", "0.5717431", "0.5715828", "0.5692265", "0.5688741", "0.5616253", "0.5609072", "0.5583161", "0.55796957", "0.5577063", "0.55623674", "0.55400354", "0.55383265", "0.55206186", "0.55204594", "0.5512285", "0.54760534", "0.54760534", "0.54760534", "0.54760534", "0.54760534", "0.54760534", "0.5475427", "0.5473662", "0.5472968", "0.5472968", "0.5472968", "0.5441506", "0.54396325", "0.54392475", "0.54231656", "0.54219514", "0.5417784", "0.54173744", "0.54144716", "0.54058474", "0.54015714", "0.53992426", "0.5394185", "0.5394173", "0.53155565", "0.53040606", "0.5297765", "0.5284545", "0.52666456", "0.5265867", "0.5258437", "0.52353466", "0.52342635", "0.5231647", "0.52273387", "0.5224538", "0.52087206", "0.52053255", "0.52004945", "0.518317", "0.51808", "0.51797074", "0.51669496", "0.51662123", "0.51652336", "0.5160936", "0.51564616", "0.51543576", "0.51377195", "0.5131135", "0.5116789", "0.51140106", "0.5100626", "0.50996", "0.5098223", "0.5097882", "0.50965923", "0.5094705", "0.50909835", "0.50705737", "0.50697064", "0.50696695", "0.50633687", "0.5052784", "0.5052097", "0.5049979", "0.5047636", "0.50460804", "0.504054", "0.50297946", "0.50244665", "0.50239426", "0.5021251", "0.50155056", "0.5013133", "0.50123996" ]
0.82383394
0
Filter the provided `queryset` by the `related_filtersets`. It is recommended that you override this method to change the filtering behavior across relationships.
def filter_related_filtersets(self, queryset): for related_name, related_filterset in self.related_filtersets.items(): # Related filtersets should only be applied if they had data. prefix = '%s%s' % (related(self, related_name), LOOKUP_SEP) if not any(value.startswith(prefix) for value in self.data): continue field_name = self.filters[related_name].field_name lookup_expr = LOOKUP_SEP.join([field_name, 'in']) subquery = Subquery(related_filterset.qs.values('pk')) queryset = queryset.filter(**{lookup_expr: subquery}) return queryset
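The document above narrows the outer queryset with a pk-based Subquery per related filterset, which is plain Django ORM. A self-contained sketch of that narrowing step, reusing the same hypothetical Author/Book models as in the earlier sketch:

# Stand-alone illustration of the Subquery narrowing used in filter_related_filtersets.
from django.db.models import Subquery
from myapp.models import Author, Book  # hypothetical

# The related filterset's qs, e.g. an AuthorFilter that already applied its own filters.
author_qs = Author.objects.filter(username__icontains='smith')

# Equivalent of queryset.filter(**{'author__in': Subquery(related_qs.values('pk'))})
books = Book.objects.filter(author__in=Subquery(author_qs.values('pk')))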
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n if applicable_filters:\n queryset = queryset.filter(applicable_filters)\n if applicable_exclusions:\n queryset = queryset.exclude(applicable_exclusions)\n return queryset", "def get_related_filtersets(self):\n related_filtersets = OrderedDict()\n\n for related_name in self.related_filters:\n if related_name not in self.filters:\n continue\n\n f = self.filters[related_name]\n related_filtersets[related_name] = f.filterset(\n data=self.data,\n queryset=f.get_queryset(self.request),\n relationship=related(self, related_name),\n request=self.request,\n prefix=self.form_prefix,\n )\n\n return related_filtersets", "def filter(self, **kwargs):\n related_names = []\n for argname, _ in kwargs.iteritems():\n related_name = argname.split('__')\n if len(related_name) > 1:\n related_names.append(\"__\".join(related_name[:-1]))\n if len(related_names) > 0:\n return super(\n JeevesQuerySet, self).filter(\n **kwargs).select_related(*related_names)\n else:\n return super(JeevesQuerySet, self).filter(**kwargs)", "def filter_queryset(self, queryset):\n for backend in list(self.filter_backends):\n queryset = backend().filter_queryset(self.request, queryset, self)\n return queryset", "def filter(self, *args, **kwargs):\n p = self.proxy\n filter_clauses = self.filter_clauses + list(args)\n related_clauses = self.related_clauses[:]\n\n connection_kwarg = p.connection_kwarg\n connection = self.connection\n\n # Build the filter operations\n for k, v in kwargs.items():\n # Ignore connection parameter\n if k == connection_kwarg:\n connection = v\n continue\n model = p.model\n op = \"eq\"\n if \"__\" in k:\n parts = k.split(\"__\")\n if parts[-1] in QUERY_OPS:\n op = parts[-1]\n k = \"__\".join(parts[:-1])\n col = resolve_member_column(model, k, related_clauses)\n\n # Support lookups by model\n if isinstance(v, Model):\n v = v.serializer.flatten_object(v, scope=None)\n elif op in ('in', 'notin'):\n # Flatten lists when using in or notin ops\n v = model.serializer.flatten(v, scope=None)\n\n clause = getattr(col, QUERY_OPS[op])(v)\n filter_clauses.append(clause)\n\n return self.clone(\n connection=connection,\n filter_clauses=filter_clauses,\n related_clauses=related_clauses)", "def get_queryset(self):\n # Get tags from the request if it was specified\n tags = self.request.query_params.get('tags')\n # Get authors from the request if it was specified\n authors = self.request.query_params.get('authors')\n # Make copy of queryset as to not modify the original queryset\n queryset = self.queryset\n if tags:\n # Get list of ids specified\n tag_ids = self._params_to_ints(tags)\n # Filter on the foreign key object with tags__id__in\n queryset = queryset.filter(tags__id__in=tag_ids)\n if authors:\n # Get list of ids specified\n author_ids = self._params_to_ints(authors)\n # Filter by the author\n queryset = queryset.filter(authors__id__in=author_ids)\n\n return queryset.filter(user=self.request.user)", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def filter_queryset(self, queryset):\n query_params = self.request.query_params\n # validate query parameters\n exception_response = ParamsCheck.validate(\n query_params, 
APIParams.products_list_params\n )\n if exception_response:\n return exception_response\n\n products_qs = self.get_queryset() # all\n\n category = query_params.get(\"category\", None)\n exclude_ingredients = query_params.get(\"exclude_ingredient\", None)\n exclude_ingredients = self._clean_string(exclude_ingredients)\n include_ingredients = query_params.get(\"include_ingredient\", None)\n include_ingredients = self._clean_string(include_ingredients)\n\n # filtering part\n if category is not None:\n products_qs = products_qs.filter(category=category)\n for each in include_ingredients:\n products_qs = products_qs.filter(ingredients__name=each)\n for each in exclude_ingredients:\n products_qs = products_qs.exclude(ingredients__name=each)\n\n return products_qs", "def filter_queryset(self, request, queryset, view):\n applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))\n return self.apply_filters(\n queryset=queryset,\n applicable_filters=self.process_filters(applicable_filters, queryset, view),\n applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)\n )", "def filter_queryset(self, queryset, view=None):\n queryset = super().filter_queryset(queryset.only(\"id\", \"shared\"))\n form_pk = self.kwargs.get(self.lookup_field)\n\n if form_pk:\n try:\n int(form_pk)\n except ValueError as e:\n if form_pk == self.public_data_endpoint:\n queryset = self._get_public_forms_queryset()\n else:\n raise ParseError(_(f\"Invalid pk {form_pk}\")) from e\n else:\n queryset = self._filtered_or_shared_queryset(queryset, form_pk)\n else:\n tags = self.request.query_params.get(\"tags\")\n not_tagged = self.request.query_params.get(\"not_tagged\")\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n queryset = queryset.filter(tags__name__in=tags)\n if not_tagged and isinstance(not_tagged, six.string_types):\n not_tagged = not_tagged.split(\",\")\n queryset = queryset.exclude(tags__name__in=not_tagged)\n\n return queryset", "def prefetch(self, queryset: models.QuerySet) -> models.QuerySet:\n subquery = self.model.objects.all()\n \n if self.filters:\n q = reduce(operator.and_, [f.get() for f in self.filters])\n subquery = subquery.filter(q)\n \n if self.sort:\n subquery = subquery.order_by(*self.sort)\n \n subquery = subquery.select_related(\n *[f for f in self._one_fields if f not in self.joins.keys()]\n )\n subquery = subquery.prefetch_related(\n *[f for f in self._many_fields if f not in self.joins.keys()]\n )\n \n new = queryset.prefetch_related(models.Prefetch(self.field, queryset=subquery))\n \n # Recursively prefetch inner joins\n for j in self.joins.values():\n new = j.prefetch(new)\n \n return new", "def related_objects(self, related_model, related_fields, objs):\n predicate = reduce(operator.or_, (\n query_utils.Q(**{'%s__in' % related_field.name: objs})\n for related_field in related_fields\n ))\n return related_model._default_manager.using(self.using).filter(\n predicate\n )", "def filter_queryset(self, request, queryset, view):\n\n username = request.query_params.get(\"shared_with\")\n\n if username:\n try:\n # The Team model extends the built-in Django Group model\n # Groups a User belongs to are available as a queryset property\n # of a User object, which this code takes advantage of\n\n organization_user_ids = (\n User.objects.get(username=username)\n .groups.all()\n .values_list(\"team__organization\", flat=True)\n .distinct()\n )\n\n filtered_queryset = 
queryset.filter(user_id__in=organization_user_ids)\n\n return filtered_queryset\n\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n return queryset", "def apply_queryset_rules(self, qs):\n clauses = {\n 'filter': [],\n 'exclude': []}\n\n for rule in self.drip_model.queryset_rules.all():\n\n clause = clauses.get(rule.method_type, clauses['filter'])\n\n kwargs = rule.filter_kwargs(qs, now=self.now)\n clause.append(Q(**kwargs))\n\n qs = rule.apply_any_annotation(qs)\n\n if clauses['exclude']:\n qs = qs.exclude(functools.reduce(operator.or_, clauses['exclude']))\n qs = qs.filter(*clauses['filter'])\n\n return qs", "def get_filterable_queryset(self):\n site = self.get_site()\n\n if not site:\n return self.get_model_class().objects.none()\n\n queryset = self.get_model_class().objects.in_site(site).live()\n\n filterable_list_block = self.get_filterable_list_wagtail_block()\n if filterable_list_block is None:\n return queryset\n\n if filterable_list_block.value['filter_children']:\n queryset = queryset.child_of(self)\n elif filterable_list_block.value['filter_siblings']:\n queryset = queryset.sibling_of(self)\n\n return queryset", "def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n \n # List of StockLocation objects to match against\n locations = self.get_locations()\n\n # We wish to filter by stock location(s)\n if len(locations) > 0:\n \"\"\"\n At this point, we are basically forced to be inefficient,\n as we need to compare the 'filters' string of each label,\n and see if it matches against each of the requested items.\n\n TODO: In the future, if this becomes excessively slow, it\n will need to be readdressed.\n \"\"\"\n\n valid_label_ids = set()\n\n for label in queryset.all():\n\n matches = True\n\n # Filter string defined for the StockLocationLabel object\n try:\n filters = InvenTree.helpers.validateFilterString(label.filters)\n except:\n # Skip if there was an error validating the filters...\n continue\n\n for loc in locations:\n\n loc_query = StockLocation.objects.filter(pk=loc.pk)\n\n try:\n if not loc_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_label_ids.add(label.pk)\n else:\n continue\n\n # Reduce queryset to only valid matches\n queryset = queryset.filter(pk__in=[pk for pk in valid_label_ids])\n\n return queryset", "def filter_children(self, queryset, filter_dict):\n return queryset.filter(**filter_dict)", "def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def filter_queryset(self, queryset):\n \n queryset = super().filter_queryset(queryset)\n\n # List of StockItem objects to match against\n items = self.get_items()\n\n # We wish to filter by stock items\n if len(items) > 0:\n \"\"\"\n At this point, we are basically forced to be inefficient,\n as we need to compare the 'filters' string of each label,\n and see if it matches against each of the requested items.\n\n TODO: In the future, if this becomes excessively slow, it\n will need to be readdressed.\n \"\"\"\n\n # Keep track of which labels match every specified stockitem\n valid_label_ids = set()\n \n for label in queryset.all():\n\n matches = True\n\n # Filter string defined for the StockItemLabel object\n try:\n filters = InvenTree.helpers.validateFilterString(label.filters)\n except ValidationError:\n 
continue\n\n for item in items:\n\n item_query = StockItem.objects.filter(pk=item.pk)\n\n try:\n if not item_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_label_ids.add(label.pk)\n else:\n continue\n\n # Reduce queryset to only valid matches\n queryset = queryset.filter(pk__in=[pk for pk in valid_label_ids])\n\n return queryset", "def get_filterable_queryset(self):\n queryset = super().get_filterable_queryset()\n category_names = get_category_children(self.filterable_categories)\n return queryset.filter(categories__name__in=category_names)", "def related_view_filter():\n pass", "def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n items = self.get_items()\n\n if len(items) > 0:\n \"\"\"At this point, we are basically forced to be inefficient:\n\n We need to compare the 'filters' string of each report template,\n and see if it matches against each of the requested items.\n\n In practice, this is not too bad.\n \"\"\"\n\n valid_report_ids = set()\n\n for report in queryset.all():\n matches = True\n\n try:\n filters = InvenTree.helpers.validateFilterString(report.filters)\n except ValidationError:\n continue\n\n for item in items:\n item_query = self.ITEM_MODEL.objects.filter(pk=item.pk)\n\n try:\n if not item_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_report_ids.add(report.pk)\n\n # Reduce queryset to only valid matches\n queryset = queryset.filter(pk__in=list(valid_report_ids))\n\n return queryset", "def related_objects(self, related, objs):\n from versions.models import Versionable\n\n related_model = related.related_model\n if issubclass(related_model, Versionable):\n qs = related_model.objects.current\n else:\n qs = related_model._base_manager.all()\n return qs.using(self.using).filter(\n **{\"%s__in\" % related.field.name: objs}\n )", "def filter(self, *args, **kwargs):\n self._expand_pk(kwargs)\n return self._filter_or_exclude(False, *args, **kwargs)", "def filter_queryset(self, request, queryset, view):\n owner = request.query_params.get(\"owner\")\n\n if owner:\n kwargs = {self.owner_prefix + \"__username__iexact\": owner}\n\n return queryset.filter(**kwargs)\n\n return queryset", "def filter_queryset(self, request, queryset, view):\n # filter by tags if available.\n tags = request.query_params.get(\"tags\", None)\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n return queryset.filter(tags__name__in=tags)\n\n return queryset", "def filter(self, destination_object=None, source_object=None, **kwargs):\n if destination_object:\n kwargs.update({\n \"destination_id\": destination_object.pk,\n \"destination_type\": get_for_model(destination_object),\n })\n if source_object:\n kwargs.update({\n \"source_id\": source_object.pk,\n \"source_type\": get_for_model(source_object),\n })\n return super(RelatedContentQuerySet, self).filter(**kwargs)", "def filter(cls, *args, **kwargs) -> models.QuerySet:\n return cls.objects.filter(*args, **kwargs)", "def pre_filter(self, qs):\n return qs", "def filter_queryset(self, request, queryset, view):\n if request and request.user.is_anonymous:\n return queryset.filter(shared=True)\n\n return queryset", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n # Filter by 'build'\n build = params.get('build', 
None)\n\n if build is not None:\n\n try:\n build = Build.objects.get(pk=build)\n\n queryset = queryset.filter(stock_item__build=build)\n\n except (ValueError, Build.DoesNotExist):\n pass\n\n # Filter by stock item\n item = params.get('stock_item', None)\n\n if item is not None:\n try:\n item = StockItem.objects.get(pk=item)\n\n items = [item]\n\n # Do we wish to also include test results for 'installed' items?\n include_installed = str2bool(params.get('include_installed', False))\n\n if include_installed:\n # Include items which are installed \"underneath\" this item\n # Note that this function is recursive!\n installed_items = item.get_installed_items(cascade=True)\n\n items += list(installed_items)\n\n queryset = queryset.filter(stock_item__in=items)\n\n except (ValueError, StockItem.DoesNotExist):\n pass\n\n return queryset", "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset", "def filter(self, *args, **kwargs):\n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n\n if self.value() is None:\n return queryset.all()\n\n return queryset.filter(firm__pk=self.value())", "def get_queryset(self, **kwargs):\n # if getattr(self.view, 'deleted_obj_lookup', False) and self.view.queryset is None and self.view.model:\n if getattr(self.view, 'deleted_obj_lookup', False) or self.request.GET.get('deleted_obj_lookup', None):\n return self.view.model._default_manager.all_with_deleted().filter(**kwargs)\n return self.super.get_queryset(**kwargs)", "def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)", "def getFilterSetFilterFieldManagers(self):\n return _get_related_managers(self, FilterSetFilterField)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def filter_queryset(self, request, queryset, view):\n form_id = view.kwargs.get(view.lookup_field, view.kwargs.get(\"xform_pk\"))\n lookup_field = view.lookup_field\n\n queryset = queryset.filter(deleted_at=None)\n if request.user.is_anonymous:\n return queryset\n\n if form_id:\n if lookup_field == \"pk\":\n int_or_parse_error(\n form_id, \"Invalid form ID. 
It must be a positive integer\"\n )\n\n try:\n if lookup_field == \"uuid\":\n form_id = UUID(form_id)\n form = queryset.get(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))\n else:\n xform_kwargs = {lookup_field: form_id}\n form = queryset.get(**xform_kwargs)\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n # Check if form is public and return it\n if form.shared:\n if lookup_field == \"uuid\":\n return queryset.filter(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))\n return queryset.filter(Q(**xform_kwargs))\n\n return super().filter_queryset(request, queryset, view)", "def get_related_field_queryset(self, request, list_queryset, field, queryset):\n if hasattr(queryset, 'list_permissions'):\n return queryset.list_permissions(request.user)\n else:\n return queryset", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def filter_queryset(self,queryset):\n filters = {}\n for backend in list(self.filter_backends):\n backendobj = backend()\n queryset = backendobj.filter_queryset(self.request, queryset, self)\n if hasattr(backendobj,'get_applied_filters'):\n filters.update(backendobj.get_applied_filters())\n self. applied_filters = OrderedDict()\n for key,value in filters.items():\n if isinstance(value,datetime.datetime):\n self.applied_filters[key]=value\n del filters[key]\n self.applied_filters.update(sorted(filters.items(),key=itemgetter(1),reverse=True))\n return queryset", "def filter_queryset(self, request, queryset, view):\n\n if view.action == \"list\":\n # Return widgets from xform user has perms to\n return self._xform_filter_queryset(request, queryset, view, \"object_id\")\n\n return super().filter_queryset(request, queryset, view)", "def queryset(self, request, queryset):\r\n # Compare the requested value to decide how to filter the queryset.\r\n if self.value():\r\n return queryset.filter(parent_id=self.value())\r\n return queryset", "def get_queryset(self):\n category_qs = Category.objects \\\n .select_related('theme') \\\n .order_by('theme__name', 'name')\n\n base_qs = Aid.objects \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors') \\\n .prefetch_related(Prefetch('categories', queryset=category_qs))\n\n user = self.request.user\n if user.is_authenticated and user.is_superuser:\n qs = base_qs\n elif user.is_authenticated:\n q_published = Q(status='published')\n q_is_author = Q(author=user)\n qs = base_qs.filter(q_published | q_is_author)\n else:\n qs = base_qs.published()\n\n return qs", "def prefetch_related(self, *lookups):\n self._not_support_combined_queries(\"prefetch_related\")\n clone = self._chain()\n if lookups == (None,):\n clone._prefetch_related_lookups = ()\n else:\n for lookup in lookups:\n if isinstance(lookup, Prefetch):\n lookup = lookup.prefetch_to\n lookup = lookup.split(LOOKUP_SEP, 1)[0]\n if lookup in self.query._filtered_relations:\n raise ValueError(\n \"prefetch_related() is not supported with FilteredRelation.\"\n )\n clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups\n return clone", "def filter_queryset(self, queryset):\n tags = self.request.GET.getlist(\"tag\")\n if tags:\n for tag in tags:\n queryset = queryset.filter(tag__tag=tag)\n return super().filter_queryset(queryset)", "def filter_queryset(self, request, queryset, view):\n if request.user.is_anonymous:\n return queryset.filter(Q(shared_data=True))\n return queryset", "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n 
return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def filter_installed(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(belongs_to=None)\n else:\n return queryset.filter(belongs_to=None)", "def filter(self, *q, **kwargs):\n return self._filter_or_exclude(*q, **kwargs)", "def step_filter(self, qs):\n return qs", "def filter_queryset(self, request, queryset, view):\n if view.action == \"retrieve\" and request.method == \"GET\":\n return queryset.model.objects.all()\n\n filtered_queryset = super().filter_queryset(request, queryset, view)\n org_users = set(\n [group.team.organization for group in request.user.groups.all()] +\n [o.user for o in filtered_queryset]\n )\n\n return queryset.model.objects.filter(user__in=org_users, user__is_active=True)", "def get_queryset(self):\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') \\\n .prefetch_related('financers', 'instructors')\n\n filter_form = self.form\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results).distinct()\n return ordered_results", "def get_queryset(self):\n if getattr(self, 'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()", "def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs).filter(user=self.request.user)\n return qs", "def setup_eager_loading(cls, queryset):\n queryset = queryset.prefetch_related('keywords_str')\n queryset = queryset.prefetch_related('tags_str')\n # queryset = queryset.prefetch_related('keywords')\n # queryset = queryset.prefetch_related('tags')\n return queryset", "def setup_eager_loading(queryset):\n queryset = queryset.select_related('user')\n return queryset", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def get_filter_subset(cls, params, rel=None):\n # Determine names of filters from query params and remove empty values.\n # param names that traverse relations are translated to just the local\n # filter names. eg, `author__username` => `author`. 
Empty values are\n # removed, as they indicate an unknown field eg, author__foobar__isnull\n filter_names = {cls.get_param_filter_name(param, rel) for param in params}\n filter_names = {f for f in filter_names if f is not None}\n return OrderedDict(\n (k, v) for k, v in cls.base_filters.items() if k in filter_names\n )", "def get_queryset(self):\n video_id = self.get_related_video_id()\n\n if video_id is None: # backward behavior for stand-alone site context\n return super().get_queryset().none()\n\n queryset = super().get_queryset().filter(video_id=video_id)\n\n if self.request.resource is not None: # Then we are in an LTI context\n queryset = self._get_lti_queryset(queryset)\n\n if (\n not self.request.resource and self.request.user and self.action == \"list\"\n ): # Then we are in stand-alone site context\n queryset = self._get_standalone_queryset(queryset)\n\n if self.action == \"list_attendances\":\n # we only want live sessions that are registered or with live_attendance not empty\n queryset = queryset.filter(\n Q(is_registered=True)\n | ~(Q(live_attendance__isnull=True) | Q(live_attendance__exact={}))\n )\n\n return queryset", "def _filter_related_fk(self, rel):\n field = rel.field\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def filter_ancestor(self, queryset, name, ancestor):\n\n return queryset.filter(\n parent__in=ancestor.get_descendants(include_self=True)\n )", "def qs(self) -> MIZQuerySet:\n if isinstance(self, type):\n raise TypeError(\n f\"Calling qs() from class level is prohibited. Use {self.__name__}.objects instead.\"\n )\n # noinspection PyUnresolvedReferences\n return self._meta.model.objects.filter(pk=self.pk)", "def get_queryset(self):\n\n fl = self.request.QUERY_PARAMS.get('fl', None)\n win = self.request.QUERY_PARAMS.get('win', None)\n win_filter = Q(orderstatus__in=['Shipped', 'Unshipped', 'Processing'], fulfillmentchannel='MFN')\n #win_filter = Q(orderstatus__in=['Unshipped', 'Processing'], fulfillmentchannel='MFN')\n\n queryset = None\n if fl is not None and fl.isdigit():\n logger.info(\"Got filter id: %s\", fl)\n try:\n filter = Filter.objects.get(pk=int(fl))\n if filter:\n ancestor_logic = Q() #Create Q object to hold other query\n #If filter is only root node\n if filter.is_root_node():\n ancestor_logic = pickle.loads(filter.logic) #Deserilize the filter logic\n logger.info(\"Filter has only root node, Logic: %s\", ancestor_logic)\n\n #If filter has parents\n else:\n for filter_data in filter.get_ancestors(False, True): #Get all parents including self\n filter_logic = pickle.loads(filter_data.logic) #Deserilize the filter logic\n if ancestor_logic.__len__()==0:\n ancestor_logic = filter_logic\n else:\n ancestor_logic = ancestor_logic & filter_logic\n logger.info(\"Filter has parents, Logic: %s\", ancestor_logic)\n\n if ancestor_logic:\n queryset = AmazonOrders.objects.filter(ancestor_logic & win_filter) #pass the query object to filter\n logger.info(\"Filter query, Query: %s\", queryset.query)\n\n except Exception as e:\n logger.error(\"In queryset exception : %s\",e)\n elif win is not None:\n logic = None\n if win == 'AFN':\n logic = Q(fulfillmentchannel=win, orderstatus='Shipped')\n elif win in ['Pending', 'Canceled']:\n logic = Q(orderstatus=win)\n elif win == 'Unshipped':\n logic = Q(orderstatus=win)\n\n if logic:\n queryset = AmazonOrders.objects.filter(logic)\n logger.info(\"Win query, Query: %s\", queryset.query)\n\n else:\n #queryset = AmazonOrders.objects.all()\n queryset = 
AmazonOrders.objects.filter(win_filter)\n logger.info(\"Filter not passed, Processing full Query: %s\", queryset.query)\n\n return queryset", "def filter(self, request, queryset): # NOQA: A003\n pro = request.GET.get(\"production\")\n\n if pro:\n queryset = queryset.filter(production=pro)\n\n queryset = queryset.prefetch_related(\"production\")\n\n return queryset", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('work__writers')\n qs = qs.prefetch_related('artist')\n qs = qs.prefetch_related('record_label')\n return qs", "def filter(self, filtered=None, **kwargs):\n \"\"\"whose attributes match the given keyword arguments.\n \"\"\"\n if filtered is None:\n filtered = self._objects\n try:\n key, value = kwargs.popitem()\n except KeyError:\n # We're out of filters, return\n return filtered\n\n def get_match(obj):\n return key in obj and obj.get(key) == value\n\n return self.filter(filtered=filter(get_match, filtered), **kwargs)", "def queryset(self, request):\n qs = super(SiteAdmin, self).queryset(request)\n qs = Site.admin.select_related().filter(id__in=qs)\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def filter(self, *args, **kwargs):\n auth_object = kwargs.pop('auth_object', None)\n\n if auth_object is not None:\n ctype = ContentType.objects.get_for_model(auth_object)\n kwargs['auth_object_content_type'] = ctype\n kwargs['auth_object_id'] = auth_object.pk\n\n subscribed_object = kwargs.pop('subscribed_object', None)\n\n if subscribed_object is not None:\n ctype = ContentType.objects.get_for_model(subscribed_object)\n proxies = SubscribedObjectProxy.objects.filter(content_type=ctype,\n object_id=subscribed_object.pk)\n kwargs['subscribed_object_proxies__in'] = proxies\n\n return super(NotificationChannelQuerySet, self).filter(*args, **kwargs)", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n categories = self.request.query_params.get('categories')\n user = self.request.query_params.get('user')\n queryset = self.queryset\n\n if tags:\n tags_title = self._params(tags)\n queryset = queryset.filter(tags__title__in=tags_title)\n\n if categories:\n categories_title = self._params(categories)\n queryset = queryset.filter(categories__title__in=categories_title)\n\n if user:\n user_id = self._params_to_ints(user)\n queryset = queryset.filter(user__id__in=user_id)\n return queryset", "def get_prefetched_queryset(self, *args, **kwargs):\n\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .prefetch_related(\n \"assignment_related_users\",\n \"agenda_items\",\n \"lists_of_speakers\",\n \"tags\",\n \"attachments\",\n \"polls\",\n \"polls__options\",\n )\n )", "def get_queryset(self):\n # Check if the parameter assigned_only is on the request\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n # Make copy of queryset so we do not modify the original\n queryset = self.queryset\n # If the parameter was passed filter on the book not\n # being specified\n if assigned_only:\n queryset = queryset.filter(book__isnull=False)\n\n # Remove duplicates\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()", "def post_filter(self, qs):\n return qs", "def filter_queryset(self, request, queryset, view):\n if str(request.query_params.get(\"orgs\")).lower() == \"false\":\n organization_user_ids = OrganizationProfile.objects.values_list(\n \"user__id\", flat=True\n )\n queryset = 
queryset.exclude(id__in=organization_user_ids)\n\n return queryset", "def choices_queryset_queryset(self, *args, **kwargs):\n import re\n and_split = re.compile('(?:\\s+AND\\s+)')\n qs = []\n if self.choices_queryset and self.field_type in \\\n (\"ModelChoiceField\",\"ModelMultipleChoiceField\"):\n qs = self.choices_queryset.get_all_objects_for_this_type()\n\n if self.choices_queryset_filter:\n filter_args = dict([f.split('=') for f in self.choices_queryset_filter.split(',')])\n\n # testing AND y OR\n # and_split.split(\"name__in=[1,2,4,5, 'AND', ' AND THEN...'] AND id__gt=2\")\n # [\"name__in=[1,2,4,5, 'AND ']\", ' AND ', 'id__gt=2]\n # print and_split.split(self.choices_queryset_filter)\n # filter_args = dict([f.split('=') for f in and_split.split(self.choices_queryset_filter)])\n\n if filter_args:\n qs = qs.filter(**filter_args)\n return qs", "def get_queryset(self):\n queryset = super(BaseViewSet, self).get_queryset()\n user = self.request.user\n return queryset.filter(user=user)", "def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n\n queryset = self.queryset\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset", "def get_queryset(self):\n target_author = get_object_or_404(CustomUser, username=self.kwargs.get('username', None))\n if self.request.user == target_author:\n return Taxonomy.objects.filter(author=target_author)\n else:\n return Taxonomy.objects.filter(author=target_author).filter(public=True)", "def filtered_by(self, rel_name,\n related_entity):\n return self.weighted_by(rel_name, related_entity)", "def filter_by_model(self, instance):\n content_type = ContentType.objects.get_for_model(instance.__class__)\n object_id = instance.id\n queryset = super(UserTrackerManager, self).filter(\n content_type=content_type, object_id=object_id)\n return queryset", "def join(self, model_or_queryset, *filter_q, **filter_kw):\n join_type = filter_kw.get('_join_type', INNER)\n queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw)\n\n # the underlying Django code forces the join type into INNER or a LEFT OUTER join\n alias, _ = queryset.query.table_alias(self.name)\n join = queryset.query.alias_map[alias]\n if join.join_type != join_type:\n join.join_type = join_type\n return queryset", "def get_queryset(self):\n return super().get_queryset().filter(user=self.request.user)", "def get_queryset(self):\n return self.queryset().select_related(\n 'image'\n ).prefetch_related(\n 'authors',\n 'categories',\n )", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n if common.settings.stock_expiry_enabled():\n\n # Filter by 'expiry date'\n expired_date_lte = params.get('expiry_date_lte', None)\n if expired_date_lte is not None:\n try:\n date_lte = datetime.fromisoformat(expired_date_lte)\n queryset = queryset.filter(expiry_date__lte=date_lte)\n except (ValueError, TypeError):\n pass\n\n expiry_date_gte = params.get('expiry_date_gte', None)\n if expiry_date_gte is not None:\n try:\n date_gte = datetime.fromisoformat(expiry_date_gte)\n queryset = queryset.filter(expiry_date__gte=date_gte)\n except (ValueError, TypeError):\n pass\n\n # Filter by 'stale' status\n stale = params.get('stale', None)\n\n if stale is not None:\n stale = str2bool(stale)\n\n # How many days to account 
for \"staleness\"?\n stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')\n\n if stale_days > 0:\n stale_date = datetime.now().date() + timedelta(days=stale_days)\n\n stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)\n\n if stale:\n queryset = queryset.filter(stale_filter)\n else:\n queryset = queryset.exclude(stale_filter)\n\n # Exclude stock item tree\n exclude_tree = params.get('exclude_tree', None)\n\n if exclude_tree is not None:\n try:\n item = StockItem.objects.get(pk=exclude_tree)\n\n queryset = queryset.exclude(\n pk__in=[it.pk for it in item.get_descendants(include_self=True)]\n )\n\n except (ValueError, StockItem.DoesNotExist):\n pass\n\n # Filter by \"part tree\" - only allow parts within a given variant tree\n part_tree = params.get('part_tree', None)\n\n if part_tree is not None:\n try:\n part = Part.objects.get(pk=part_tree)\n\n if part.tree_id is not None:\n queryset = queryset.filter(part__tree_id=part.tree_id)\n except Exception:\n pass\n\n # Exclude StockItems which are already allocated to a particular SalesOrder\n exclude_so_allocation = params.get('exclude_so_allocation', None)\n\n if exclude_so_allocation is not None:\n\n try:\n order = SalesOrder.objects.get(pk=exclude_so_allocation)\n\n # Grab all the active SalesOrderAllocations for this order\n allocations = SalesOrderAllocation.objects.filter(\n line__pk__in=[\n line.pk for line in order.lines.all()\n ]\n )\n\n # Exclude any stock item which is already allocated to the sales order\n queryset = queryset.exclude(\n pk__in=[\n a.item.pk for a in allocations\n ]\n )\n\n except (ValueError, SalesOrder.DoesNotExist):\n pass\n\n # Does the client wish to filter by the Part ID?\n part_id = params.get('part', None)\n\n if part_id:\n try:\n part = Part.objects.get(pk=part_id)\n\n # Do we wish to filter *just* for this part, or also for parts *under* this one?\n include_variants = str2bool(params.get('include_variants', True))\n\n if include_variants:\n # Filter by any parts \"under\" the given part\n parts = part.get_descendants(include_self=True)\n\n queryset = queryset.filter(part__in=parts)\n\n else:\n queryset = queryset.filter(part=part)\n\n except (ValueError, Part.DoesNotExist):\n raise ValidationError({\"part\": \"Invalid Part ID specified\"})\n\n # Does the client wish to filter by stock location?\n loc_id = params.get('location', None)\n\n cascade = str2bool(params.get('cascade', True))\n\n if loc_id is not None:\n\n # Filter by 'null' location (i.e. 
top-level items)\n if isNull(loc_id):\n if not cascade:\n queryset = queryset.filter(location=None)\n else:\n try:\n # If '?cascade=true' then include items which exist in sub-locations\n if cascade:\n location = StockLocation.objects.get(pk=loc_id)\n queryset = queryset.filter(location__in=location.getUniqueChildren())\n else:\n queryset = queryset.filter(location=loc_id)\n\n except (ValueError, StockLocation.DoesNotExist):\n pass\n\n # Does the client wish to filter by part category?\n cat_id = params.get('category', None)\n\n if cat_id:\n try:\n category = PartCategory.objects.get(pk=cat_id)\n queryset = queryset.filter(part__category__in=category.getUniqueChildren())\n\n except (ValueError, PartCategory.DoesNotExist):\n raise ValidationError({\"category\": \"Invalid category id specified\"})\n\n # Does the client wish to filter by BomItem\n bom_item_id = params.get('bom_item', None)\n\n if bom_item_id is not None:\n try:\n bom_item = BomItem.objects.get(pk=bom_item_id)\n\n queryset = queryset.filter(bom_item.get_stock_filter())\n\n except (ValueError, BomItem.DoesNotExist):\n pass\n\n # Filter by company (either manufacturer or supplier)\n company = params.get('company', None)\n\n if company is not None:\n queryset = queryset.filter(Q(supplier_part__supplier=company) | Q(supplier_part__manufacturer_part__manufacturer=company))\n\n return queryset", "def get_queryset(self):\n queryset = self.queryset.all()\n \n #Filter based on query\n query = self.request.query_params.get('q', None)\n if query:\n queryset = queryset.filter(Q(pk__icontains=query) | \n Q(customer__name__icontains=query))\n \n offset = int(self.request.query_params.get('offset', 0))\n limit = int(self.request.query_params.get('limit', settings.REST_FRAMEWORK['PAGINATE_BY']))\n if offset and limit:\n queryset = queryset[offset - 1:limit + (offset - 1)]\n else:\n queryset = queryset[0:50]\n\n queryset = queryset.select_related('acknowledgement',\n 'pdf',\n 'customer',\n 'employee',\n 'project')\n queryset = queryset.prefetch_related('items',\n 'customer__addresses',\n 'items__item')\n \n return queryset", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n ingredient = self.request.query_params.get('ingredient')\n queryset = self.queryset\n if tags:\n tags_id = self._params_to_int(tags)\n queryset = queryset.filter(tags__id__in =tags_id)\n if ingredient:\n ingredient_id = self._params_to_int(ingredient)\n queryset = queryset.filter(ingredient__id__in = ingredient_id)\n\n return queryset.filter(user = self.request.user)", "def process_filters(self, filters, queryset, view):\n return filters", "def filter(self, *arguments, **kwargs):\n from jetengine.query_builder.node import Q, QCombination, QNot\n from jetengine.query_builder.transform import validate_fields\n\n if arguments and len(arguments) == 1 and isinstance(arguments[0], (Q, QNot, QCombination)):\n if self._filters:\n self._filters = self._filters & arguments[0]\n else:\n self._filters = arguments[0]\n else:\n validate_fields(self.__klass__, kwargs)\n if self._filters:\n self._filters = self._filters & Q(**kwargs)\n else:\n if arguments and len(arguments) == 1 and isinstance(arguments[0], dict):\n self._filters = Q(arguments[0])\n else:\n self._filters = Q(**kwargs)\n\n return self", "def get_queryset(self):\n if self.queryset is None:\n return self.model.objects.all()\n return self.queryset", "def filter_queryset(self, queryset):\n # get the queryset (so effectively run the method normally)\n queryset = super().filter_queryset(queryset)\n # now 
fix it and save it in session data\n queryset = save_session_data(self, queryset)\n return queryset", "def queryset(self, request, queryset):\n if self.value() == 'syndicated':\n return queryset.filter(syndicated_at__isnull=False)\n if self.value() == 'ready_to_syndicate':\n return queryset.filter(ignored_at=None,\n syndicated_at=None,\n longitude_position__isnull=False,\n merchantwebsite__deleted_at=None,\n merchantdoc__deleted_at=None,\n ).distinct()\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)", "def get_queryset(self):\n qs = super(RetiresmartzViewSet, self).get_queryset()\n # Check user object permissions\n user = SupportRequest.target_user(self.request)\n return qs.filter_by_user(user)", "def filter(self, *args, **kwargs):\n # *args are `Q` objects\n for q in args:\n self.query.add_q(q)\n if kwargs:\n self.query.add_q(ql.Q(**kwargs))\n return self", "def get_queryset(self):\n queryset = Company.objects.all().order_by('name')\n\n if self.request.GET.get('supplier', None):\n queryset = queryset.filter(is_supplier=True)\n\n if self.request.GET.get('customer', None):\n queryset = queryset.filter(is_customer=True)\n\n return queryset", "def get_queryset(self):\n filter_kwargs = {self.bound_to_user_field_name: self.request.auth.user}\n return super(BoundToUserMixin, self).get_queryset().filter(**filter_kwargs)", "def get_queryset(self):\n return self._get_base_queryset().filter(deleted__isnull=True)", "def get_queryset(self):\n \n # throw error if no args pass in\n assert self.model or self.queryset, 'No queryset found'\n if self.queryset:\n return self.queryset\n else:\n return self.model.objects.all()", "def queryset(self, request, queryset):\n # 返回文章queryset里面 所有指定作者的文章\n author_id = self.value()\n if author_id:\n return queryset.filter(author__id=author_id)\n else:\n return queryset" ]
[ "0.67854434", "0.6782465", "0.66839457", "0.6680913", "0.646492", "0.63564074", "0.63288736", "0.63143224", "0.63010234", "0.6240037", "0.6125244", "0.60945296", "0.608679", "0.6075394", "0.6073214", "0.60558784", "0.60067743", "0.60016656", "0.5998127", "0.59923685", "0.5961973", "0.59463584", "0.59414476", "0.5927339", "0.5890029", "0.5801646", "0.5800344", "0.5799148", "0.5787586", "0.57637143", "0.57158923", "0.5715131", "0.5704239", "0.5633272", "0.56267184", "0.56062853", "0.55856186", "0.557398", "0.5571936", "0.55583507", "0.5547043", "0.5542426", "0.5540386", "0.5536742", "0.55236584", "0.5497709", "0.54961336", "0.5495913", "0.54938567", "0.5490111", "0.5488922", "0.54842144", "0.54725605", "0.54427844", "0.54310894", "0.54253155", "0.5419738", "0.5407551", "0.53922224", "0.53901887", "0.5374227", "0.5373183", "0.53627497", "0.5356814", "0.534319", "0.5342137", "0.5332187", "0.5330275", "0.53248996", "0.53006274", "0.52905625", "0.5284559", "0.5283927", "0.52723527", "0.5270824", "0.5267442", "0.5253701", "0.5249691", "0.52490187", "0.5248903", "0.52483475", "0.5235964", "0.52174187", "0.521572", "0.52106494", "0.521042", "0.51995945", "0.5196474", "0.51951385", "0.517655", "0.5129035", "0.5118152", "0.5116105", "0.5113794", "0.5106301", "0.50997984", "0.5095329", "0.5076002", "0.5072068", "0.5064754" ]
0.84336936
0
Callback function for metrics_btn A window will popup to display the user metadata of previous user
def display_metrics(self): metrics = client.user_metrics(self.user_name.get()) messagebox.showinfo("Metrics", metrics)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_metrics3(self):\n messagebox.showinfo(\"Processed Image Metrics\", self.pro_metrics)", "def display_metrics2(self):\n messagebox.showinfo(\"Original Image Metrics\", self.raw_metrics)", "def user_labels_prev(*args):\n return _ida_hexrays.user_labels_prev(*args)", "def action_session_user_stats(args, config, db, wdb):\n\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_users AS\n (SELECT DISTINCT\n analysis_session_requests.session_id as session_id,\n analysis_requestlog_combined.user_sid as user_sid\n FROM analysis_requestlog_combined, analysis_session_requests\n WHERE analysis_requestlog_combined.id = analysis_session_requests.request_id\n )\n ''')\n wdb.commit()\n\n # How many sessions did each user have?\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_count_per_user AS (\n SELECT\n analysis_session_users.user_sid,\n count(analysis_session_users.session_id) as session_count\n FROM analysis_session_users, user\n WHERE analysis_session_users.user_sid = user.user_name\n GROUP BY analysis_session_users.user_sid\n );''')\n wdb.commit()\n\n user_ids = db.simple_query('SELECT user_sid FROM analysis_session_users')\n sessions_per_user = collections.Counter(user_ids)\n sessions_per_user['anonymous'] = sessions_per_user[None]\n del sessions_per_user[None]\n\n write_data('user_session_counts', {\n 'data': dict(sessions_per_user.most_common()),\n })\n reverse_counts = collections.Counter(\n sessions_per_user.values()).most_common()\n write_data('user_session_counts_reverse', {\n 'data': list(reverse_counts),\n })", "def _onUsers(self, event):\n dialog = sc.UsersDialog(self)\n dialog.ShowModal()\n dialog.Destroy()\n self.updateUsers()", "def display_user_stats(self):\n\n self.user_frame = tk.LabelFrame(\n self.stats_frame, text=\"User Types\", padx=5, pady=5\n )\n self.user_frame.grid(row=3, padx=5, pady=5, sticky=\"w\")\n self.user_stats_data = tk.Label(self.user_frame, justify=\"left\")\n self.user_stats_data.pack()\n\n self.gender_frame = tk.LabelFrame(\n self.stats_frame, text=\"User Gender\", padx=5, pady=5\n )\n self.gender_frame.grid(row=3, column=1, padx=5, pady=5, sticky=\"w\")\n self.gender_stats_data = tk.Label(self.gender_frame, justify=\"left\")\n self.gender_stats_data.pack()\n\n self.age_frame = stat_display_labels(\n self.stats_frame,\n \"Age Stats\",\n [\n \"Oldest Rider Birth Year:\",\n \"Youngest Rider Birth Year:\",\n \"Most common birth year:\",\n ],\n row=4,\n columnspan=2,\n )\n self.age_stats_data = tk.Label(self.age_frame, justify=\"left\")\n self.age_stats_data.grid(row=0, column=1)", "def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. 
Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()", "def back_click(self):\n self.controller.show_account_display_screen(self.us)", "def stats (self, event = None):\r\n \r\n statFrame = StatisticsDialog(parent = self, storyPanel = self.storyPanel, app = self.app)\r\n statFrame.ShowModal()", "def OnButtonOKButton(self):\r\n\t\tmeta = {}\r\n\t\tfor key in self._config.keys():\r\n\t\t\tif self._config[key] != self._configtmp[key]:\r\n\t\t\t\tmeta[key] = self._configtmp[key]\r\n\t\tif meta:\r\n\t\t\t# Since if frame open, wallpaper downloads are delayed,\r\n\t\t\t# the image data is always valid\r\n\t\t\tself._config.update(self._configtmp)\r\n\t\t\tself._config.Save(self._twirlpath)\r\n\t\t\tmeta.update({\"username\":self._config[\"username\"].encode(\"utf-8\"),\r\n\t\t\t\t\"userhash\":self._config[\"userhash\"],\r\n\t\t\t\t\"imageid\":self._config[\"imageid\"]})\r\n\t\t\tnetops.SendMetadata(consts.URL_SEND_META, meta)\r\n\t\tself.Hide()", "def analyze(self):\r\n self.current = 'score'\r\n popup = AnalyzeInterface(self.current_screen).open()", "def showUserStats(self) :\n self.getAllStats()\n self.getNbTotalLevelsPlayed()\n Scenario.messageAllStats(self.level_history[0].created_at)\n self.showBestStats()\n self.showWorstStats()\n self.showAverageStats()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def show_counter_screen():\n\n # Get and clear active screen\n clear()\n create_title(\"Here is the counter:\")\n\n obj = {\"counter\": 0}\n\n counter_lbl = create_title(\"%d\" % obj[\"counter\"])\n counter_lbl.set_y(150-counter_lbl.get_height()//2)\n\n def plus_one(btn, e):\n if e == lv.EVENT.RELEASED:\n obj[\"counter\"] += 1\n counter_lbl.set_text(\"%d\" % obj[\"counter\"])\n\n def minus_one(btn, e):\n if e == lv.EVENT.RELEASED:\n obj[\"counter\"] -= 1\n counter_lbl.set_text(\"%d\" % obj[\"counter\"])\n\n btn = create_button(\"+1\", y=150-BTN_HEIGHT//2, callback=plus_one)\n btn.set_width(100)\n btn.set_x(HOR_RES-PADDING-100)\n\n btn = create_button(\"-1\", y=150-BTN_HEIGHT//2, callback=minus_one)\n btn.set_width(100)\n btn.set_x(PADDING)", "def back_click(self):\n self.controller.show_user_menu_screen(self.us)", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "async def userinfo(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n target_data = ctx.author_data if target == ctx.author else await ctx.get_user_data(target)\n if target_data:\n nicknames = await target_data.get_nicknames()\n else:\n nicknames = []\n shared_servers = len([member for member in ctx.bot.get_all_members() if member == target])\n embed = 
discord.Embed(title=f\"{target.nick or target.name} Profile\")\n embed.set_author(name=f\"{target.name} ({target.id})\", icon_url=target.avatar_url)\n embed.set_thumbnail(url=target.avatar_url)\n embed.add_field(name=\"Shared Servers\", value=f\"{shared_servers} Shared\")\n embed.add_field(name=\"Created\",\n value=f\"\"\"{timeago.format(target.created_at)} ({target.created_at.strftime(\"%b %d, %Y\")})\"\"\")\n embed.add_field(name=\"Joined\",\n value=f\"\"\"{timeago.format(target.joined_at)} ({target.joined_at.strftime(\"%b %d, %Y\")})\"\"\")\n embed.set_footer(text=\"Last Spoke In server\")\n if target_data:\n embed.timestamp = target_data.last_spoke\n else:\n embed.timestamp = ctx.message.created_at\n if len(nicknames) > 0:\n embed.add_field(name=\"Nicknames\", value=\" , \".join(str(nick) for nick in nicknames[:5]), inline=False)\n embed.add_field(name=\"Roles\", value=\" , \".join([role.name for role in target.roles[:5] if len(role.name) > 0]),\n inline=False)\n if target.activity:\n if isinstance(target.activity, discord.Spotify):\n embed.add_field(name=\"Currently Listening to\",\n value=f\"**{target.activity.title}** by {target.activity.artist} \")\n else:\n embed.add_field(name=\"Currently Playing Since\",\n value=f\"{target.activity.name}\\n{target.activity.details}\\n{target.activity.state}\")\n await ctx.send(embed=embed)", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def get_user_notifications(self, login):", "def display_session(self):\n self.user['display_manager'] = {'name': self.user['display']}\n if self.user['display'] is not None:\n\n # Set display manager name\n self.user['display_manager']['name'] = \\\n self.packages['display_manager']['name'][self.user['display']]\n\n # Append display manager packages\n self.user['display_manager']['packages'] = \\\n self.packages['display_manager']['packages'][self.user['display']]\n\n # Append display manager greeter\n if self.user['greeter'] is not None:\n self.user['display_manager']['packages'] += ' {x}'.format(\n x=self.packages['greeter']['packages'][self.user['greeter']])\n\n self.user['display_manager']['session'] = \\\n self.packages['greeter']['session'][self.user['greeter']]", "def test_TC_Users_200819_3(self):\n self.log.info(\"*#\" * 20)\n self.log.info(\"test_TC_Users_200819_3 started\")\n self.log.info(\"*#\" * 20)\n self.us.gotoUsers()\n self.us.clickViewProfile()\n result = self.us.verifyViewProfile()\n self.ts.markFinal(\"test_TC_Users_200819_3\", result, \"View profile button Verification\")", "def show_callback(self, client_data):\n pass", "def show_user_info(self):\n name = self.get_user_name()\n print(f'Name: {name.title()}')\n print(f'Age: {self.age}')\n print(f'Gender: {self.gender.title()}')\n print(f'Mobile: {self.m_number}')", "def user_cmts_prev(*args):\n return _ida_hexrays.user_cmts_prev(*args)", "def describe_user(self):\n print(\"We have stored next information about user \" +\n self.first_name.title() + \" \" + self.last_name.title() +\n \":\")\n print(\"- Username: \" + self.username)\n print(\"- Age: \" + str(self.age))\n print(\"- Location: \" + self.location.title())", "def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"", "def opm_popup(opmvers, text, nrow):\n\n layout1 = [[sg.Multiline(text, size=(80, nrow), background_color='white', 
text_color='darkgreen')],\n [sg.CloseButton('OK')]]\n window1 = sg.Window('OPMRUN - Flow Job Scheduler ' + opmvers, layout=layout1)\n window1.Read()\n return ()", "def describe_my_user_profile():\n pass", "def displayAudit():\n\tauditResults=runAudit(masterPod.currentMasterPod)\n\t#Get results and duplicates\n\tallResults=auditResults[\"ResultDict\"]\n\tduplicateResults=auditResults[\"DuplicateDict\"]\n\n\t#Display score\n\tauditScore=auditResults[\"Overall\"]\n\tauditScoreVar.set(str(auditScore)+\"%\")\n\n\tif auditScore >= 60:\n\t\tauditScoreLabel.update(fg=mainGreenColour)\n\telif auditScore >= 45:\n\t\tauditScoreLabel.update(fg=mainOrangeColour)\n\telse:\n\t\tauditScoreLabel.update(fg=mainRedColour)\n\n\t#Go through the results\n\tfor itemName in auditTable.rowInfo:\n\t\tif itemName in auditResults:\n\t\t\t#Update the label\n\t\t\tauditTable.updateRow(itemName,auditResults[itemName])\n\n\n\t#Update the buttons to they update on clicks\n\tfor rowText in auditTable.buttonInfo:\n\t\tif rowText == \"All accounts\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(allResults))\n\t\telif rowText == \"Strong Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Strong']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Average Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Medium']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults : showAuditResults(s))\n\n\t\telif rowText == \"Weak Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Weak']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Duplicates\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(duplicateResults))\n\n\t#Clear the tree\n\tauditResultsTree.delete(*auditResultsTree.get_children())", "def populating_popup(self, *args):\n return _ida_hexrays.Hexrays_Hooks_populating_popup(self, *args)", "def views_by_browser_verbose(self, tab): \n \n get_browsers = Button(tab, text='Get browsers',bg='#999FA5', command= lambda: self.get_browser_result_verbose())\n get_browsers.grid(column=0, row=0, padx=30, pady=30)", "def showInfoWindow():\n\treturn 0", "def profileToolClicked(self):\n self.openDock()\n # Set the profile map tool\n self.profile_tool.setActive()", "def show_data():", "def onShowed(self):\n self.parent.actionTagTwo=\"\"\n pass", "def go_back(self):\n self.displayUi = LoginScreen()\n DB.log_out()\n self.hide()\n self.displayUi.show()", "def print_user_actions():\n print\n print \"=\" * 80\n print \" User Actions\"\n print \"=\" * 80", "def update_user_metrics(self,user_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_user_metrics({user_id})\")\n ##TODO: this should return something ", "def btn_follow_clicked(self, widget, data=None):\n print \"follow clicked\"\n #Going to put random stuff here.", "def ev_windowshown(self, event: WindowEvent) -> None:", "def processUserAction(self, user_action):\n self.history[\"user_action\"] = user_action\n dialogue_act = user_action[\"action\"]\n self.current_function = None\n self.dont_know = False\n\n\n def provideQuery():\n 
self.query = user_action[\"query\"]\n self.query_vector = self.dataset.getVectorForQuery(self.query)\n self.dataset.updateResults(query = self.query)\n self.result_index=0\n self.list_current = False\n return user_action\n\n def provideKw():\n self.keywords[\"provided\"].add(user_action[\"keyword\"])\n self.keywords[\"rejected\"].discard(user_action[\"keyword\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n self.result_index=0\n return user_action\n\n def rejectKws():\n self.keywords[\"provided\"].difference_update(user_action[\"keywords\"])\n self.keywords[\"rejected\"].update(user_action[\"keywords\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n return user_action\n\n def rejectFunctions():\n self.functions_rejected.update(user_action[\"functions\"])\n self.dataset.updateResults(not_functions = self.functions_rejected)\n self.list_current = False\n return user_action\n\n def eliSugg():\n return user_action\n\n def eliInfo():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def eliInfoAll():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def changePage():\n return user_action\n\n def dontKnow():\n self.dont_know = True\n\n\n switcher = {\n 'provide-query':provideQuery,\n 'provide-kw':provideKw,\n 'reject-kws':rejectKws,\n 'reject-functions':rejectFunctions,\n 'eli-sugg':eliSugg,\n 'eli-sugg-all':eliSugg,\n 'eli-info':eliInfo,\n 'eli-info-all':eliInfo,\n 'change-page':changePage,\n 'dont-know':dontKnow\n }\n\n if dialogue_act in switcher:\n return switcher[dialogue_act]()\n else: return user_action", "def user_numforms_prev(*args):\n return _ida_hexrays.user_numforms_prev(*args)", "def show_popup(self, data):\r\n store = get_store()\r\n self.ids.inlayout.rows = 1\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Viscosity :\", \"FFFFFF\")))\r\n value = round(store.get('Viscosity')[\"value\"], 2)\r\n viscotext = str(value)+\" \"+store.get('Viscosity')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(viscotext, \"FFFFFF\")))\r\n self.open()", "def show_popup(self, data):\r\n store = get_store()\r\n if data[\"errcode\"] == 2:\r\n self.ids.inlayout.rows = 12\r\n else:\r\n self.ids.inlayout.rows = 11\r\n #if there is an error to print\r\n if data[\"errcode\"] == 2:\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Warning :\", \"FF0000\")))\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(data[\"errtext\"], \"FF0000\")))\r\n #Hydrodynamic injection\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Hydrodynamic injection :\", \"FFFFFF\")))\r\n value = round(store.get('Hydrodynamicinjection')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Hydrodynamicinjection')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Capillary volume\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Capillary volume :\", \"BFBFBF\")))\r\n value = round(store.get('Capillaryvolume')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Capillaryvolume')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Capillary volume to window\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Capillary volume to window :\", \"FFFFFF\")))\r\n value = round(store.get('Capillaryvolumetowin')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Capillaryvolumetowin')[\"unit\"]\r\n 
self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Injection plug length\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injection plug length :\", \"BFBFBF\")))\r\n value = round(store.get('Injectionpluglen')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectionpluglen')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Plug (% of total length)\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Plug (% of total length) :\", \"FFFFFF\")))\r\n value = round(store.get('Pluglenpertotallen')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Pluglenpertotallen')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Plug (% of length to window)\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Plug (% of length to window) :\", \"BFBFBF\")))\r\n value = round(store.get('Pluglenperlentowin')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Pluglenperlentowin')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Injected analyte \r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injected analyte :\", \"FFFFFF\")))\r\n value = round(store.get('Injectedanalyteng')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectedanalyteng')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=\"\"))\r\n value = round(store.get('Injectedanalytepmol')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectedanalytepmol')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Injection pressure\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Injection pressure :\", \"BFBFBF\")))\r\n value = round(store.get('Injectionpressure')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Injectionpressure')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #Flow rate\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Flow rate :\", \"FFFFFF\")))\r\n value = round(store.get('Flowrate')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Flowrate')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"FFFFFF\")))\r\n #Field strength\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Field strength :\", \"BFBFBF\")))\r\n value = round(store.get('Fieldstrength')[\"value\"], 2)\r\n value = str(value)+\" \"+store.get('Fieldstrength')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(value, \"BFBFBF\")))\r\n #open the popup\r\n self.open()", "def update_ui(self):\r\n pass\r\n windowdata = self.window.get_data(self.__class__.__name__)\r\n windowdata['action_group'].get_action('UndoClose').set_sensitive(len(self.tabs_closed) > 0)\r\n windowdata['action_group'].get_action('CloseAll').set_sensitive(self.notebook.get_n_pages() > 0)\r\n windowdata['action_group'].get_action('CloseOthers').set_sensitive(self.notebook.get_n_pages() > 1)", "def user(self):", "def user_changes(self, user, what=None):\n pass", "def run(self):\n self.user_t = None\n self.users.show()\n self.connect()", "def manage_info():", "def onPrevious(self, event):\n\t\tself.previousPicture()", "def on_quit(self):\n\n def confirm(dt):\n \"\"\"\n This method restores operating system settings to default if user is using 
Linux and stops app.\n :param dt: It is for handling callback input.\n :return:\n \"\"\"\n\n database_api.signOut(Cache.get(\"info\",\n \"token\"\n ),\n Cache.get(\"info\",\n \"nick\"\n )\n )\n\n if platform.system() == \"Linux\":\n os.system(\"sh func/sh/restore.sh\")\n\n App.get_running_app().stop()\n\n popup_content = FloatLayout()\n popup = Popup(title=\"Quit\",\n content=popup_content,\n separator_color=[140 / 255., 55 / 255., 95 / 255., 1.],\n size_hint=(None, None),\n size=(self.width / 5, self.height / 5)\n )\n popup_content.add_widget(Label(text=\"Are you sure?\",\n color=(1, 1, 1, 1),\n font_name=\"data/font/CaviarDreams.ttf\",\n font_size=self.width / 50,\n pos_hint={\"center_x\": .5, \"center_y\": .625}\n )\n )\n popup_content.add_widget(Button(text=\"Yes\",\n font_name=\"data/font/LibelSuit.ttf\",\n font_size=self.height / 40,\n background_normal=\"data/img/widget_green.png\",\n background_down=\"data/img/widget_green_select.png\",\n size_hint_x=.5,\n size_hint_y=None,\n height=self.height / 25,\n pos_hint={\"center_x\": .25, \"y\": 0},\n on_release=confirm\n )\n )\n popup_content.add_widget(Button(text=\"No\",\n font_name=\"data/font/LibelSuit.ttf\",\n font_size=self.height / 40,\n background_normal=\"data/img/widget_red.png\",\n background_down=\"data/img/widget_red_select.png\",\n size_hint_x=.5,\n size_hint_y=None,\n height=self.height / 25,\n pos_hint={\"center_x\": .75, \"y\": 0},\n on_release=popup.dismiss\n )\n )\n popup.open()", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def Return():\n confirm_frame.forget()\n self.LoadLogInWindow()", "def top10(userid, args):\r\n if popuplib.isqueued(\"sourcerpg_rpgtop5\", userid):\r\n return\r\n popuplib.send('sourcerpg_rpgtop5', userid)", "def refresh_screen(self):", "def btn_equalize_hist_callback(self):\n self.show_as_waiting(True)\n self.image_proc_selected('Histogram Equalization')\n self.show_as_waiting(False)", "def on_preferences(self, evt):\n # Passing `self` sets the main window as the parent window\n self.prefs.Show(self)\n evt.Skip()", "def on_new_google_user(data):\n print(\"Got an event for new google user input with data:\", data)\n push_new_user_to_db(data[\"name\"], data[\"email\"], data[\"pic\"], data[\"room\"])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL", "def Return():\n forgot_frame.forget()\n self.LoadLogInWindow()", "def show_users():\n return 'hehe'", "def OnShowLog(self, event):\n dlg = LogViewer(self)\n dlg.OnLogRefresh(event)\n dlg.ShowModal()\n dlg.Destroy()", "def _login_btn_clicked(self):\n username = self.entry_username.get()\n conn = pymysql.connect(host='localhost', user='root', password='######', db='#####')\n with conn:\n a = conn.cursor()\n a.execute('SELECT User_ID FROM user WHERE UserName = %s;',(username))\n user_data = a.fetchall()\n a.close()\n conn.close()\n if user_data == ():\n tm.showerror(\"Login error\", \"Incorrect username\")\n else:\n found_user = AppFrame._format_data(user_data) #User_ID\n u_type_ID = (AppFrame._check_user_type(found_user)[0]) #(user_type)_ID\n user_type = (AppFrame._check_user_type(found_user)[1])\n AppFrame.my_jobs_frame(root,username, u_type_ID, user_type)", "def after_record(self):\n self.record_label.set(\"Re-Record\")\n self.button_2['state'] = \"normal\"", "def OnButtonLoginOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def 
show_results ():\n #Total volume\n vol = tkinter.Label\\\n (text= (\"Total volume: \" + str (ice_volume) + \" m\\u00b2\"))\n vol.pack ()\n #Total mass\n mass = tkinter.Label\\\n (text= (\"Total mass: \" + str (ice_mass) + \" kg\"))\n mass.pack ()\n #Towability\n print (\"Calculating towability\")\n if ice_mass > 36000000:\n tow = tkinter.Label (text = \"Iceberg cannot be towed\")\n else:\n tow = tkinter.Label (text = \"Iceberg can be towed\")\n print (\"Towability calculated\")\n tow.pack ()\n #Disable button after 1 click\n #Code based on https://www.youtube.com/watch?v=QfTo3rK3e48\n results_btn ['state'] = 'disabled'", "def goBack(self):\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()", "def displayMl(self, *args):\n\t\tself.callMethod(('ManialinkManager', 'displayManialinkToLogin'), *args)", "def google_profile_extra_details(backend, details, response, user=None, is_new=False,\n *args, **kwargs):\n profile = {}\n\n logger.info('Pipeline logging in with google-oauth2')\n\n if user is not None:\n client_profile = user.profile\n # save all updated fields\n if isinstance(response, dict):\n logger.info('Pipeline.google updating client.profile_data from google %s - %s' % (user, response,) )\n\n client_profile.profile_data.update(response) \n\n client_profile.save(update_fields=['profile_data'])", "def onShow(self):\n pass", "def _on_rfid_event(self):\n self.logger.info(\"Got user %s\" % self.user.name)\n self.hide_banner()\n self.set_user(self.user.name, self.user.balance, self.user.credit)\n self._request_redraw()\n\n return self.sm.state", "def capture_user_input(self):\n\n self.ratings[self.current_unit_id] = self.UI.get_ratings()\n self.notes[self.current_unit_id] = self.UI.user_notes", "def OnButtonRateOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def btn_compare_callback(self):\n self.show_as_waiting(True)\n mrs2_ids = self.tbl_images.get_mrs_ids(2)\n mrs2_names = self.tbl_images.get_mrs_names(2)\n\n for id, name in zip(mrs2_ids, mrs2_names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def view_users(stdscr):\n stdscr.clear()\n safe_put(stdscr, \"* marks a user online at last update. 
Hit any key to return to menu.\", (2, 1))\n row = 4\n for user in taunet.users.all():\n if user.is_on:\n safe_put(stdscr, \"*\", (row, 1))\n safe_put(stdscr, user.name, (row, 3))\n row += 1\n stdscr.refresh()\n\n # Wait for any key, then clear and return to menu.\n stdscr.getch()\n stdscr.clear()\n stdscr.refresh()", "def _on_get_user_info(self, callback, session, user):\n logging.debug('user data from github ' + str(user))\n if user is None:\n callback(None)\n return\n callback({\n \"id\": user[\"id\"],\n \"login\": user[\"login\"],\n \"name\": user.get(\"name\"),\n \"email\": user.get(\"email\"),\n \"access_token\": session[\"access_token\"],\n })", "def OnOk(self, event = None):\n ##Close.\n self.UpdateData()\n self.Hide()\n self.Destroy()", "def Data_source_page(): \n layout = [\n [sg.Button('Property issue dates'),\n sg.Button('Current property status')],\n [sg.Button('Number of property owners'), sg.Button('Upload new data')],\n [sg.Button('Logout')]]\n window = sg.Window('Data Source Page', layout, finalize=True,\n size=(500, 150), element_justification='c')\n\n event, values = window.read()\n print(event, values)\n\n if event == None or event == 'Exit Application':\n window.close()\n if event == 'Property issue dates':\n window.close()\n dataexplorerscreens.DataExplorerScreen1()\n if event == 'Current property status':\n window.close()\n dataexplorerscreens.DataExplorerScreen2()\n if event == 'Number of property owners':\n window.close()\n dataexplorerscreens.DataExplorerScreen3()\n if event == 'Upload new data':\n window.close()\n uploadnewdata.Upload_new_data_page()\n if event == 'Logout':\n window.close()\n login.login_main()", "def loginAfterFeed():\n label = loginFunc(Camera())\n print(\"loginAfterFeed label:\", label)\n if label is None:\n return json.dumps({\"name\":\"error\"})\n else:\n return json.dumps({\"name\":label})", "def __window_back(self):\n pass", "def info_loop(self, widget):\n\n if (self.username_changed_id is None or\n self.hostname_changed_id is None):\n return\n\n if (widget is not None and widget.get_name() == 'fullname' and\n not self.username_edited):\n self.username.handler_block(self.username_changed_id)\n new_username = misc.utf8(widget.get_text().split(' ')[0])\n new_username = new_username.encode('ascii', 'ascii_transliterate')\n new_username = new_username.decode().lower()\n new_username = re.sub('^[^a-z]+', '', new_username)\n new_username = re.sub('[^-a-z0-9_]', '', new_username)\n self.username.set_text(new_username)\n self.username.handler_unblock(self.username_changed_id)\n elif (widget is not None and widget.get_name() == 'username' and\n not self.hostname_edited):\n self.hostname.handler_block(self.hostname_changed_id)\n t = widget.get_text()\n if t:\n self.hostname.set_text(re.sub(r'\\W', '', t) + self.suffix)\n self.hostname.handler_unblock(self.hostname_changed_id)\n\n # Do some initial validation. We have to process all the widgets so we\n # can know if we can really show the next button. 
Otherwise we'd show\n # it on any field being valid.\n complete = True\n\n if self.fullname.get_text():\n self.fullname_ok.show()\n else:\n self.fullname_ok.hide()\n\n text = self.username.get_text()\n if text:\n errors = check_username(text)\n if errors:\n self.username_error(make_error_string(self.controller, errors))\n complete = False\n else:\n self.username_ok.show()\n self.username_error_label.hide()\n else:\n self.username_ok.hide()\n self.username_error_label.hide()\n complete = False\n\n password_ok = validation.gtk_password_validate(\n self.controller,\n self.password,\n self.verified_password,\n self.password_ok,\n self.password_error_label,\n self.password_strength,\n self.allow_password_empty,\n )\n\n complete = complete and password_ok\n\n txt = self.hostname.get_text()\n self.hostname_ok.show()\n if txt:\n errors = check_hostname(txt)\n if errors:\n self.hostname_error(make_error_string(self.controller, errors))\n complete = False\n self.hostname_ok.hide()\n else:\n self.hostname_ok.show()\n self.hostname_error_label.hide()\n else:\n complete = False\n self.hostname_ok.hide()\n self.hostname_error_label.hide()\n\n self.controller.allow_go_forward(complete)", "def btn_display_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def double_registered_guest(win_title, msg, warning, font, warning_font, button_font, n_current_find=1, n_names=1):\r\n if n_names > 1:\r\n title = [sg.Text(str(n_current_find) + '/' + str(n_names), font=font)]\r\n if n_current_find < n_names:\r\n last_button = 'Next'\r\n else:\r\n last_button = 'Cancel'\r\n else:\r\n title = ''\r\n last_button = 'Cancel'\r\n\r\n layout = [title,\r\n [sg.Text(msg, font=font)],\r\n [sg.Text(warning, font=warning_font, text_color='red')],\r\n [sg.Button('Print', font=button_font),\r\n sg.Button(last_button, font=button_font)]]\r\n win = sg.Window(win_title, layout, modal=True, grab_anywhere=True, enable_close_attempted_event=True)\r\n event, value = win.read()\r\n if event == sg.WINDOW_CLOSE_ATTEMPTED_EVENT or event == 'Next':\r\n event = \"Cancel\"\r\n win.close()\r\n return event", "async def userinfo(self, ctx, user: discord.Member = None):\n if user is None:\n user = ctx.author\n\n if user.activity is None:\n activity = \"None\"\n else:\n activity = user.activity.name\n\n warns = await self.bot.pool.fetchval(\"SELECT COUNT(*) FROM modcases WHERE caseuserid = $1 AND guildid = $2 AND casetype = $3\", user.id, user.guild.id, \"Warn\")\n kicks = await self.bot.pool.fetchval(\"SELECT COUNT(*) FROM modcases WHERE caseuserid = $1 AND guildid = $2 AND casetype = $3\", user.id, user.guild.id, \"Kick\")\n bans = await self.bot.pool.fetchval(\"SELECT COUNT(*) FROM modcases WHERE caseuserid = $1 AND guildid = $2 AND casetype = $3\", user.id, user.guild.id, \"Ban\")\n mutes = await self.bot.pool.fetchval(\"SELECT COUNT(*) FROM modcases WHERE caseuserid = $1 AND guildid = $2 AND casetype = $3\", user.id, user.guild.id, \"Mute\")\n\n embed=discord.Embed(title = f\"{user.name}'s Information\", color = discord.Color.blurple())\n embed.add_field(name = \"Name:\", value = user.mention)\n embed.add_field(name = \"Name Hash:\", value = user.name)\n embed.add_field(name = 
\"Nickname:\", value = user.nick)\n embed.add_field(name = \"Account Created:\", value = user.created_at.strftime(\"%m-%d-%Y\"))\n embed.add_field(name = \"Joined Server At:\", value = user.joined_at.strftime(\"%m-%d-%Y\"))\n embed.add_field(name = \"ID:\", value = user.id)\n embed.add_field(name = \"Status\", value = user.status)\n embed.add_field(name = \"Activity:\", value = activity)\n embed.add_field(name = \"Highest Role\", value = user.top_role.mention)\n embed.add_field(name = ( \"​\" ), value = ( \"​\" ), inline = False)\n embed.add_field(name = \"Kicks:\", value = kicks)\n embed.add_field(name = \"Bans:\", value = bans)\n embed.add_field(name = \"Warns:\", value = warns)\n embed.add_field(name = \"Mutes:\", value = mutes)\n await ctx.send(embed=embed)", "def on_pushButton_query_account_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def __metadata_update(self, value):\n if value:\n try:\n self.details.original_widget = TextWidget('Updating metadata files. Please, wait...')\n self.execution_manager.close()\n except QMapError as e:\n self.details.original_widget = TextWidget(e)\n self.__back_to_main()", "def delete_user_summary(name):\r\n delete_memoized(get_user_summary, name)", "def clickDetails(self):\n self.waitForElement(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n element = self.getElementList(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n self.elementClick(element=element[0])\n pp.time.sleep(2)", "def about_developer(self):\r\n self.pop_window(title=\"About\", \r\n msg=\"ChikonEye Version: 2.0.1 \\nDeveloper Info:\\nName : Ashraf Minhaj \\nEmail : [email protected] \\nsite : ashrafminhajfb.blogspot.com \\nyouTube : fusebatti\")", "def user_popup(request):\n try:\n return _user_popup(request)\n except Exception as err:\n logging.exception('Exception in user_popup processing:')\n # Return HttpResponse because the JS part expects a 200 status code.\n return HttpHtmlResponse(\n '<font color=\"red\">Error: %s; please report!</font>' %\n err.__class__.__name__)", "def user_popup(request):\n try:\n return _user_popup(request)\n except Exception as err:\n logging.exception('Exception in user_popup processing:')\n # Return HttpResponse because the JS part expects a 200 status code.\n return HttpHtmlResponse(\n '<font color=\"red\">Error: %s; please report!</font>' %\n err.__class__.__name__)", "def RequestUserAttention(self, pane_window):\r\n \r\n # try to find the pane\r\n paneInfo = self.GetPane(pane_window)\r\n if not paneInfo.IsOk():\r\n raise Exception(\"Pane window not found\")\r\n\r\n dc = wx.ClientDC(self._frame)\r\n\r\n # if the frame is about to be deleted, don't bother\r\n if not self._frame or self._frame.IsBeingDeleted():\r\n return\r\n \r\n if not self._frame.GetSizer():\r\n return\r\n\r\n for part in self._uiparts:\r\n if part.pane == paneInfo:\r\n self._art.RequestUserAttention(dc, self._frame, part.pane.caption, part.rect, part.pane)\r\n self._frame.RefreshRect(part.rect, True)\r\n break", "def __init__(self, user, enabled=True):\n super().__init__()\n self.setObjectName(\"user-profile\")\n self.enabled = enabled\n self.setProperty(\"follow-mouse\", enabled)\n\n image, label = _get_visuals(user)\n\n grid = QGridLayout(self)\n i = QLabel()\n i.setPixmap(image)\n i.setAlignment(Qt.AlignCenter)\n\n text = label\n text.setAlignment(Qt.AlignCenter)\n\n grid.addWidget(i, 0, 0)\n grid.addWidget(text, 1, 0)", "def user(self):\n pass", "def set_metrics(self):", "def get_user(self, x, y, width, height):\n\n \"\"\" 
Add a warning window that notifies the player that a username already exists\n and whether they will still want to proceed \"\"\"\n\n def enter():\n \"\"\" Hey new user! / Welcome back, <username>! Your last attempt was on... \"\"\"\n pass\n\n def raise_above_all(window):\n window.attributes('-topmost', 1) # put the username box on top of all other windows\n\n def username_exists():\n \"\"\" If username exists...\"\"\"\n username_exists_box = Tk()\n username_exists_box.title(\"Player already exists\")\n\n username_box = Tk() # create a new window for username box\n username_box.title(\"Player Username\") # name the box \n username_box['background'] = '#8cab9c' # set a background colour\n\n # placing the box\n x, y = 300, 400 # box width and height\n pos_horizontally = int(self.monitor_width / 2 - (x / 2)) # finding co-ordinates to place the box \n pos_vertically = int(self.monitor_height / 2 - (y / 2)) # in the middle of self.master\n res = \"{}x{}+{}+{}\"\n username_box.geometry(res.format(x, y, pos_horizontally, pos_vertically)) # pos in the middle of the screen\n\n # Username Box widgets\n label_text = \"Username must be between 3-15 characters \\nValid characters: a-z 0-9 _&$£-.\\nPlease enter your username:\\n\"\n label = Label(username_box, wraplength=180, justify=\"center\", bg='#617D6F', fg=\"#ffffff\", text=label_text)\n label.place(height=200, width=180, relx=0.5, rely=0.4, anchor='center')\n\n input_box = Text(username_box, font=('Arial', 12), bg=\"#F8F8FF\") # box where user enters their username\n input_box.place(height=25, width=120, relx=0.5, rely=0.5, anchor='center')\n\n enter_button = Button(username_box, text=\"Enter\", command=enter) # creating a button to submit username\n enter_button.place(height=30, width=80, relx=0.5, rely=0.6, anchor='center')\n\n exit_button = Button(username_box, text=\"Exit\", command=username_box.destroy) # exits the program\n exit_button.place(height=30, width=60, relx=0.5, rely=0.7, anchor='center')\n \"\"\" You have to do username restrictions:\n isalnum(), &, %, _ <- use regex\n lenght: 3-15\n\n https://www.geeksforgeeks.org/python-program-check-string-contains-special-character/\n\n convert it to lowercase\n \"\"\"\n raise_above_all(username_box)", "def mine(request):\n request.user_to_show = request.user\n return _show_user(request)", "def mine(request):\n request.user_to_show = request.user\n return _show_user(request)", "def show_popup(self, data):\r\n store = get_store()\r\n self.ids.inlayout.rows = 1\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(\"Conductivity :\", \"FFFFFF\")))\r\n value = round(store.get('Conductivity')[\"value\"], 2)\r\n conductivitytext = str(value)+\" \"+store.get('Conductivity')[\"unit\"]\r\n self.ids.inlayout.add_widget(CEToolBoxLabel(text=add_color(conductivitytext, \"FFFFFF\")))\r\n self.open()", "def users():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Users', level=1)\r\n users = get_qlik_sense.get_users()\r\n num_of_users = len(users)\r\n table = document.add_table(rows=num_of_users+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'userId'\r\n row.cells[1].text = 'userDirectory'\r\n row.cells[2].text = 'name'\r\n row.cells[3].text = 'roles'\r\n row.cells[4].text = 'inactive'\r\n row.cells[5].text = 'removed externally'\r\n 
row.cells[6].text = 'blacklisted'\r\n for user in range(num_of_users):\r\n row = table.rows[user+1]\r\n row.cells[0].text = str(users[user][0])\r\n row.cells[1].text = str(users[user][1])\r\n row.cells[2].text = str(users[user][2])\r\n row.cells[3].text = str(users[user][3])\r\n row.cells[4].text = str(users[user][4])\r\n row.cells[5].text = str(users[user][5])\r\n row.cells[6].text = str(users[user][6])\r\n document.add_page_break()", "def OnFrameShow(self):\r\n\r\n\t\t# Update local copy of config\r\n\t\tself._configtmp.update(self._config)\r\n\r\n\t\t# Fix notebook background color when switching themes in XP\r\n\t\tself.notebookApp.SetBackgroundColour(\\\r\n\t\t\tself.notebookApp.GetThemeBackgroundColour())\r\n\r\n\t\t# Set flag\r\n\t\tself.toggleButtonRateFlag.SetValue(self._configtmp[\"flagimage\"])\r\n\r\n\t\t# Set ratings\r\n\t\tself._iconstars = [icons.getGrayStarBitmap(),\r\n\t\t\ticons.getYellowStarBitmap(),\r\n\t\t\ticons.getGrayOrangeStarBitmap(),\r\n\t\t\ticons.getYellowOrangeStarBitmap()]\r\n\t\tself.SetStars()\r\n\r\n\t\t# Set image info\r\n\t\tself.textCtrlImageInfo.Clear()\r\n\t\tself.textCtrlImageInfo.WriteText(self._configtmp[\"imageinfo\"])\r\n\r\n\t\t# If login still valid, change text on Sign In page\r\n\t\tif (self._configtmp[\"userhash\"] != \"00000000000000000000000000000000\"):\r\n\t\t\tself.staticTextSignedIn.SetLabel(\" You are signed in.\")\r\n\t\telse:\r\n\t\t\tself.staticTextSignedIn.SetLabel(\"You are not signed in.\")\r\n\t\tusername = self._configtmp[\"username\"]\r\n\t\tself.textCtrlLogin.Clear()\r\n\t\tself.textCtrlLogin.WriteText(username)\r\n\r\n\t\t# Set options\r\n\t\t_ratelist = [1, 2, 3, 4, 5]\r\n\t\tself.choiceOptionRatedAtLeast.SetSelection(\r\n\t\t\t_ratelist.index(self._configtmp[\"ratedatleast\"]))\r\n\t\t_percentlist = [5, 10, 20, 50, 75, 100]\r\n\t\tself.choiceOptionPercentUnrated.SetSelection(\r\n\t\t\t_percentlist.index(self._configtmp[\"percentnew\"]))\r\n\t\t_changeeverylist = [900, 1800, 3600, 7200, 14400, 28800, 86400,\r\n\t\t\t172800, 345600, 604800]\r\n\t\tself.choiceOptionChangeEvery.SetSelection(\r\n\t\t\t_changeeverylist.index(self._configtmp[\"changeevery\"]))\r\n\r\n\t\t# Update complete, show frame\r\n\t\tself.Show()\r\n\t\tself.Raise()" ]
[ "0.5536531", "0.5336233", "0.5274754", "0.52151805", "0.5156828", "0.51532024", "0.51404095", "0.509433", "0.50761753", "0.50675863", "0.5057054", "0.5030155", "0.49895877", "0.49638245", "0.49565363", "0.4928132", "0.49049008", "0.48959267", "0.48698136", "0.4867956", "0.48614398", "0.48579764", "0.48489344", "0.4842281", "0.4839902", "0.4839006", "0.48387027", "0.48349887", "0.4825721", "0.48123717", "0.4812226", "0.48116773", "0.4796702", "0.4785195", "0.47831893", "0.4774848", "0.4770255", "0.4768022", "0.47614002", "0.4758096", "0.47535756", "0.4742499", "0.47390026", "0.47358993", "0.47327045", "0.4723895", "0.47207794", "0.47165447", "0.47153643", "0.47105208", "0.47074", "0.47001952", "0.46958026", "0.469388", "0.46868968", "0.46775866", "0.4675306", "0.46701294", "0.4660121", "0.46580747", "0.46569696", "0.46554467", "0.4653668", "0.46529272", "0.46409434", "0.46377125", "0.46345314", "0.46334386", "0.46313652", "0.46313205", "0.46296087", "0.46232945", "0.46186277", "0.4613669", "0.46105486", "0.4608602", "0.46075267", "0.4595952", "0.45895785", "0.4579961", "0.45778057", "0.4577099", "0.45652476", "0.45616075", "0.45585203", "0.45574018", "0.45510924", "0.45506746", "0.45496288", "0.45496288", "0.4548061", "0.4546251", "0.4545398", "0.45453426", "0.4544222", "0.45371595", "0.45371595", "0.4535516", "0.45333382", "0.4533254" ]
0.63189566
0
Callback function for im_metrics_btn1 A window will popup to display the original image metadata
def display_metrics2(self): messagebox.showinfo("Original Image Metrics", self.raw_metrics)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_metrics3(self):\n messagebox.showinfo(\"Processed Image Metrics\", self.pro_metrics)", "def win5_ManageData_Caption(event=None):\r\n\r\n clearwin()\r\n global IMAGES_FILE_PATH\r\n global i\r\n i = 0\r\n\r\n Descriptions = load()\r\n imgs = []\r\n if os.path.exists(IMAGES_FILE_PATH + sep + 'Images'):\r\n for picture in sorted(listdir(IMAGES_FILE_PATH + sep + 'Images')): # Store all the Images' Path in imgs []\r\n path = fr'Images\\{picture}'\r\n imgs.append(path)\r\n\r\n # Opens the first Image with an adequate format (size)\r\n Images = Image.open(imgs[0])\r\n baseheight = 600\r\n hpercent = (baseheight / float(Images.size[1]))\r\n wsize = int((float(Images.size[0]) * float(hpercent)))\r\n Image_copy = Images.resize((wsize, baseheight), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(Image_copy)\r\n img_label = tkinter.Label(mframe, image=photo)\r\n img_label.pack(padx=10, pady=10)\r\n\r\n def myClick(event=None):\r\n global i\r\n\r\n i = i + 1\r\n # get the description of image n*i\r\n save(Descriptions)\r\n if i > len(imgs):\r\n save(Descriptions)\r\n win1()\r\n return\r\n Descriptions[imgs[i - 1]] = T.get(\"1.0\", 'end')\r\n Descriptions[imgs[i - 1]] = clear_trailing_newlines(Descriptions[imgs[i - 1]])\r\n if i >= len(imgs): # If we reached the last image\r\n save(Descriptions)\r\n win1()\r\n return\r\n\r\n T.delete(\"1.0\", 'end')\r\n Images = Image.open(imgs[i])\r\n baseheight = 600\r\n hpercent = (baseheight / float(Images.size[1]))\r\n wsize = int((float(Images.size[0]) * float(hpercent)))\r\n Image_copy = Images.resize((wsize, baseheight), Image.ANTIALIAS)\r\n img_label.img = ImageTk.PhotoImage(Image_copy)\r\n img_label.config(image=img_label.img)\r\n img_label.image = img_label.img\r\n if imgs[i] in Descriptions:\r\n T.insert(\"1.0\", Descriptions[imgs[i]])\r\n\r\n def Back(event=None):\r\n global i\r\n i = i - 2\r\n myClick()\r\n\r\n # Adding the entry box\r\n T = tkinter.Text(mframe, height=10, width=40)\r\n if imgs[0] in Descriptions:\r\n T.insert(\"1.0\", Descriptions[imgs[0]])\r\n T.insert(\"1.0\", '')\r\n\r\n T.pack()\r\n # add_button\r\n my_button = tkinter.Button(mframe, text=\"Add Description\", command=lambda: myClick())\r\n my_button.pack(padx=10)\r\n T.bind('<Right>', func=myClick)\r\n T.bind('<Left>', func=Back)\r\n\r\n mframe.mainloop()\r\n\r\n back = tkinter.Button(mframe, command=win3_ManagePictures, text='Back')\r\n back.pack()\r\n else:\r\n def no_pictures_error():\r\n mframe.pack_propagate(0)\r\n\r\n b1 = tkinter.Label(mframe, text='Please Download Pictures (Or', font=(\"Courier\", 18), bg='pink')\r\n b1.pack(side='top', expand='YES')\r\n b1.place(relx=0.5, rely=0.4, anchor='center')\r\n b1 = tkinter.Label(mframe, text='Add Them Manually) Before Adding Captions', font=(\"Courier\", 18),\r\n bg='pink')\r\n b1.pack(side='top', expand='YES')\r\n b1.place(relx=0.5, rely=0.5, anchor='center')\r\n time.sleep(1.5)\r\n win3_ManagePictures()\r\n\r\n t = threading.Thread(target=no_pictures_error)\r\n t.start()", "def OnOpenClicked(self, event):\n \n # Create the image\n image = medipy.io.load(self._image_path) #, 0, loader_class= nmr2D.Nmr2D)\n \n # Insert a reference spectrum into the image if one has been specified\n if self._reference_path is not None:\n spectrum = numpy.fromfile(self._reference_path, numpy.int32)\n image.metadata[\"header\"][\"proton_spectrum\"] = spectrum\n \n # Load a list of annotations if an annotation file has been specified\n if self._annotations_path is not None:\n image.metadata[\"Data\"] = image.data\n dom = 
md.parse(self._annotations_path)\n peaks = dom.getElementsByTagName(\"Peak2D\")\n image.annotations = ObservableList()\n for peak in peaks:\n annotation = ImageAnnotation()\n ppm = (float(peak.getAttribute(\"F1\")),float(peak.getAttribute(\"F2\")))\n point = rbnmr.ppm_to_point(ppm, \n image.metadata[\"Procs\"],\n image.metadata[\"Proc2s\"])\n annotation.position = [0, point[-2], point[-1]]\n annotation.label = peak.getAttribute(\"annotation\")\n annotation.shape = ImageAnnotation.Shape.cross\n annotation.size = 10\n annotation.color = [0, 1., 0.]\n annotation.filled = False\n annotation.depth = 10\n image.annotations.append(annotation)\n \n self.GetParent().append_image([{\"image\":image}])\n \n # Close the window\n self.Destroy()", "def on_image(self, image):", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "def update_information(self):\n if self._image_path is not None:\n self._open_button.Enable() \n else:\n self._open_button.Disable() \n \n self.Fit()\n self.GetSizer().SetSizeHints(self)", "def imageinfo(self, *args, **kwargs):\n return self.logger.log(logging.INFO-1, *args, **kwargs)", "def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()", "def measure(self, imgage, previous=None):", "def btn_equalize_hist_callback(self):\n self.show_as_waiting(True)\n self.image_proc_selected('Histogram Equalization')\n self.show_as_waiting(False)", "def update_image(self, cv_img):\n\t\tqt_img = self.convert_cv_qt(cv_img)\n\t\tself.label.setPixmap(qt_img)\n\t\tself.display_info()", "def imshow_infos(img,\n infos,\n text_color='white',\n font_size=26,\n row_width=20,\n win_name='',\n show=True,\n fig_size=(15, 10),\n wait_time=0,\n out_file=None):\n with ImshowInfosContextManager(fig_size=fig_size) as manager:\n _, img = manager.put_img_infos(\n img,\n infos,\n text_color=text_color,\n font_size=font_size,\n row_width=row_width,\n win_name=win_name,\n show=show,\n wait_time=wait_time,\n out_file=out_file)\n return img", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def OnButtonOKButton(self):\r\n\t\tmeta = {}\r\n\t\tfor key in self._config.keys():\r\n\t\t\tif self._config[key] != self._configtmp[key]:\r\n\t\t\t\tmeta[key] = self._configtmp[key]\r\n\t\tif meta:\r\n\t\t\t# Since if frame open, wallpaper downloads are delayed,\r\n\t\t\t# the image data is always valid\r\n\t\t\tself._config.update(self._configtmp)\r\n\t\t\tself._config.Save(self._twirlpath)\r\n\t\t\tmeta.update({\"username\":self._config[\"username\"].encode(\"utf-8\"),\r\n\t\t\t\t\"userhash\":self._config[\"userhash\"],\r\n\t\t\t\t\"imageid\":self._config[\"imageid\"]})\r\n\t\t\tnetops.SendMetadata(consts.URL_SEND_META, meta)\r\n\t\tself.Hide()", "def 
__init__(self):\n self.root = tk.Tk()\n self.root.minsize(width=150, height=150)\n self.root.maxsize(height=500)\n self.root.title(\"GUV analysis\")\n self.root.iconbitmap(os.path.join(os.path.dirname(__file__), \"icon.ico\"))\n self.window = tk.Frame(self.root)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)\n\n self.widgets = {}\n self.images = {}\n\n self.widgets['lblTitle'] = tk.Label(self.window, text='GUV analysis tool', font=\"-weight bold -size 20\")\n self.widgets['lblTitle'].grid(column=0, row=0, columnspan=3)\n\n self.images['newImg'] = tk.PhotoImage(file=os.path.join(os.path.dirname(__file__),'icon-new.png')).subsample(2,2)\n self.widgets['btnNew'] = tk.Button(self.window, text='New analysis', image=self.images['newImg'], command=self.start_new_analysis, compound=tk.TOP, borderwidth=0)\n self.widgets['btnNew'].grid(column=0, row=1, padx=10)\n\n self.images['openImg'] = PhotoImage_cd('icon-open.png').subsample(2,2)\n self.widgets['btnOpen'] = tk.Button(self.window, text='Open existing analysis', command=self.reopen_existing_analysis, image=self.images['openImg'], compound=tk.TOP, borderwidth=0)\n self.widgets['btnOpen'].grid(column=1, row=1, padx=10)\n\n self.images['closeImg'] = PhotoImage_cd('icon-close.png').subsample(2,2)\n self.widgets['btnClose'] = tk.Button(self.window, text='Close program', command=self.root.quit, image=self.images['closeImg'], compound=tk.TOP, borderwidth=0)\n self.widgets['btnClose'].grid(column=2, row=1, padx=10)", "def screeninfo(self):\n\t\tDevice().capture_screenshot()\n\t\tresolution = (self.width, self.height)\n\t\tdroid = AQMdroid('image.png', resolution, self.filename)\n\t\t\n\t\ttry:\n\t\t\tdroid.getorigin()\n\t\texcept Exception as e:\n\t\t\tScriptGen(self.filename).log_checker(self.log_handler)\n\t\t\tScriptGen(self.filename).log_checker(self.generate_log_file)\n\t\t\tprint \"\\nExit Point Triggered.\"\n\t\t\tsys.exit()", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def show_info(filename, abs_=None, center=None):\n fimage = FITSImage(filename)\n print(\"Image data shape: {0}\".format(fimage.shape))\n print(\"Image size: %dx%d\" % (fimage.Nx, fimage.Ny))\n print(\"Data unit: [%s]\" % fimage.bunit)\n pixelsize = fimage.pixelsize\n if pixelsize:\n print(\"Pixel size: %.1f [arcsec]\" % pixelsize)\n print(\"Field of view: (%.2f, %.2f) [deg]\" % fimage.fov)\n data = fimage.image\n if abs_:\n data = np.abs(data)\n if center:\n print(\"Central box size: %d\" % center)\n rows, cols = data.shape\n rc, cc = rows//2, cols//2\n cs1, cs2 = center//2, (center+1)//2\n data = data[(rc-cs1):(rc+cs2), (cc-cs1):(cc+cs2)]\n min_ = np.nanmin(data)\n max_ = np.nanmax(data)\n mean = np.nanmean(data)\n median = np.nanmedian(data)\n std = np.nanstd(data)\n iqr = np.diff(np.nanpercentile(data, q=(25, 75)))\n mad = np.nanmedian(np.abs(data - median))\n rms = np.sqrt(np.nanmean(data**2))\n print(\"min: %13.6e\" % min_)\n print(\"max: %13.6e\" % max_)\n print(\"range: %13.6e (max - min)\" % (max_ - min_))\n print(\"mean: %13.6e\" % mean)\n print(\"median: %13.6e\" % median)\n print(\"std: %13.6e (standard deviation)\" % std)\n print(\"iqr: %13.6e (interquartile range)\" % iqr)\n print(\"mad: %13.6e (median absolute deviation)\" % mad)\n print(\"rms: %13.6e (root-mean-squared)\" % rms)", "def open_popup(title, image):\n self.im = PIL.Image.open(image)\n\n top = Toplevel(root)\n top.geometry()\n top.title(title)\n\n self.ph = 
ImageTk.PhotoImage(self.im, master=top)\n self.label = Label(top, image=self.ph)\n self.label.image = self.ph\n self.label.grid(row=1, column=1, columnspan=3)\n\n self.label2 = Label(top, text='Image saved @ %s' % image)\n self.label2.grid(row=2, column=1)", "def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())", "def img_dict_updated(self, change):\n if change[\"value\"]:\n self.select_dataset(self.io_model.img_dict_default_selected_item)\n self.init_plot_status()", "def display(self, image):\n raise NotImplementedError()", "def popup(self):\n opencv.imshow('dbg', self.img)\n opencv.waitKey(0)", "def show_image_ref():\n return get_image_ref()", "def btn_display_color_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_color_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n self.show_as_waiting(False)", "def btn_compare_callback(self):\n self.show_as_waiting(True)\n mrs2_ids = self.tbl_images.get_mrs_ids(2)\n mrs2_names = self.tbl_images.get_mrs_names(2)\n\n for id, name in zip(mrs2_ids, mrs2_names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def on_action_2_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '介绍此软件', '此软件是用python语言开发,主要用到Dlib,opencv,pyqt5 三种库利用计算机视觉技术进行图像处理从而识别目标对象表情')", "def show(image,label,pred):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n s=\"True Label : \"+str(label)+\" Predicted label : \"+str(pred)\n pyplot.xlabel(s,fontname=\"Arial\", fontsize=20 )\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def metadata_update(self, _):\n self.details.original_widget = YesNoWidget('Update metadata files?', self.__metadata_update)", "def open_imagingWindow(self):\n self.window = surveyWindow(self, imaging=True)\n self.hide()", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n if(self.iscapture):\n print(\"update\")\n direct = self.label1.text()\n if direct == \"~default\":\n direct = \"face_dataframes\"\n else:\n direct = direct + \"/face_dataframes\"\n \n if (not os.path.exists(direct)):\n os.mkdir(direct)\n cv2.imwrite(\"{1}/{2}{0}.jpeg\".format(self.count, direct,self.textbox.text()), cv_img)\n self.iscapture = False\n self.label2.setText(\"Image # 0{0} Saved\".format(self.count))\n self.pushButton0.setEnabled(False)\n self.count += 
1\n \n \n if(self.count == 6):\n #print(\"greater\")\n self.pushButton.setEnabled(False)\n self.pushButton2.setDisabled(False)\n\n\n self.image_label.setPixmap(qt_img)", "def update_image(self, img):\r\n qt_img = self.convert_cv_qt(img)\r\n self.main.caption_feed.setPixmap(qt_img)", "def showInfoWindow():\n\treturn 0", "def btn_display_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def showImage(self, img):\n cv2.namedWindow(self.NAME_WINDOW,cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.NAME_WINDOW, 300, 700)\n cv2.imshow(self.NAME_WINDOW , img)\n cv2.waitKey(0)", "def getimage(self):", "def display(self):\n display(self.image)", "def show_image(self):\n cv2.imshow('Image', self.__diff_image())\n cv2.waitKey()", "def on_action_12_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '注意', '此软件只可接受png/jpg格式图片输出统一格式为png')", "def img_cb(self, image):\n self.last_img = image\n self.is_new_img = True", "def disp_annotation(self):\n self.has_been_called = True\n print('Iterat #Fevals Hypervolume axis ratios '\n ' sigmas min&max stds\\n'+'(median)'.rjust(42) +\n '(median)'.rjust(10) + '(median)'.rjust(12))", "def display(self):\n image_qt = ImageQt.ImageQt(self.view_state.get_image())\n self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image_qt))\n self.imageLabel.adjustSize()", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def display_image(img, label):\n cv2.imshow(label,img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def copyin(self, greyscale, mw):\n self.gs = greyscale\n self.imagearray = mw.currentimage\n self.imagetitle = mw.currentimage_title\n self.gsname.setText(self.gs.name)\n collist = self.gs.shades + [0, 255]\n collist.sort(reverse=not self.gs.inverse)\n self.currentshades = collist\n self.numcols = len(collist)\n\n # Set up dialog box appropriately\n\n self.performingupdate = True\n\n self.numscales.setValue(self.numcols)\n self.setupshades()\n\n vallist = self.gs.values\n\n if self.imagearray is None:\n self.meanvalue = 0\n self.stdvalue = 1\n self.minvalue = -100.0\n self.maxvalue = 70000.0\n self.minstdd = -100.0\n self.maxstdd = 100.0\n else:\n self.plotfigure = plt.figure(figsize=(mw.imwidth, mw.imheight))\n self.meanvalue = self.imagearray.mean()\n self.stdvalue = self.imagearray.std()\n self.minvalue = self.imagearray.min()\n self.maxvalue = self.imagearray.max()\n self.minstdd = (self.minvalue - self.meanvalue) / self.stdvalue\n self.maxstdd = (self.maxvalue - self.meanvalue) / self.stdvalue\n self.meanv.setText(\"%.2f\" % self.meanvalue)\n self.medianv.setText(\"%.2f\" % 
np.median(self.imagearray))\n self.sigmav.setText(\"%.2f\" % self.stdvalue)\n\n if self.gs.isfixed:\n self.fixedcount.setChecked(True)\n vallist.append(self.maxvalue)\n vallist.sort()\n self.currentvalues = vallist\n elif self.gs.isperc:\n self.percentile.setChecked(True)\n vallist.append(100.0)\n vallist.sort()\n self.currentpercents = vallist\n else:\n self.nstddevs.setChecked(True)\n vallist.append(self.maxstdd)\n vallist.sort()\n self.currentnsigs = vallist\n\n # Initialise minimum value fields (don't need zero percent)\n\n self.nsd0.setValue(self.minstdd)\n self.v0.setValue(self.minvalue)\n\n # Create other two lists and display\n\n self.createrest()\n self.fillingrid()\n self.setupenabled()\n self.performingupdate = False\n self.plotmap()", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def gammaDisplay(img_path: str, rep: int):\n global im\n if rep == LOAD_GRAY_SCALE:\n im = cv2.imread(img_path, 0)\n else:\n im = cv2.imread(img_path)\n cv2.namedWindow(title_window)\n name = 'Gamma x %d' % gamma_slider_max\n cv2.createTrackbar(name, title_window, 0, gamma_slider_max, on_trackbar)\n # Show some stuff\n on_trackbar(0)\n # Wait until user press some key\n cv2.waitKey()\n\n pass", "def _metadata_changed(self, old, new):\n\n #self.cross_plot.value_range.low = self.minz\n #self.cross_plot.value_range.high = self.maxz\n #self.cross_plot2.value_range.low = self.minz\n #self.cross_plot2.value_range.high = self.maxz\n if self._imag_index.metadata.has_key(\"selections\"):\n x_ndx, y_ndx = self._imag_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n# xdata, ydata = self._image_index.get_data()\n# xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd_horiz.set_data(\"horiz\", self._image_value.data[y_ndx,:])\n self.pd_vert.set_data(\"vert\", self._image_value.data[:,x_ndx])", "def showImage(self, image):\n \n self.image = img", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n 
image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, 
reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def img_disp(name,img):\n cv2.imshow(name,img.astype(int)/255.0)\n cv2.waitKey()", "def show_map_window(image):\n cv2.imshow(_WINDOW_NAME, image)", "def display_image(self, window_name, image):\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, image)\n cv2.waitKey(0)", "def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint", "def _AddAttenProbsImageSummary(self, name, atten_probs):\n\n def PlotAttention(fig, axes, cur_atten_probs, title):\n plot.AddImage(fig, axes, cur_atten_probs, title=title)\n axes.set_ylabel(plot.ToUnicode('Output sequence index'), wrap=True)\n axes.set_xlabel(plot.ToUnicode('Input sequence index'), wrap=True)\n\n with plot.MatplotlibFigureSummary(\n name + '/atten_example',\n figsize=(10, 10),\n max_outputs=1,\n subplot_grid_shape=(1, 1)) as fig:\n # Extract first entry in batch of attention prob matrices\n # [tgt_len, src_len]\n 
fig.AddSubplot([atten_probs], PlotAttention, title='atten_probs')", "def show(img, win_name='qr code'):\n cv2.imshow(win_name, img)\n cv2.waitKey(0)", "def observation_img_callback(self, oimg):\n self._observation_img = oimg", "def change_info(self):\n\t\ttry:\n\t\t\tnewName = self.ui.lista_act.currentItem().text()\n\t\t\tnewData = controller.search_data_act(newName)\n\t\t\tnewData = newData[0]\n\t\t\tnombre = newData[1]\n\t\t\tyear = newData[2]\n\t\t\tgenero = newData[3]\n\t\t\timg = newData[4]\n\t\texcept AttributeError as e:\n\t\t\tnombre = \"\"\n\t\t\tgenero = \"\"\n\t\t\tyear = \"\"\n\t\t\timg = \"\"\n\n\t\tself.ui.txt_nombre.setText(nombre)\n\t\tself.ui.txt_year.setText(year)\n\t\tself.ui.txt_genero.setText(genero)\n\t\tself.ui.img.setPixmap(QtGui.QPixmap(img))", "def display_image(window_name, img):\n cv2.imshow(window_name, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display_img(title,img):\r\n cv2.namedWindow('img', cv2.WINDOW_NORMAL)\r\n cv2.setWindowTitle('img',title)\r\n cv2.resizeWindow('img',600,400)\r\n\r\n #Display Image on screen\r\n cv2.imshow('img',img)\r\n\r\n #Mantain output until user presses a key\r\n cv2.waitKey(0)\r\n\r\n #Destroy windows when user presses a key\r\n cv2.destroyAllWindows()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def analyze(self):\r\n self.current = 'score'\r\n popup = AnalyzeInterface(self.current_screen).open()", "def handle_gui_example_two_intent(self, message):\n self.gui.show_image(\"https://source.unsplash.com/1920x1080/?+random\")", "def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()", "def set_info():\n #Example response\n '''{'copyright': 'Thomas Ashcraft', 'date': '2021-01-04', 'explanation': 'What causes sprite lightning? Mysterious bursts of light in the sky that momentarily resemble gigantic jellyfish have been recorded for over 30 years, but apart from a general association with positive cloud-to-ground lightning, their root cause remains unknown. Some thunderstorms have them -- most don\\'t. Recently, however, high speed videos are better detailing how sprites actually develop. The featured video, captured in mid-2019, is fast enough -- \n at about 100,000 frames per second -- to time-resolve several sprite \"bombs\" dropping and developing into the multi-pronged streamers that appear on still images. Unfortunately, the visual clues provided by videos like these do not fully resolve the sprite origins mystery. 
High speed vidoes do indicate to some researchers, though, that sprites are more likely to occur when plasma irregularities exist in the \nupper atmosphere. Astrophysicists: Browse 2,300+ codes in the Astrophysics Source Code Library', 'media_type': 'video', 'service_version': 'v1', 'title': 'Sprite Lightning at 100,000 Frames Per Second', 'url': 'https://www.youtube.com/embed/zS_XgF9i8tc?rel=0'}'''\n\n #Update the picture date and explanation\n picture_date.config(text=response['date'])\n picture_explanation.config(text=response['explanation'])\n\n #We need to use 3 images in other functions; an img, a thumb, and a full_img\n global img\n global thumb\n global full_img\n\n url = response['url']\n\n if response['media_type'] == 'image':\n #Grab the photo that is stored in our response.\n img_response = requests.get(url, stream=True)\n\n #Get the content of response and use BytesIO to open it as an image\n #Keep a reference to this img as this is what we can use to save the image (Image nit PhotoImage)\n #Create the full screen image for a second window\n img_data = img_response.content\n img = Image.open(BytesIO(img_data))\n\n full_img = ImageTk.PhotoImage(img)\n\n #Create the thumbnail for the main screen\n thumb_date = img_response.content\n thumb = Image.open(BytesIO(thumb_date))\n thumb.thumbnail((200,200))\n thumb = ImageTk.PhotoImage(thumb)\n\n #Set the thumbnail image\n picture_label.config(image=thumb)\n elif response['media_type'] == 'video':\n picture_label.config(text=url, image='')\n webbrowser.open(url)", "def displayImage(winName, img):\n cv.imshow(winName, img)\n cv.waitKey(0)", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def update_plot_preview(self):\n\n min_val = self.range_slider.first_position\n max_val = self.range_slider.second_position\n img_data = self.normalize_histogram(min_val, max_val)\n new_hist = self.calc_histogram(img_data)\n\n self.hist_canvas.axes.clear()\n self.hist_canvas.axes.bar(range(256), self.original_hist, color='b', alpha=0.7)\n self.hist_canvas.axes.bar(range(256), new_hist, color='g', alpha=0.7)\n self.hist_canvas.draw()\n\n self.current_img_data = img_data", "def init_info_pane(self):\n self.single_acq = QtWidgets.QPushButton(\"Single Acquisition\")\n self.start_acq = QtWidgets.QPushButton(\"Start Acquisition\")\n self.stop_acq = QtWidgets.QPushButton(\"Stop Acquisition\")\n\n self.exposure = QtWidgets.QDoubleSpinBox()\n self.exposure.setSuffix(\" ms\")\n self.get_exposure_params()\n\n self.maj_radius = QtWidgets.QLabel()\n self.min_radius = QtWidgets.QLabel()\n self.avg_radius = QtWidgets.QLabel()\n self.ellipticity = QtWidgets.QLabel()\n self.x_radius = QtWidgets.QLabel()\n self.y_radius = QtWidgets.QLabel()\n self.x_centroid = QtWidgets.QLabel()\n self.y_centroid = QtWidgets.QLabel()\n\n # Mark current beam position\n self.mark = QtWidgets.QPushButton(\"Mark\")\n self.unmark = QtWidgets.QPushButton(\"Unmark\")\n\n # Mark 
location\n self.mark_x = QtWidgets.QLabel()\n self.mark_y = QtWidgets.QLabel()\n\n # Beam distance from marked location\n self.x_delta = QtWidgets.QLabel()\n self.y_delta = QtWidgets.QLabel()\n\n # Keep a list of mark sub-widgets so we can hide/show them\n # Obviously we don't want to hide the mark buttons themselves\n self.mark_widgets.extend([\n self.mark_x, self.mark_y,\n # self.x_delta, self.y_delta,\n ])\n\n self.fps = QtWidgets.QLabel()\n self.message = QtWidgets.QLabel()\n self.status = QtWidgets.QLabel(\"Stopped\")", "def help_main():\n global help_window, my_iterator, iterable, canvas, forward_button, picture_lst, image\n my_iterator = iter(picture_lst)\n pill_image = Image.open(image_base)\n image = ImageTk.PhotoImage(pill_image)\n\n canvas = Canvas(help_window, width=700 + 15, height=490 + 15)\n canvas.create_image(10, 10, anchor=NW, image=image)\n\n canvas.place(x=170, y=10)\n\n forward_button = ttk.Button(help_window, text=\" Вперед \", command=forward)\n forward_button.place(x=910, y=250)\n help_window.mainloop()", "def updateImage(self, autoHistogramRange=True):\n super().updateImage(autoHistogramRange=autoHistogramRange)\n self.getImageItem().setLookupTable(self.lut)", "def __on_generating_photomosaic_finished(self):\n\n self.progress_window.hide()", "def initAlertMsgImage(self):\n return", "def view_thumbnail_complete():\n curItem = complete_tereeview.focus().strip('#')\n with open(\"images_url_dict.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = \"-\".join(curItem.lower().split())\n try:\n \"\"\"look for entry info from local database\"\"\"\n img_list = imgs_dict[name]\n img_url = img_list[0]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n\n except KeyError:\n print(\"Failed series image list....\")\n with open(\"Movies_List.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = curItem\n try:\n img_list = imgs_dict[name]\n img_url = img_list[1]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n except Exception as error_ml:\n print(\"Failed using movie list Error :: \\n\", error_ml)\n\n\n except Exception as local_excep:\n\n print(\"ERROR :: \" + str(local_excep))", "def create_image_caption_pairs(self):", "def show_picture(self, data):\n raise NotImplementedError", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n 
img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def callback(self,data):\n # convert image into openCV format\n bridge = CvBridge()\n try:\n # bgr8 is the pixel encoding -- 8 bits per color, organized as blue/green/red\n cv_image = bridge.imgmsg_to_cv(data, \"bgr8\")\n except CvBridgeError, e:\n # all print statements should use a rospy.log_ form, don't print!\n rospy.loginfo(\"Conversion failed\")\n\n # we could do anything we want with the image here\n # for now, we'll blur using a median blur\n cv2.Smooth(cv_image, cv_image, smoothtype=cv.CV_MEDIAN, param1=31, param2=0, param3=0, param4=0)\n\t\n\t\n\tret,th1 = cv2.threshold(cv_image,127,255,cv2.THRESH_BINARY)\n\tth2 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\tth3 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\n\ttitles = ['Original Image', 'Global Thresholding (v = 127)',\n\t\t 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n\timages = [cv_image, th1, th2, th3]\n\t\n\tfor i in xrange(4):\n\t\tplt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n\t\tplt.title(titles[i])\n\t\tplt.xticks([]),plt.yticks([])\n\tplt.show()\n\t\n '''# show the image\n cv2.ShowImage(\"image_view\", cv_image)\n cv2.WaitKey(3)\n\t'''", "def resetImage(self):\n filename = self.cnvImgOrig.getImageFilename()\n self.cnvImgTest.displayImage(filename)", "def gammaDisplay(img_path: str, rep: int):\n if rep == 2:\n # reading as BGR\n image = cv2.imread(img_path)/255\n elif rep == 1:\n image=cv2.imread(img_path,0)/255\n else:\n raise ValueError('Only RGB or GRAY_SCALE ')\n\n # the callback function, to find the gamma -divide by 100 to get values between 0.01 to 2\n def on_trackbar(val):\n gamma = val / 100\n corrected_image = np.power(image, gamma)\n cv2.imshow('Gamma display', corrected_image)\n\n cv2.namedWindow('Gamma display')\n trackbar_name = 'Gamma'\n cv2.createTrackbar(trackbar_name, 'Gamma display', 1, 200, on_trackbar)\n # Show some stuff\n on_trackbar(1)\n # Wait until user press some key\n cv2.waitKey()\n pass", "def load_metadata_i(self, metadata):\n self.p2_frame_metadata.configure(borderwidth=2, relief=\"groove\")\n self.p2_label_metadata_code.config(text=self.lang.VP_CODE + metadata[\"metadata\"][\"code\"])\n self.p2_label_metadata_grade.config(text=self.lang.VP_GRADE + str(metadata[\"metadata\"][\"grade\"]))\n self.p2_label_metadata_cm.config(text=self.lang.VP_DATE + metadata[\"metadata\"][\"date\"])", "def adjust(self, image):\n ...", "def show_result(self, img, result, **kwargs):\n return self.architecture.show_result(img, result, **kwargs)", "def see_image(self, idx, show=True):\n true_label = self.true_targets[idx]\n img, label, _ = self.__getitem__(idx) # img has channel as 1st dim\n img = np.transpose(img.numpy(), (1, 2, 0)) # channel as last dim\n if show:\n plt.imshow(img)\n plt.title(f\"Label: {self.classes_labels[true_label]}\")\n plt.show()\n else:\n return img, label, true_label", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def put_img_infos(self,\n img,\n infos,\n text_color='white',\n font_size=26,\n row_width=20,\n win_name='',\n show=True,\n wait_time=0,\n 
out_file=None):\n self.prepare()\n\n text_color = color_val_matplotlib(text_color)\n img = mmcv.imread(img).astype(np.uint8)\n\n x, y = 3, row_width // 2\n img = mmcv.bgr2rgb(img)\n width, height = img.shape[1], img.shape[0]\n img = np.ascontiguousarray(img)\n\n # add a small EPS to avoid precision lost due to matplotlib's\n # truncation (https://github.com/matplotlib/matplotlib/issues/15363)\n dpi = self.fig_save.get_dpi()\n self.fig_save.set_size_inches((width + EPS) / dpi,\n (height + EPS) / dpi)\n\n for k, v in infos.items():\n if isinstance(v, float):\n v = f'{v:.2f}'\n label_text = f'{k}: {v}'\n self._put_text(self.ax_save, label_text, x, y, text_color,\n font_size)\n if show and not self.is_inline:\n self._put_text(self.ax_show, label_text, x, y, text_color,\n font_size)\n y += row_width\n\n self.ax_save.imshow(img)\n stream, _ = self.fig_save.canvas.print_to_buffer()\n buffer = np.frombuffer(stream, dtype='uint8')\n img_rgba = buffer.reshape(height, width, 4)\n rgb, _ = np.split(img_rgba, [3], axis=2)\n img_save = rgb.astype('uint8')\n img_save = mmcv.rgb2bgr(img_save)\n\n if out_file is not None:\n mmcv.imwrite(img_save, out_file)\n\n ret = 0\n if show and not self.is_inline:\n # Reserve some space for the tip.\n self.ax_show.set_title(win_name)\n self.ax_show.set_ylim(height + 20)\n self.ax_show.text(\n width // 2,\n height + 18,\n 'Press SPACE to continue.',\n ha='center',\n fontsize=font_size)\n self.ax_show.imshow(img)\n\n # Refresh canvas, necessary for Qt5 backend.\n self.fig_show.canvas.draw()\n\n ret = self.wait_continue(timeout=wait_time)\n elif (not show) and self.is_inline:\n # If use inline backend, we use fig_save to show the image\n # So we need to close it if users don't want to show.\n plt.close(self.fig_save)\n\n return ret, img_save", "def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )", "def image_viewer():\n key_file = auth_form()\n sg.theme('DarkBlue') \n layout = [\n [\n sg.Text(\"Image File\"),\n sg.Input(size=(25, 1), enable_events=True, key=\"file\"),\n sg.FilesBrowse(file_types=((\"Encrypted Images\", \"*.enc\"),)),\n sg.Button(\"Prev\"),\n sg.Button(\"Next\"),\n sg.Button(\"Export\")\n ],\n [sg.Image(key=\"image\")]\n ]\n window = sg.Window('Encrypted Image Viewer', layout, resizable=True).Finalize()\n window.Maximize()\n images = []\n location = 0\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"file\":\n images = values[\"file\"].split(';')\n if images:\n image_decrypted = load_image(images[0], window, key_file)\n if event == \"Next\" and images:\n if location == len(images) - 1:\n location = 0\n else:\n location += 1\n image_decrypted = load_image(images[location], window, key_file)\n if event == \"Prev\" and images:\n if location == 0:\n location = len(images) - 1\n else:\n location -= 1\n image_decrypted = load_image(images[location], window, key_file)\n \n if event == \"Export\" and images:\n image_decrypted.show()\n window.close()", "def _getGUIImage(self): \n # read the system of your computer\n\n image = ImagePIL.fromarray(self.cv_image)\n\n size = round(image.size[0]/2), round(image.size[1]/2)\n\n image.thumbnail(size, ImagePIL.ANTIALIAS)\n image = ImageTkPIL.PhotoImage(image)\n # self.panel = tki.Label(image=image)\n self.panel.config(image=image)\n self.panel.image = image", "def help(self):\n 
self.openCam.close()\n msgbox = QtWidgets.QMessageBox()\n msgbox.setWindowTitle(\"Help !!\")\n msgbox.setText(\n \"Moildev-Apps\\n\\n\"\n \"Moildev-Apps is software to process fisheye \"\n \"image with the result panorama view and Anypoint\"\n \" view. \\n\\nThe panoramic view may present a horizontal\"\n \"view in a specific immersed environment to meet the\"\n \"common human visual perception, while the Anypoint\"\n \"view is an image that has been undistorted in a certain\"\n \"area according to the input coordinates.\"\n \"\\n\\nMore reference about Moildev, contact us\\n\\n\")\n msgbox.setIconPixmap(QtGui.QPixmap('images/moildev.png'))\n msgbox.exec()", "def show(image):\n cv2.imshow('press ENTER to close', image)\n cv2.waitKey(0)", "def displayImage(self, iFrame, img=None):\n if not img:\n imgPath = self.imgList[iFrame.currImg]\n img = Image.open(imgPath); \n if img.mode == \"I;16\":\n print \"16 bit image, converting to 8 bit\"\n img.mode = 'I'\n img = img.point(lambda i:i*(1./256.)).convert(\"RGB\");\n img = img.resize((self.ni, self.nj))\n\n #iframe keeps track of its image\n iFrame.image = img\n \n #if point is generated, gotta draw squares first\n if self.point3d:\n point = self.allUVs[iFrame.currImg];\n self.drawBox(iFrame, point)\n \n # store photo image (probably not needed in iFrame)\n iFrame.tkpi = ImageTk.PhotoImage(img) \n \n #update frames' label \n iFrame.labString.set(\"img {0}\".format(iFrame.currImg))\n \n #create new label image\n if iFrame.label_image :\n iFrame.label_image.destroy()\n iFrame.label_image = Label(iFrame.frame, image=iFrame.tkpi)\n iFrame.label_image.image = iFrame.tkpi\n iFrame.label_image.bind(\"<Button-1>\", lambda event, arg=iFrame: self.runprobe(event, iFrame))\n iFrame.label_image.bind(\"<Button-3>\", lambda event, arg=iFrame: self.nextImage(event, iFrame)) \n iFrame.label_image.bind(\"<Button-2>\", lambda event, arg=iFrame: self.prevImage(event, iFrame))\n iFrame.label_image.pack(side = LEFT);", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])" ]
[ "0.69807404", "0.62527585", "0.58932143", "0.58668804", "0.58499783", "0.58255225", "0.5808261", "0.57940733", "0.5692784", "0.56442815", "0.5617792", "0.5611394", "0.55932826", "0.5588301", "0.5586624", "0.5570289", "0.555042", "0.5538848", "0.55376774", "0.55155426", "0.5489851", "0.5467636", "0.54669625", "0.5465186", "0.54592305", "0.5446267", "0.5443874", "0.54423237", "0.54378116", "0.5394324", "0.5383706", "0.5377431", "0.5356088", "0.5351375", "0.5342875", "0.53359187", "0.53066546", "0.52951396", "0.52947307", "0.52909523", "0.5290666", "0.52839637", "0.5283391", "0.5273236", "0.52700275", "0.5267258", "0.5249009", "0.52397394", "0.52324677", "0.5219686", "0.5209274", "0.5206478", "0.5203739", "0.5202672", "0.51989675", "0.51950324", "0.5185805", "0.5184422", "0.5179234", "0.51783615", "0.5162821", "0.51592964", "0.5155815", "0.51540214", "0.5153398", "0.5152375", "0.51510984", "0.51508224", "0.5149549", "0.5149161", "0.51488566", "0.5145558", "0.5144776", "0.5144695", "0.5144331", "0.51433074", "0.51422024", "0.51323485", "0.5128597", "0.5125277", "0.5121225", "0.51179194", "0.51076525", "0.5095413", "0.5088604", "0.50833493", "0.50831527", "0.5075283", "0.507416", "0.50697684", "0.5068202", "0.5061698", "0.50553", "0.5053334", "0.50508916", "0.50492316", "0.50469804", "0.5046765", "0.5045153", "0.5041471" ]
0.73585606
0
Callback function for im_metrics_btn2 A window will popup to display the processed image metadata
def display_metrics3(self): messagebox.showinfo("Processed Image Metrics", self.pro_metrics)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_metrics2(self):\n messagebox.showinfo(\"Original Image Metrics\", self.raw_metrics)", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "def win5_ManageData_Caption(event=None):\r\n\r\n clearwin()\r\n global IMAGES_FILE_PATH\r\n global i\r\n i = 0\r\n\r\n Descriptions = load()\r\n imgs = []\r\n if os.path.exists(IMAGES_FILE_PATH + sep + 'Images'):\r\n for picture in sorted(listdir(IMAGES_FILE_PATH + sep + 'Images')): # Store all the Images' Path in imgs []\r\n path = fr'Images\\{picture}'\r\n imgs.append(path)\r\n\r\n # Opens the first Image with an adequate format (size)\r\n Images = Image.open(imgs[0])\r\n baseheight = 600\r\n hpercent = (baseheight / float(Images.size[1]))\r\n wsize = int((float(Images.size[0]) * float(hpercent)))\r\n Image_copy = Images.resize((wsize, baseheight), Image.ANTIALIAS)\r\n photo = ImageTk.PhotoImage(Image_copy)\r\n img_label = tkinter.Label(mframe, image=photo)\r\n img_label.pack(padx=10, pady=10)\r\n\r\n def myClick(event=None):\r\n global i\r\n\r\n i = i + 1\r\n # get the description of image n*i\r\n save(Descriptions)\r\n if i > len(imgs):\r\n save(Descriptions)\r\n win1()\r\n return\r\n Descriptions[imgs[i - 1]] = T.get(\"1.0\", 'end')\r\n Descriptions[imgs[i - 1]] = clear_trailing_newlines(Descriptions[imgs[i - 1]])\r\n if i >= len(imgs): # If we reached the last image\r\n save(Descriptions)\r\n win1()\r\n return\r\n\r\n T.delete(\"1.0\", 'end')\r\n Images = Image.open(imgs[i])\r\n baseheight = 600\r\n hpercent = (baseheight / float(Images.size[1]))\r\n wsize = int((float(Images.size[0]) * float(hpercent)))\r\n Image_copy = Images.resize((wsize, baseheight), Image.ANTIALIAS)\r\n img_label.img = ImageTk.PhotoImage(Image_copy)\r\n img_label.config(image=img_label.img)\r\n img_label.image = img_label.img\r\n if imgs[i] in Descriptions:\r\n T.insert(\"1.0\", Descriptions[imgs[i]])\r\n\r\n def Back(event=None):\r\n global i\r\n i = i - 2\r\n myClick()\r\n\r\n # Adding the entry box\r\n T = tkinter.Text(mframe, height=10, width=40)\r\n if imgs[0] in Descriptions:\r\n T.insert(\"1.0\", Descriptions[imgs[0]])\r\n T.insert(\"1.0\", '')\r\n\r\n T.pack()\r\n # add_button\r\n my_button = tkinter.Button(mframe, text=\"Add Description\", command=lambda: myClick())\r\n my_button.pack(padx=10)\r\n T.bind('<Right>', func=myClick)\r\n T.bind('<Left>', func=Back)\r\n\r\n mframe.mainloop()\r\n\r\n back = tkinter.Button(mframe, command=win3_ManagePictures, text='Back')\r\n back.pack()\r\n else:\r\n def no_pictures_error():\r\n mframe.pack_propagate(0)\r\n\r\n b1 = tkinter.Label(mframe, text='Please Download Pictures (Or', font=(\"Courier\", 18), bg='pink')\r\n b1.pack(side='top', expand='YES')\r\n b1.place(relx=0.5, rely=0.4, anchor='center')\r\n b1 = tkinter.Label(mframe, text='Add Them Manually) Before Adding Captions', font=(\"Courier\", 18),\r\n bg='pink')\r\n b1.pack(side='top', expand='YES')\r\n b1.place(relx=0.5, rely=0.5, anchor='center')\r\n time.sleep(1.5)\r\n win3_ManagePictures()\r\n\r\n t = threading.Thread(target=no_pictures_error)\r\n t.start()", "def 
OnOpenClicked(self, event):\n \n # Create the image\n image = medipy.io.load(self._image_path) #, 0, loader_class= nmr2D.Nmr2D)\n \n # Insert a reference spectrum into the image if one has been specified\n if self._reference_path is not None:\n spectrum = numpy.fromfile(self._reference_path, numpy.int32)\n image.metadata[\"header\"][\"proton_spectrum\"] = spectrum\n \n # Load a list of annotations if an annotation file has been specified\n if self._annotations_path is not None:\n image.metadata[\"Data\"] = image.data\n dom = md.parse(self._annotations_path)\n peaks = dom.getElementsByTagName(\"Peak2D\")\n image.annotations = ObservableList()\n for peak in peaks:\n annotation = ImageAnnotation()\n ppm = (float(peak.getAttribute(\"F1\")),float(peak.getAttribute(\"F2\")))\n point = rbnmr.ppm_to_point(ppm, \n image.metadata[\"Procs\"],\n image.metadata[\"Proc2s\"])\n annotation.position = [0, point[-2], point[-1]]\n annotation.label = peak.getAttribute(\"annotation\")\n annotation.shape = ImageAnnotation.Shape.cross\n annotation.size = 10\n annotation.color = [0, 1., 0.]\n annotation.filled = False\n annotation.depth = 10\n image.annotations.append(annotation)\n \n self.GetParent().append_image([{\"image\":image}])\n \n # Close the window\n self.Destroy()", "def btn_compare_callback(self):\n self.show_as_waiting(True)\n mrs2_ids = self.tbl_images.get_mrs_ids(2)\n mrs2_names = self.tbl_images.get_mrs_names(2)\n\n for id, name in zip(mrs2_ids, mrs2_names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def on_image(self, image):", "def btn_display_color_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_color_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n self.show_as_waiting(False)", "def imageinfo(self, *args, **kwargs):\n return self.logger.log(logging.INFO-1, *args, **kwargs)", "def btn_display_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()", "def btn_equalize_hist_callback(self):\n self.show_as_waiting(True)\n self.image_proc_selected('Histogram Equalization')\n self.show_as_waiting(False)", "def on_action_2_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '介绍此软件', '此软件是用python语言开发,主要用到Dlib,opencv,pyqt5 三种库利用计算机视觉技术进行图像处理从而识别目标对象表情')", "def screeninfo(self):\n\t\tDevice().capture_screenshot()\n\t\tresolution = (self.width, self.height)\n\t\tdroid = AQMdroid('image.png', resolution, self.filename)\n\t\t\n\t\ttry:\n\t\t\tdroid.getorigin()\n\t\texcept Exception as 
e:\n\t\t\tScriptGen(self.filename).log_checker(self.log_handler)\n\t\t\tScriptGen(self.filename).log_checker(self.generate_log_file)\n\t\t\tprint \"\\nExit Point Triggered.\"\n\t\t\tsys.exit()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def show_info(filename, abs_=None, center=None):\n fimage = FITSImage(filename)\n print(\"Image data shape: {0}\".format(fimage.shape))\n print(\"Image size: %dx%d\" % (fimage.Nx, fimage.Ny))\n print(\"Data unit: [%s]\" % fimage.bunit)\n pixelsize = fimage.pixelsize\n if pixelsize:\n print(\"Pixel size: %.1f [arcsec]\" % pixelsize)\n print(\"Field of view: (%.2f, %.2f) [deg]\" % fimage.fov)\n data = fimage.image\n if abs_:\n data = np.abs(data)\n if center:\n print(\"Central box size: %d\" % center)\n rows, cols = data.shape\n rc, cc = rows//2, cols//2\n cs1, cs2 = center//2, (center+1)//2\n data = data[(rc-cs1):(rc+cs2), (cc-cs1):(cc+cs2)]\n min_ = np.nanmin(data)\n max_ = np.nanmax(data)\n mean = np.nanmean(data)\n median = np.nanmedian(data)\n std = np.nanstd(data)\n iqr = np.diff(np.nanpercentile(data, q=(25, 75)))\n mad = np.nanmedian(np.abs(data - median))\n rms = np.sqrt(np.nanmean(data**2))\n print(\"min: %13.6e\" % min_)\n print(\"max: %13.6e\" % max_)\n print(\"range: %13.6e (max - min)\" % (max_ - min_))\n print(\"mean: %13.6e\" % mean)\n print(\"median: %13.6e\" % median)\n print(\"std: %13.6e (standard deviation)\" % std)\n print(\"iqr: %13.6e (interquartile range)\" % iqr)\n print(\"mad: %13.6e (median absolute deviation)\" % mad)\n print(\"rms: %13.6e (root-mean-squared)\" % rms)", "def display(self, image):\n raise NotImplementedError()", "def measure(self, imgage, previous=None):", "def __init__(self):\n self.root = tk.Tk()\n self.root.minsize(width=150, height=150)\n self.root.maxsize(height=500)\n self.root.title(\"GUV analysis\")\n self.root.iconbitmap(os.path.join(os.path.dirname(__file__), \"icon.ico\"))\n self.window = tk.Frame(self.root)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)\n\n self.widgets = {}\n self.images = {}\n\n self.widgets['lblTitle'] = tk.Label(self.window, text='GUV analysis tool', font=\"-weight bold -size 20\")\n self.widgets['lblTitle'].grid(column=0, row=0, columnspan=3)\n\n self.images['newImg'] = tk.PhotoImage(file=os.path.join(os.path.dirname(__file__),'icon-new.png')).subsample(2,2)\n self.widgets['btnNew'] = tk.Button(self.window, text='New analysis', image=self.images['newImg'], command=self.start_new_analysis, compound=tk.TOP, borderwidth=0)\n self.widgets['btnNew'].grid(column=0, row=1, padx=10)\n\n self.images['openImg'] = PhotoImage_cd('icon-open.png').subsample(2,2)\n self.widgets['btnOpen'] = tk.Button(self.window, text='Open existing analysis', command=self.reopen_existing_analysis, image=self.images['openImg'], compound=tk.TOP, borderwidth=0)\n self.widgets['btnOpen'].grid(column=1, row=1, padx=10)\n\n self.images['closeImg'] = PhotoImage_cd('icon-close.png').subsample(2,2)\n self.widgets['btnClose'] = tk.Button(self.window, text='Close program', command=self.root.quit, image=self.images['closeImg'], compound=tk.TOP, borderwidth=0)\n self.widgets['btnClose'].grid(column=2, row=1, padx=10)", "def 
imshow_infos(img,\n infos,\n text_color='white',\n font_size=26,\n row_width=20,\n win_name='',\n show=True,\n fig_size=(15, 10),\n wait_time=0,\n out_file=None):\n with ImshowInfosContextManager(fig_size=fig_size) as manager:\n _, img = manager.put_img_infos(\n img,\n infos,\n text_color=text_color,\n font_size=font_size,\n row_width=row_width,\n win_name=win_name,\n show=show,\n wait_time=wait_time,\n out_file=out_file)\n return img", "def update_information(self):\n if self._image_path is not None:\n self._open_button.Enable() \n else:\n self._open_button.Disable() \n \n self.Fit()\n self.GetSizer().SetSizeHints(self)", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def __on_generating_photomosaic_finished(self):\n\n self.progress_window.hide()", "def display(self):\n display(self.image)", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def callback(self,data):\n # convert image into openCV format\n bridge = CvBridge()\n try:\n # bgr8 is the pixel encoding -- 8 bits per color, organized as blue/green/red\n cv_image = bridge.imgmsg_to_cv(data, \"bgr8\")\n except CvBridgeError, e:\n # all print statements should use a rospy.log_ form, don't print!\n rospy.loginfo(\"Conversion failed\")\n\n # we could do anything we want with the image here\n # for now, we'll blur using a median blur\n cv2.Smooth(cv_image, cv_image, smoothtype=cv.CV_MEDIAN, param1=31, param2=0, param3=0, param4=0)\n\t\n\t\n\tret,th1 = cv2.threshold(cv_image,127,255,cv2.THRESH_BINARY)\n\tth2 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\tth3 = cv2.adaptiveThreshold(cv_image,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n\t\t cv2.THRESH_BINARY,11,2)\n\n\ttitles = ['Original Image', 'Global Thresholding (v = 127)',\n\t\t 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']\n\timages = [cv_image, th1, th2, th3]\n\t\n\tfor i in xrange(4):\n\t\tplt.subplot(2,2,i+1),plt.imshow(images[i],'gray')\n\t\tplt.title(titles[i])\n\t\tplt.xticks([]),plt.yticks([])\n\tplt.show()\n\t\n '''# show the image\n cv2.ShowImage(\"image_view\", cv_image)\n cv2.WaitKey(3)\n\t'''", "def disp_annotation(self):\n self.has_been_called = True\n print('Iterat #Fevals Hypervolume axis ratios '\n ' sigmas min&max stds\\n'+'(median)'.rjust(42) +\n '(median)'.rjust(10) + '(median)'.rjust(12))", "def show_result(self, img, result, **kwargs):\n return self.architecture.show_result(img, result, **kwargs)", "def showInfoWindow():\n\treturn 0", "def OnButtonOKButton(self):\r\n\t\tmeta = {}\r\n\t\tfor key in self._config.keys():\r\n\t\t\tif self._config[key] != self._configtmp[key]:\r\n\t\t\t\tmeta[key] = self._configtmp[key]\r\n\t\tif meta:\r\n\t\t\t# Since if frame open, wallpaper downloads are delayed,\r\n\t\t\t# the image data is always valid\r\n\t\t\tself._config.update(self._configtmp)\r\n\t\t\tself._config.Save(self._twirlpath)\r\n\t\t\tmeta.update({\"username\":self._config[\"username\"].encode(\"utf-8\"),\r\n\t\t\t\t\"userhash\":self._config[\"userhash\"],\r\n\t\t\t\t\"imageid\":self._config[\"imageid\"]})\r\n\t\t\tnetops.SendMetadata(consts.URL_SEND_META, 
meta)\r\n\t\tself.Hide()", "def update_image(self, cv_img):\n\t\tqt_img = self.convert_cv_qt(cv_img)\n\t\tself.label.setPixmap(qt_img)\n\t\tself.display_info()", "def popup(self):\n opencv.imshow('dbg', self.img)\n opencv.waitKey(0)", "def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint", "def process(self,pixmap):", "def __print_metrics_info(self, occurrence_metric):\n print(\" Name: \", self.get_metric_name(occurrence_metric))\n print(\" Type: Metric\")\n print(\" Description:\",\n self.get_metric_description(occurrence_metric))\n print(\" Formula: \", self.get_metric_formula(occurrence_metric))\n return 0", "def run_ML_onImg_and_display(self):\r\n self.Matdisplay_Figure.clear()\r\n ax1 = self.Matdisplay_Figure.add_subplot(111)\r\n \r\n # Depends on show_mask or not, the returned figure will be input raw image with mask or not.\r\n self.MLresults, self.Matdisplay_Figure_axis, self.unmasked_fig = self.ProcessML.DetectionOnImage(self.MLtargetedImg, axis = ax1, show_mask=False, show_bbox=False) \r\n self.Mask = self.MLresults['masks']\r\n self.Label = self.MLresults['class_ids']\r\n self.Score = self.MLresults['scores']\r\n self.Bbox = self.MLresults['rois']\r\n\r\n self.SelectedCellIndex = 0\r\n self.NumCells = int(len(self.Label))\r\n self.selected_ML_Index = []\r\n self.selected_cells_infor_dict = {}\r\n \r\n self.Matdisplay_Figure_axis.imshow(self.unmasked_fig.astype(np.uint8))\r\n \r\n self.Matdisplay_Figure.tight_layout()\r\n self.Matdisplay_Canvas.draw()", "def on_pushButton_2_clicked(self):\n # TODO: not implemented yet\n try:\n str='str.png'\n process_pic.graphics ().process (str)\n self.click=\"process\"\n pixMap = QPixmap(\"temp.png\").scaled(self.label.width(),self.label.height())\n self.label.setPixmap(pixMap)\n except:\n button=QMessageBox.about(self, '注意', '应先向空白处导入图片后再进行处理')\n else:\n pass\n\n\n\n #os.popen('python process_pic.py')", "def process(self, image):", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n 
plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for 
corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def getPicturesPrintEval(self):\n col_print_quality = [[sg.Radio('Good Print', \"Print Quality\", default=True, enable_events = True, key=\"good_radio\", metadata='not_disable')],\n [sg.Radio('Bad Print', \"Print Quality\", default=False, enable_events = True, key=\"bad_radio\", metadata='not_disable')],\n [sg.Text(\"Please input your comments (if any): \"),sg.Multiline()],\n [sg.Button(button_text= \"OK\", enable_events= True, key =\"OK\")]]\n\n col_images = [[sg.Text(\"Folder with Images Location:\")], \n [sg.Input(key = \"FolderName\"), sg.FolderBrowse(button_text = \"Browse\")]]\n layout = [[sg.Column(col_print_quality), sg.Column(col_images)]]\n \n \n\n window = sg.Window('Print Assesment', layout, keep_on_top=True)#Creation of the window\n while True:\n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n # you can use switch-case here instead of if statements\n if event == sg.WIN_CLOSED:\n #Indicate abort\n window.close()\n return (\"Good Print\", None) if values[\"good_radio\"] else (\"Bad Print\", None)\n elif event == \"OK\":\n fileName = values[\"FolderName\"]\n window.close()\n return (\"Good Print\", fileName) if values[\"good_radio\"] else (\"Bad Print\", fileName)", "def open_popup(title, image):\n self.im = PIL.Image.open(image)\n\n top = Toplevel(root)\n top.geometry()\n top.title(title)\n\n self.ph = ImageTk.PhotoImage(self.im, master=top)\n self.label = Label(top, image=self.ph)\n self.label.image = self.ph\n self.label.grid(row=1, column=1, columnspan=3)\n\n self.label2 = Label(top, text='Image 
saved @ %s' % image)\n self.label2.grid(row=2, column=1)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def init_info_pane(self):\n self.single_acq = QtWidgets.QPushButton(\"Single Acquisition\")\n self.start_acq = QtWidgets.QPushButton(\"Start Acquisition\")\n self.stop_acq = QtWidgets.QPushButton(\"Stop Acquisition\")\n\n self.exposure = QtWidgets.QDoubleSpinBox()\n self.exposure.setSuffix(\" ms\")\n self.get_exposure_params()\n\n self.maj_radius = QtWidgets.QLabel()\n self.min_radius = QtWidgets.QLabel()\n self.avg_radius = QtWidgets.QLabel()\n self.ellipticity = QtWidgets.QLabel()\n self.x_radius = QtWidgets.QLabel()\n self.y_radius = QtWidgets.QLabel()\n self.x_centroid = QtWidgets.QLabel()\n self.y_centroid = QtWidgets.QLabel()\n\n # Mark current beam position\n self.mark = QtWidgets.QPushButton(\"Mark\")\n self.unmark = QtWidgets.QPushButton(\"Unmark\")\n\n # Mark location\n self.mark_x = QtWidgets.QLabel()\n self.mark_y = QtWidgets.QLabel()\n\n # Beam distance from marked location\n self.x_delta = QtWidgets.QLabel()\n self.y_delta = QtWidgets.QLabel()\n\n # Keep a list of mark sub-widgets so we can hide/show them\n # Obviously we don't want to hide the mark buttons themselves\n self.mark_widgets.extend([\n self.mark_x, self.mark_y,\n # self.x_delta, self.y_delta,\n ])\n\n self.fps = QtWidgets.QLabel()\n self.message = QtWidgets.QLabel()\n self.status = QtWidgets.QLabel(\"Stopped\")", "def show(image,label,pred):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n s=\"True Label : \"+str(label)+\" Predicted label : \"+str(pred)\n pyplot.xlabel(s,fontname=\"Arial\", fontsize=20 )\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def getimage(self):", "def analyze(self):\r\n self.current = 'score'\r\n popup = AnalyzeInterface(self.current_screen).open()", "def on_action_12_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '注意', '此软件只可接受png/jpg格式图片输出统一格式为png')", "def put_img_infos(self,\n img,\n infos,\n text_color='white',\n font_size=26,\n row_width=20,\n win_name='',\n show=True,\n 
wait_time=0,\n out_file=None):\n self.prepare()\n\n text_color = color_val_matplotlib(text_color)\n img = mmcv.imread(img).astype(np.uint8)\n\n x, y = 3, row_width // 2\n img = mmcv.bgr2rgb(img)\n width, height = img.shape[1], img.shape[0]\n img = np.ascontiguousarray(img)\n\n # add a small EPS to avoid precision lost due to matplotlib's\n # truncation (https://github.com/matplotlib/matplotlib/issues/15363)\n dpi = self.fig_save.get_dpi()\n self.fig_save.set_size_inches((width + EPS) / dpi,\n (height + EPS) / dpi)\n\n for k, v in infos.items():\n if isinstance(v, float):\n v = f'{v:.2f}'\n label_text = f'{k}: {v}'\n self._put_text(self.ax_save, label_text, x, y, text_color,\n font_size)\n if show and not self.is_inline:\n self._put_text(self.ax_show, label_text, x, y, text_color,\n font_size)\n y += row_width\n\n self.ax_save.imshow(img)\n stream, _ = self.fig_save.canvas.print_to_buffer()\n buffer = np.frombuffer(stream, dtype='uint8')\n img_rgba = buffer.reshape(height, width, 4)\n rgb, _ = np.split(img_rgba, [3], axis=2)\n img_save = rgb.astype('uint8')\n img_save = mmcv.rgb2bgr(img_save)\n\n if out_file is not None:\n mmcv.imwrite(img_save, out_file)\n\n ret = 0\n if show and not self.is_inline:\n # Reserve some space for the tip.\n self.ax_show.set_title(win_name)\n self.ax_show.set_ylim(height + 20)\n self.ax_show.text(\n width // 2,\n height + 18,\n 'Press SPACE to continue.',\n ha='center',\n fontsize=font_size)\n self.ax_show.imshow(img)\n\n # Refresh canvas, necessary for Qt5 backend.\n self.fig_show.canvas.draw()\n\n ret = self.wait_continue(timeout=wait_time)\n elif (not show) and self.is_inline:\n # If use inline backend, we use fig_save to show the image\n # So we need to close it if users don't want to show.\n plt.close(self.fig_save)\n\n return ret, img_save", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def print_results(self, final_table=None):\n\n assert self.info\n\n if not final_table:\n final_table = [\"\\n\\n{:-^80}\\n\".format(\"ANALYSIS OF RESULTS\")]\n\n if not self.info.categories[\"integrated\"]:\n final_table.append(\"NO IMAGES INTEGRATED!\")\n else:\n label_lens = [len(v[\"label\"]) for k, v in self.info.stats.items()]\n max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5\n for k, v in self.info.stats.items():\n if k in (\"lres\", \"res\", \"beamX\", \"beamY\"):\n continue\n line = (\n \"{: <{l}}: max = {:<6.2f} min = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n final_table.append(line)\n\n # TODO: Figure out what to do with summary charts\n # # If more than one integrated image, plot various summary graphs\n # if len(self.info.categories['integrated']) > 1:\n # plot = Plotter(self.params, self.info)\n # if self.params.analysis.summary_graphs:\n # if ( self.params.advanced.processing_backend == 'ha14' and\n # self.params.cctbx_ha14.grid_search.type is not None\n # ):\n # plot.plot_spotfinding_heatmap(write_files=True)\n # plot.plot_res_histogram(write_files=True)\n # med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,\n # return_values=True)\n # else:\n # with warnings.catch_warnings():\n # # To catch any 'mean of empty slice' runtime warnings\n # 
warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # beamXY_info = plot.calculate_beam_xy()\n # beamX, beamY = beamXY_info[:2]\n # med_beamX = np.median(beamX)\n # med_beamY = np.median(beamY)\n # pixel_size = beamXY_info[-1]\n\n final_table.append(\n \"{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}\"\n \"\".format(\n \"Median Beam Center\",\n self.info.stats[\"beamX\"][\"mean\"],\n self.info.stats[\"beamY\"][\"mean\"],\n l=max_label,\n )\n )\n\n # Special entry for resolution last\n v = self.info.stats[\"res\"]\n final_table.append(\n \"{: <{l}}: low = {:<6.2f} high = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n\n for item in final_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(final_table=final_table)", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def show_image_ref():\n return get_image_ref()", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def display_callback(self, data):\n if self.enable_overlay_publish:\n self.publish_lidar_overlay_image(data)", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def display_images():\n vc = cv2.VideoCapture(0) # Open webcam\n figure, ax = plt.subplots(1, 2, figsize=(10, 5)) # Intiialise plot\n\n count = 0 # Counter for number of aquired frames\n intensity = [] # Append intensity across time\n\n # For loop over generator here\n intensity.append(imageintensity)\n plot_image_and_brightness() # Call plot function\n count += 1\n\n # This triggers exit sequences when user presses q\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # Clean up here\n plt.close('all') # close plots\n 
generator.close() # Use generator exit for clean up,\n break # break loop", "def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())", "def update_image(self, cv_img):\n qt_img = self.convert_cv_qt(cv_img)\n if(self.iscapture):\n print(\"update\")\n direct = self.label1.text()\n if direct == \"~default\":\n direct = \"face_dataframes\"\n else:\n direct = direct + \"/face_dataframes\"\n \n if (not os.path.exists(direct)):\n os.mkdir(direct)\n cv2.imwrite(\"{1}/{2}{0}.jpeg\".format(self.count, direct,self.textbox.text()), cv_img)\n self.iscapture = False\n self.label2.setText(\"Image # 0{0} Saved\".format(self.count))\n self.pushButton0.setEnabled(False)\n self.count += 1\n \n \n if(self.count == 6):\n #print(\"greater\")\n self.pushButton.setEnabled(False)\n self.pushButton2.setDisabled(False)\n\n\n self.image_label.setPixmap(qt_img)", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])", "def load_metadata_i(self, metadata):\n self.p2_frame_metadata.configure(borderwidth=2, relief=\"groove\")\n self.p2_label_metadata_code.config(text=self.lang.VP_CODE + metadata[\"metadata\"][\"code\"])\n self.p2_label_metadata_grade.config(text=self.lang.VP_GRADE + str(metadata[\"metadata\"][\"grade\"]))\n self.p2_label_metadata_cm.config(text=self.lang.VP_DATE + metadata[\"metadata\"][\"date\"])", "def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )", "def open_imagingWindow(self):\n self.window = surveyWindow(self, imaging=True)\n self.hide()", "def handle_gui_example_two_intent(self, message):\n self.gui.show_image(\"https://source.unsplash.com/1920x1080/?+random\")", "def update_info(self):\n self.execution_status_widget.update()\n self.execution_info_widget.update()\n self.cluster_widget.update() # update the cluster info even if it is not being displayed\n self.details.original_widget.update()", "def image_cb(self, msg):\n\n # Save the camera image\n self.camera_image = msg\n\n # I sufficient information is available...\n if not None in (self.camera_image, self.waypoint_tree, self.lights):\n\n # Find index and color state of next light\n 
light_wp, state = self.process_traffic_lights()\n\n # If the light is green...\n if state == TrafficLight.GREEN:\n\n # Publish sentinel indicatig no red light\n self.upcoming_red_light_pub.publish(Int32(-1))\n\n else:\n\n # Publish the traffic light index\n self.upcoming_red_light_pub.publish(Int32(light_wp))", "def download_function(self):\n # Ask user for directory and user ID\n savepath = filedialog.askdirectory()\n ID = self.user_name.get()\n\n self.msg2.set('Saving files to the designated folder')\n\n # Get selected filenames\n index = self.name_list.curselection()\n select_files = [self.image_names[i] for i in index]\n\n single = check_multi_single(select_files)\n\n if single is True:\n\n filename = select_files[0]\n try:\n pro_img_obj, raw_img_obj, raw_img_name, \\\n pro_hist_obj, raw_hist_obj = get_image_pair(filename, ID)\n except ValueError:\n self.msg2.set(get_image_pair(filename, ID))\n else:\n # Get Image metrics\n self.raw_metrics = client.image_metrics(ID, raw_img_name)\n self.pro_metrics = client.image_metrics(ID, filename)\n\n s = self.raw_metrics['size']\n size = image_size(s)\n\n # display the raw and process image in GUI\n raw_img = ImageTk.PhotoImage(raw_img_obj.resize(size))\n self.raw_img_label.configure(image=raw_img)\n self.raw_img_label.image = raw_img\n\n pro_img = ImageTk.PhotoImage(pro_img_obj.resize(size))\n self.pro_img_label.configure(image=pro_img)\n self.pro_img_label.image = pro_img\n\n # display raw and process histogram in GUI\n raw_hist = ImageTk.PhotoImage(raw_hist_obj.resize([385, 450]))\n self.raw_hist_label.configure(image=raw_hist)\n self.raw_hist_label.image = raw_hist\n\n pro_hist = ImageTk.PhotoImage(pro_hist_obj.resize([385, 450]))\n self.pro_hist_label.configure(image=pro_hist)\n self.pro_hist_label.image = pro_hist\n\n # Save file to a designated folder\n full_name = savepath + '/' + filename + '.' 
+ self.saveas.get()\n pro_img_obj.save(full_name)\n self.msg2.set('Image is saved successfully')\n\n else:\n download_multiple(select_files, savepath, ID, self.saveas.get())\n self.msg2.set('Images are saved successfully '\n 'in \"processed_images.zip\"')", "def observation_img_callback(self, oimg):\n self._observation_img = oimg", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def display_image(img, label):\n cv2.imshow(label,img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display(self):\n image_qt = ImageQt.ImageQt(self.view_state.get_image())\n self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image_qt))\n self.imageLabel.adjustSize()", "def create_image_caption_pairs(self):", "def show_picture(self, data):\n raise NotImplementedError", "def copyin(self, greyscale, mw):\n self.gs = greyscale\n self.imagearray = mw.currentimage\n self.imagetitle = mw.currentimage_title\n self.gsname.setText(self.gs.name)\n collist = self.gs.shades + [0, 255]\n collist.sort(reverse=not self.gs.inverse)\n self.currentshades = collist\n self.numcols = len(collist)\n\n # Set up dialog box appropriately\n\n self.performingupdate = True\n\n self.numscales.setValue(self.numcols)\n self.setupshades()\n\n vallist = self.gs.values\n\n if self.imagearray is None:\n self.meanvalue = 0\n self.stdvalue = 1\n self.minvalue = -100.0\n self.maxvalue = 70000.0\n self.minstdd = -100.0\n self.maxstdd = 100.0\n else:\n self.plotfigure = plt.figure(figsize=(mw.imwidth, mw.imheight))\n self.meanvalue = self.imagearray.mean()\n self.stdvalue = self.imagearray.std()\n self.minvalue = self.imagearray.min()\n self.maxvalue = self.imagearray.max()\n self.minstdd = (self.minvalue - self.meanvalue) / self.stdvalue\n self.maxstdd = (self.maxvalue - self.meanvalue) / self.stdvalue\n self.meanv.setText(\"%.2f\" % self.meanvalue)\n self.medianv.setText(\"%.2f\" % np.median(self.imagearray))\n self.sigmav.setText(\"%.2f\" % self.stdvalue)\n\n if self.gs.isfixed:\n self.fixedcount.setChecked(True)\n vallist.append(self.maxvalue)\n vallist.sort()\n self.currentvalues = vallist\n elif self.gs.isperc:\n self.percentile.setChecked(True)\n vallist.append(100.0)\n vallist.sort()\n self.currentpercents = vallist\n else:\n self.nstddevs.setChecked(True)\n vallist.append(self.maxstdd)\n vallist.sort()\n self.currentnsigs = vallist\n\n # Initialise minimum value fields (don't need zero percent)\n\n self.nsd0.setValue(self.minstdd)\n self.v0.setValue(self.minvalue)\n\n # Create other two lists and display\n\n self.createrest()\n self.fillingrid()\n self.setupenabled()\n self.performingupdate = False\n self.plotmap()", "def view_thumbnail_complete():\n curItem = complete_tereeview.focus().strip('#')\n with open(\"images_url_dict.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = \"-\".join(curItem.lower().split())\n try:\n \"\"\"look for entry info from local database\"\"\"\n img_list = imgs_dict[name]\n img_url = img_list[0]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = 
ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n\n except KeyError:\n print(\"Failed series image list....\")\n with open(\"Movies_List.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = curItem\n try:\n img_list = imgs_dict[name]\n img_url = img_list[1]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n except Exception as error_ml:\n print(\"Failed using movie list Error :: \\n\", error_ml)\n\n\n except Exception as local_excep:\n\n print(\"ERROR :: \" + str(local_excep))", "def see_image(self, idx, show=True):\n true_label = self.true_targets[idx]\n img, label, _ = self.__getitem__(idx) # img has channel as 1st dim\n img = np.transpose(img.numpy(), (1, 2, 0)) # channel as last dim\n if show:\n plt.imshow(img)\n plt.title(f\"Label: {self.classes_labels[true_label]}\")\n plt.show()\n else:\n return img, label, true_label", "def setUI(self):\n \n l = QtGui.QLabel(\"Open file:\")\n browseButton = QtGui.QPushButton(\"Browse\")\n analyzeButton = QtGui.QPushButton(\"Analyse\")\n self.filelabel = QtGui.QLabel(\"\")\n self.messageLabel = QtGui.QLabel(\"\")\n \n #camera intrasec values\n self.fxlabel = QtGui.QLabel('focal x')\n self.fylabel = QtGui.QLabel('focal y')\n self.dist1label = QtGui.QLabel('K1')\n self.dist2label = QtGui.QLabel('K2')\n self.dist3label = QtGui.QLabel('P1')\n self.dist4label = QtGui.QLabel('P2')\n\n #set layout\n self.grid = QtGui.QGridLayout()\n a = self.grid.addWidget\n a(l, 0,0)\n a(browseButton, 0,2)\n a(self.filelabel,0,1)\n a(self.messageLabel, 1,0,1,4)\n a(analyzeButton, 2,0,1,4)\n\n a(self.fxlabel, 3,0)\n a(self.fylabel, 3,1)\n a(self.dist1label, 4,0)\n a(self.dist2label, 5,0)\n a(self.dist3label, 6,0)\n a(self.dist4label, 7,0)\n\n self.setLayout(self.grid)\n\n\n #connect signals to methods\n self.connect(browseButton, QtCore.SIGNAL('clicked()'), self.onOpenFileClicked)\n self.connect(analyzeButton, QtCore.SIGNAL('clicked()'), self.startAnalyze)", "def set_info():\n #Example response\n '''{'copyright': 'Thomas Ashcraft', 'date': '2021-01-04', 'explanation': 'What causes sprite lightning? Mysterious bursts of light in the sky that momentarily resemble gigantic jellyfish have been recorded for over 30 years, but apart from a general association with positive cloud-to-ground lightning, their root cause remains unknown. Some thunderstorms have them -- most don\\'t. Recently, however, high speed videos are better detailing how sprites actually develop. The featured video, captured in mid-2019, is fast enough -- \n at about 100,000 frames per second -- to time-resolve several sprite \"bombs\" dropping and developing into the multi-pronged streamers that appear on still images. Unfortunately, the visual clues provided by videos like these do not fully resolve the sprite origins mystery. 
High speed vidoes do indicate to some researchers, though, that sprites are more likely to occur when plasma irregularities exist in the \nupper atmosphere. Astrophysicists: Browse 2,300+ codes in the Astrophysics Source Code Library', 'media_type': 'video', 'service_version': 'v1', 'title': 'Sprite Lightning at 100,000 Frames Per Second', 'url': 'https://www.youtube.com/embed/zS_XgF9i8tc?rel=0'}'''\n\n #Update the picture date and explanation\n picture_date.config(text=response['date'])\n picture_explanation.config(text=response['explanation'])\n\n #We need to use 3 images in other functions; an img, a thumb, and a full_img\n global img\n global thumb\n global full_img\n\n url = response['url']\n\n if response['media_type'] == 'image':\n #Grab the photo that is stored in our response.\n img_response = requests.get(url, stream=True)\n\n #Get the content of response and use BytesIO to open it as an image\n #Keep a reference to this img as this is what we can use to save the image (Image nit PhotoImage)\n #Create the full screen image for a second window\n img_data = img_response.content\n img = Image.open(BytesIO(img_data))\n\n full_img = ImageTk.PhotoImage(img)\n\n #Create the thumbnail for the main screen\n thumb_date = img_response.content\n thumb = Image.open(BytesIO(thumb_date))\n thumb.thumbnail((200,200))\n thumb = ImageTk.PhotoImage(thumb)\n\n #Set the thumbnail image\n picture_label.config(image=thumb)\n elif response['media_type'] == 'video':\n picture_label.config(text=url, image='')\n webbrowser.open(url)", "def displayImage(self, iFrame, img=None):\n if not img:\n imgPath = self.imgList[iFrame.currImg]\n img = Image.open(imgPath); \n if img.mode == \"I;16\":\n print \"16 bit image, converting to 8 bit\"\n img.mode = 'I'\n img = img.point(lambda i:i*(1./256.)).convert(\"RGB\");\n img = img.resize((self.ni, self.nj))\n\n #iframe keeps track of its image\n iFrame.image = img\n \n #if point is generated, gotta draw squares first\n if self.point3d:\n point = self.allUVs[iFrame.currImg];\n self.drawBox(iFrame, point)\n \n # store photo image (probably not needed in iFrame)\n iFrame.tkpi = ImageTk.PhotoImage(img) \n \n #update frames' label \n iFrame.labString.set(\"img {0}\".format(iFrame.currImg))\n \n #create new label image\n if iFrame.label_image :\n iFrame.label_image.destroy()\n iFrame.label_image = Label(iFrame.frame, image=iFrame.tkpi)\n iFrame.label_image.image = iFrame.tkpi\n iFrame.label_image.bind(\"<Button-1>\", lambda event, arg=iFrame: self.runprobe(event, iFrame))\n iFrame.label_image.bind(\"<Button-3>\", lambda event, arg=iFrame: self.nextImage(event, iFrame)) \n iFrame.label_image.bind(\"<Button-2>\", lambda event, arg=iFrame: self.prevImage(event, iFrame))\n iFrame.label_image.pack(side = LEFT);", "def show_image(self):\n cv2.imshow('Image', self.__diff_image())\n cv2.waitKey()", "def image_info(image, task_state, video_state):\n image_info = 'Frame {}/{} ({})'.format(video_state.image_idx + 1, video_state.num_frames, video_state.get_image_name())\n cv2.putText(image, image_info, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, video_state.color, 1)\n\n label_info = []\n if len(video_state.labels) > 0:\n label_info = ['{}'.format(a) for (f, a) in video_state.labels.items() if video_state.get_image_name().split('.')[0] == f]\n if len(label_info) == 0:\n label_info = ['None']\n for i, row in enumerate(label_info):\n cv2.putText(image, row, (5, 35 + i * 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, video_state.color, 1)\n cv2.imshow('Video', image)\n if video_state.look_ahead == 0: # no 
lookahead\n cv2.destroyWindow('Foresight')\n cv2.destroyWindow('Hindsight')\n elif video_state.look_ahead == 1: # only foresight\n foresight(video_state)\n elif video_state.look_ahead == 2: # foresight and hindsight\n foresight(video_state)\n hindsight(video_state)", "def showImage(self, img):\n cv2.namedWindow(self.NAME_WINDOW,cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.NAME_WINDOW, 300, 700)\n cv2.imshow(self.NAME_WINDOW , img)\n cv2.waitKey(0)", "def _main_window_callback(self, x_image, y_image):\n\n x, y = x_image, y_image\n\n print('xy is {} {}'.format(x, y))\n # Find the closest cutout to this click. Each line of the process_result...\n # list corresponds to a line of Y_tsne, so the one that is closest to the middle\n # and has the same filename is the line we need to lookup in the Y_tsne.\n # distances = [np.sum((np.array([x, y]) - np.array(middle))**2)\n # for filename, cutout_no, middle in self._process_result_filename_cutout_number\n # if self._main_window_filename in filename]\n distances = []\n for fits_filename, filename, cutout_no, middle in self._process_result_filename_cutout_number:\n if self._main_window_filename.split('/')[-1] == fits_filename.split('/')[-1]:\n d = np.sum((np.array([x, y]) - np.array(middle))**2)\n distances.append(d)\n print(distances[:9])\n inds = np.argsort(np.array(distances))\n\n fits_filename, filename, cutoutnumber, middle = self._process_result_filename_cutout_number[inds[0]]\n\n axis = self._main_window\n axis.plot([middle[0] - 112, middle[0] - 112], [middle[1] - 112, middle[1] + 112], 'y')\n axis.plot([middle[0] + 112, middle[0] + 112], [middle[1] - 112, middle[1] + 112], 'y')\n axis.plot([middle[0] - 112, middle[0] + 112], [middle[1] - 112, middle[1] - 112], 'y')\n axis.plot([middle[0] - 112, middle[0] + 112], [middle[1] + 112, middle[1] + 112], 'y')\n\n self._display_from_tsne(self._Y_tsne[inds[0],0], self._Y_tsne[inds[0],1])", "def _info(self, message):\r\n dlg = wx.MessageDialog(self, message,\r\n 'xmi2magik',\r\n wx.OK | wx.ICON_INFORMATION\r\n )\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. 
Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()", "def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)", "def image_viewer():\n key_file = auth_form()\n sg.theme('DarkBlue') \n layout = [\n [\n sg.Text(\"Image File\"),\n sg.Input(size=(25, 1), enable_events=True, key=\"file\"),\n sg.FilesBrowse(file_types=((\"Encrypted Images\", \"*.enc\"),)),\n sg.Button(\"Prev\"),\n sg.Button(\"Next\"),\n sg.Button(\"Export\")\n ],\n [sg.Image(key=\"image\")]\n ]\n window = sg.Window('Encrypted Image Viewer', layout, resizable=True).Finalize()\n window.Maximize()\n images = []\n location = 0\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"file\":\n images = values[\"file\"].split(';')\n if images:\n image_decrypted = load_image(images[0], window, key_file)\n if event == \"Next\" and images:\n if location == len(images) - 1:\n location = 0\n else:\n location += 1\n image_decrypted = load_image(images[location], window, key_file)\n if event == \"Prev\" and images:\n if location == 0:\n location = len(images) - 1\n else:\n location -= 1\n image_decrypted = load_image(images[location], window, key_file)\n \n if event == \"Export\" and images:\n image_decrypted.show()\n window.close()", "def help_main():\n global help_window, my_iterator, iterable, canvas, forward_button, picture_lst, image\n my_iterator = iter(picture_lst)\n pill_image = Image.open(image_base)\n image = ImageTk.PhotoImage(pill_image)\n\n canvas = Canvas(help_window, width=700 + 15, height=490 + 15)\n canvas.create_image(10, 10, anchor=NW, image=image)\n\n canvas.place(x=170, y=10)\n\n forward_button = ttk.Button(help_window, text=\" Вперед \", 
command=forward)\n forward_button.place(x=910, y=250)\n help_window.mainloop()", "def showResult(self, ref_im=None):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.') \n \n fov_mas = self.fov * 1e6 * 3600 * 180 / np.pi\n ticks = np.linspace(0, self.naxis-1, 7)\n ticklabels = np.linspace(fov_mas/2, -fov_mas/2, 7, dtype=int)\n \n if not isinstance(ref_im, type(None)):\n \n fig, axs = plt.subplots(1, 2)\n plt.subplots_adjust(wspace=-0.1)\n \n \n minVal = np.min([ref_im,self.res])\n maxVal = np.max([ref_im,self.res])\n \n im = axs[0].imshow(ref_im, cmap='gray', vmin=minVal, vmax=maxVal)\n temp = fig.colorbar(im, ax=axs[0], shrink=0.575, label='Jy/pixel')\n temp.remove()\n axs[0].set_xticks(ticks)\n axs[0].set_xticklabels(ticklabels)\n axs[0].set_yticks(ticks)\n axs[0].set_yticklabels(ticklabels)\n axs[0].set_title('Reference') \n axs[0].set_xlabel('Right Ascension [$\\mu$as]')\n axs[0].set_ylabel('Declination [$\\mu$as]')\n\n im = axs[1].imshow(self.res, cmap='gray', vmin=minVal, vmax=maxVal)\n axs[1].set_title('Reconstruction')\n axs[1].set_xticks(ticks)\n axs[1].set_xticklabels(ticklabels)\n axs[1].set_yticks([])\n axs[1].set_xlabel('Right Ascension [$\\mu$as]')\n fig.colorbar(im, ax=axs[1], shrink=0.575, label='Jy/pixel')\n \n plt.show()\n \n else:\n \n fig, ax = plt.subplots(1, 1)\n \n im = plt.imshow(self.res, cmap='gray')\n ax.set_xticks(ticks)\n ax.set_xticklabels(ticklabels)\n ax.set_yticks(ticks)\n ax.set_yticklabels(ticklabels)\n ax.set_title('Reconstruction') \n ax.set_xlabel('Right Ascension [$\\mu$as]')\n ax.set_ylabel('Declination [$\\mu$as]')\n fig.colorbar(im, ax=ax, label='Jy/pixel')\n \n plt.show()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def display_image(self, window_name, image):\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, image)\n cv2.waitKey(0)", "def save_button_handler(obj_response):\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n\n # Get dictionary of calibration data \n calibration_dict = {}\n for camera, info in mjpeg_info_dict.iteritems():\n topic = info['image_topic']\n calibrator_node = get_calibrator_from_topic(topic)\n if homography_calibrator.is_calibrated(calibrator_node):\n num_row, num_col, data = homography_calibrator.get_matrix(calibrator_node)\n calibration = {\n 'rows' : num_row,\n 'cols' : num_col,\n 'data' : list(data),\n }\n calibration_dict[camera] = calibration \n\n # Save calibration data \n file_tools.write_homography_calibration(calibration_dict)\n\n table_data = []\n camera_list = calibration_dict.keys()\n camera_list.sort(cmp=camera_name_cmp)\n for camera_name in camera_list:\n table_data.append('<tr> <td>')\n table_data.append(camera_name)\n table_data.append('</td> </tr>')\n table_data = '\\n'.join(table_data)\n\n if calibration_dict:\n 
obj_response.html('#message', 'Saved calibrations for cameras:')\n obj_response.html('#message_table', table_data)\n obj_response.attr('#message_table', 'style', 'display:block')\n else:\n obj_response.html('#message', 'No data to save')\n obj_response.attr('#message_table', 'style', 'display:none')" ]
[ "0.7458454", "0.616558", "0.60215414", "0.5975251", "0.584566", "0.582564", "0.5801999", "0.5760714", "0.5755779", "0.5739632", "0.5733116", "0.56553966", "0.56404024", "0.5639404", "0.5581315", "0.5552886", "0.5552128", "0.55366683", "0.5528491", "0.55033773", "0.5477598", "0.5433417", "0.5431364", "0.54234403", "0.54216486", "0.5419501", "0.53844136", "0.5382505", "0.5369436", "0.5354557", "0.5354475", "0.53493655", "0.5332083", "0.53235704", "0.5305531", "0.528945", "0.5287395", "0.5282819", "0.52733266", "0.5268381", "0.52627623", "0.52591956", "0.5254881", "0.5252194", "0.52507836", "0.52504843", "0.5250431", "0.52434677", "0.52401876", "0.5227492", "0.5224962", "0.52177614", "0.5216599", "0.5198695", "0.519861", "0.5192375", "0.5188819", "0.51877826", "0.51866966", "0.51746494", "0.51723474", "0.5171677", "0.517134", "0.5166927", "0.51567173", "0.5145921", "0.51421833", "0.5140607", "0.5135874", "0.5127273", "0.51251805", "0.51210797", "0.5111035", "0.51022595", "0.50977683", "0.50958824", "0.5093294", "0.5088916", "0.50846756", "0.5084019", "0.5081761", "0.5075886", "0.5074532", "0.5073733", "0.50719315", "0.5070162", "0.5068439", "0.5066578", "0.50662977", "0.50653124", "0.5064824", "0.50621015", "0.5054885", "0.50539666", "0.50518006", "0.50474125", "0.50473535", "0.50394017", "0.50350845", "0.50340533" ]
0.74962056
0
Open a popup window to let user choose file(s) The full filepath will be stored in the class
def import_file(self): from tkinter import filedialog self.filepath = filedialog.askopenfilenames( initialdir="/", title="Select file", filetypes=(("PNG files", "*.png"), ("JPEG files", "*.jpeg"), ("TIFF files", "*.tiff"), ("ZIP files", "*.zip"), ("all files", "*.*")))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def choose_file(self):\n pass", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename", "def open_file(self): # need to fix this to open in a new window\n\t\tself.file_path = filedialog.askopenfilename()\n\t\tf = open(self.file_path)\n\t\tfreader = f.read()\n\t\tself.textBox.insert(END, freader)", "def on_open_button(self, event):\n wildcard = \"All files (*.*)|*.*|\"\\\n \"Preprocessed _iso_res.csv file (*_iso_res.csv)|*_iso_res.csv|\"\\\n \"Massacre iso_csv file (*_iso.csv)|*_iso.csv|\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n \n if dlg.ShowModal() == wx.ID_OK:\n fullname = dlg.GetPaths()[0].split('/')\n dpa = '/'.join(fullname[:-1]) + '/'\n self.currentDirectory = dpa\n fna = fullname[-1]\n [dfr, pul, vlab] = openFile(dpa+fna)\n startApp(dfr, dpa, fna, pul, vlab, fsize=self.fsize, size=self.size)\n\n dlg.Destroy()", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n 
default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: {os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)", "def buttonClick(self):\n \n self.fpath=filedialog.askopenfilename()\n self.label_fpath.config(text=self.fpath)\n self.err_label.config(text='')\n pass", "def open_file_dialog(self, title, initial_directory=None, file_types=None, multiselect=False):\n return self._impl.open_file_dialog(title, initial_directory, file_types, multiselect)", "def popup(self, title, callfn, initialdir=None, filename=None):\n self.cb = callfn\n filenames = QtGui.QFileDialog.getOpenFileNames(\n self.parent, title, initialdir, filename)\n\n # Special handling for PyQt5, see\n # https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/\n filenames = filenames[0]\n\n all_paths = []\n for filename in filenames:\n\n # Special handling for wildcard or extension.\n # This is similar to open_files() in FBrowser plugin.\n if '*' in filename or '[' in filename:\n info = iohelper.get_fileinfo(filename)\n ext = iohelper.get_hdu_suffix(info.numhdu)\n files = glob.glob(info.filepath) # Expand wildcard\n paths = ['{0}{1}'.format(f, ext) for f in files]\n if self.all_at_once:\n all_paths.extend(paths)\n else:\n for path in paths:\n self.cb(path)\n\n else:\n # Normal load\n if self.all_at_once:\n all_paths.append(filename)\n else:\n self.cb(filename)\n\n if self.all_at_once and len(all_paths) > 0:\n self.cb(all_paths)", "def browse(self):\n\n self.filepath.set(fd.askopenfilename(initialdir=self._initaldir,\n filetypes=self._filetypes))", "def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)", "def locatefile(self):\r\n dm = DialogManager()\r\n print \"Opening file chooser ...\"\r\n file = dm.choosefile(\"Choose Raw File\")\r\n return file", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def file_to_open(self, title='Open file..', initial_folder=None, extension=\"All files (*.*)\", datafolder=None):\n pass", "def _filename(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilename(**self._kwargs)", 
"def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile", "def showInputFileInExplorer(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot show input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(filename)))", "def filepicker():\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n return file_path", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def open_file_browser(path: str):\n call(file_browser + [path])", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )", "def OnOpenFile( self, event ):\n dialog = wx.FileDialog( self, style=wx.OPEN|wx.FD_MULTIPLE )\n if dialog.ShowModal( ) == wx.ID_OK:\n paths = dialog.GetPaths()\n if self.loader:\n # we've already got a displayed data-set, open new window...\n frame = MainFrame()\n frame.Show( True )\n frame.load( *paths )\n else:\n self.load( *paths )", "def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def openFile(self):\n\n file_path = self.openFileExplorer()\n\n if file_path is not None:\n file_name = self.getFileNameFromPath(file_path)\n print('open 
file')\n\n self.add_new_tab(file_path=file_path, file_name=file_name)", "def file_menu_open_activate(self, widget, data=None):\n self.open_chooser.show()", "def action(self):\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\" or self.filename is None:\n return\n\n dirname = fs.path.forcedir(\".\")\n if self.wparm is not None:\n dirname = self.selected_dir\n if dirname.startswith(self.active_url):\n filename = \"{}{}\".format(fs.path.forcedir(self.active_url), self.filename)\n else:\n # We can't use fs.path.join and also not fs.path.abspath because of protocol url\n filename = \"{}{}{}\".format(\n fs.path.forcedir(self.active_url),\n fs.path.forcedir(dirname),\n self.filename,\n )\n filename = filename.replace(fs.path.forcedir(\".\"), \"\")\n if self.show_save_action and not self.show_dirs_only:\n self.save_settings()\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\":\n return\n info = self.get_info(fs.path.split(filename)[1], namespaces=None)\n if info is not None and info.is_dir:\n sel = QtWidgets.QMessageBox.warning(\n self,\n \"Warning\",\n \"You can't create a file with this name: {0}\".format(self.filename),\n QtWidgets.QMessageBox.No,\n )\n elif info is not None and info.is_file:\n sel = QtWidgets.QMessageBox.question(\n self,\n \"Replace Filename\",\n \"This will replace the filename: {0}. Continue?\".format(\n self.filename\n ),\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n )\n if sel == QtWidgets.QMessageBox.Yes:\n self.filename = filename\n self.close()\n else:\n pass\n else:\n self.filename = filename\n self.close()\n else:\n self.filename = filename\n self.close()", "def open_files():\n import Tkinter\n import tkFileDialog\n\n root = Tkinter.Tk()\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n \n root.deiconify()\n root.lift()\n root.focus_force()\n \n filenames = tkFileDialog.askopenfilenames(parent=root, title = \"Open file\")\n root.destroy()\n \n return filenames[0]", "def _open_file(self, path):\n path = os.path.normpath(os.path.abspath(path))\n while True:\n dialog = self._app.window(class_name='#32770')\n dialog.wait('ready')\n\n # If asked whether to save changes, say no\n try:\n dialog_text = dialog.StaticWrapper2.element_info.name\n if 'Save it?' 
in dialog_text:\n dialog.Button2.click()\n continue\n except MatchError:\n pass\n break\n\n dialog.Edit1.set_edit_text(path)\n dialog.Edit1.send_keystrokes('{ENTER}')\n self.wait_ready(timeout=60)", "def popup(self, title, callfn, initialdir=None):\n self.cb = callfn\n dirname = QtGui.QFileDialog.getExistingDirectory(\n self.parent, title, initialdir)\n if dirname:\n self.cb(dirname)", "def on_load_clicked(self, button):\n # Define the dialog to open launch file\n\n dialog = Gtk.FileChooserDialog(\"Please select launch file\", self,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n \"Select\", Gtk.ResponseType.OK))\n\n dialog.set_current_folder(basepath + '/launch')\n dialog.set_default_size(800, 400)\n\n\n # Initiate filter and set it to XML files\n filter = Gtk.FileFilter()\n filter.add_pattern(\"*.launch\")\n\n # Add filter to dialog\n dialog.add_filter(filter)\n\n # Open defined dialog\n response = dialog.run()\n\n # Define dialog options\n if response == Gtk.ResponseType.OK:\n print(\"Select clicked\")\n print(\"File selected: \" + dialog.get_filename())\n\n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n\n dialog.destroy()", "def read_file(self):\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n self.filename = askopenfilename(title='Select Hospital Text File') # show an \"Open\" dialog box and return the path to the selected file", "def open_fileDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Открыть исходный файл\", os.path.expanduser(\"~\"),\n \"XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format == 'xml':\n self.data_from_xml(fileName)\n elif file_format == 'json':\n self.data_from_json(fileName)\n self.msg2Statusbar.emit('Импорт из файла {0}'.format(fileName))", "def callDialog(self):\n self.pathTuple = filedialog.askopenfilenames(filetypes=[(\"Excel files\", \".xlsx .xls .xlsm .xlsb\")])\n self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]", "def selectFile(title=\"Select image\", initialdir=None, multiple=False):\r\n file = filedialog.askopenfilename(\r\n initialdir=initialdir,\r\n multiple=multiple,\r\n title=title\r\n )\r\n return file", "def cb_open(self, button):\n print(\"Open File callback\")\n dialog = Gtk.FileChooserDialog(\n title=\"Please choose a file\", \n parent=self, \n action=Gtk.FileChooserAction.OPEN\n )\n dialog.add_buttons(\n Gtk.STOCK_CANCEL,\n Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN,\n Gtk.ResponseType.OK,\n )\n\n self.add_filters(dialog)\n\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n print(\"Open clicked\")\n print(\"File selected: \" + dialog.get_filename())\n self.filename = dialog.get_filename()\n if TESTING:\n # Testing. Place a time stamp into the file each time it is opened.\n # E.g. 
'Fri May 7 16:46:41 2021'\n with open(self.filename, \"a\") as fout:\n fout.write(\"Opened: \" + time.ctime() + \"\\n\") \n \n \n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n\n dialog.destroy()", "def browse_file_dialog():\n root = Tkinter.Tk()\n # Make window almost invisible to focus it and ensure directory browser\n # doesn't end up loading in the background behind main window.\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n root.deiconify()\n root.lift()\n root.focus_force()\n root.update()\n file_path = tkFileDialog.askopenfilename()\n root.destroy()\n if file_path:\n return os.path.normpath(file_path)\n else:\n return file_path", "def _open_file(self):\n file = QFileDialog.getOpenFileName(self, \"Open file\", \".\")[0]\n if file:\n self.try_add_tab(file)", "def file(self, win):\n name = QtWidgets.QFileDialog.getOpenFileName(win, 'Open file')\n self.file_name = name[0]\n self.setImage(name[0])", "def openInputFile(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot open input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(filename))", "def on_browse(self, event):\r\n wildcard = \"All files (*.*)|*.*\"\r\n with wx.FileDialog(None, \"Choose a file\",\r\n wildcard=wildcard,\r\n style=wx.ID_OPEN) as dialog:\r\n if dialog.ShowModal() == wx.ID_OK:\r\n self.grin_location.SetValue(dialog.GetPath())", "def open_file(self):\n files = [('Text Document', '*.txt'), ('PDF Document', '*.pdf'), ('Word Document', '*.docx')]\n text_file = askopenfile(mode='r', title=\"Open your file\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_inside = self.file.load_file(text_file.name)\n text_file.close()\n self.textbox.delete(\"1.0\", tk.END)\n self.textbox.insert(\"1.0\", text_inside)\n self.text = self.textbox", "def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)", "def msg_open(self,msg):\r\n filepaths = msg.get_data()\r\n if filepaths is ():\r\n #Create the file open dialog.\r\n filepaths,index = DoFileDialog(self.frame, wildcard = \"Python source (*.py,*.pyw)|*.py;*.pyw|All files (*,*.*)|*.*;*\")\r\n if filepaths==None:\r\n return\r\n\r\n if (filepaths is not None) and (filepaths!=[]):\r\n #open the file requested\r\n for path in filepaths:\r\n self.frame.notebook.OpenFile(path)\r\n self.frame.Show()\r\n self.frame.Raise()", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually 
invisible.\n root.destroy()\n \n return file_path", "def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def file_menu_new_activate(self, widget, data=None):\n self.new_chooser.show()", "def fileBrowserDialog(*args, actionName: AnyStr=\"\", dialogStyle: int=0, fileCommand:\n Script=None, fileType: AnyStr=\"\", filterList: Union[AnyStr,\n List[AnyStr]]=\"\", includeName: AnyStr=\"\", mode: int=0, operationMode:\n AnyStr=\"\", tipMessage: AnyStr=\"\", windowTitle: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def askOpenFile(dirname=\".\"):\n\n import Tkinter,tkFileDialog\n root = Tkinter.Tk()\n file = tkFileDialog.askopenfile(parent=root,mode='rb',title='Choose a file',initialdir=dirname)\n return file", "def FileDialog( message, wildcard, style, defaultDir=os.getcwd(), defaultFile='' ):\n dlg = wx.FileDialog( wx.GetApp().GetTopWindow(), message, defaultDir, defaultFile, wildcard, style )\n if dlg.ShowModal() == wx.ID_OK:\n if style & wx.MULTIPLE:\n result = dlg.GetPaths()\n else:\n result = dlg.GetPath()\n else:\n result = False\n dlg.Destroy()\n \n return result", "def OnOpen(self, e):\n self.mainparent.statusbar.SetStatusText(\"Loading Files ...\", 0)\n\n dirname = os.getcwd()\n dlg = wx.FileDialog(self, \"Select File\", dirname, \"\", \"*\", wx.FD_OPEN)\n\n if (dlg.ShowModal() != wx.ID_OK):\n dlg.Destroy()\n self.mainparent.reset_statusbar()\n return\n\n full_path = str(dlg.GetPath()) # get selected filename and convert to standard string\n\n self.mainparent.input_file = InputFile(full_path) # parse input file\n\n self.mainparent.update_namelist_menu() # update available namelist menu\n\n self.mainparent.reset_statusbar()\n self.mainparent.statusbar.SetStatusText(\"File: {}\".format(full_path), 2)\n\n self.mainparent.file_loaded = True", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilenames()\n self._import_path_var.set(filename)", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def load_file(self):\n return tkinter.filedialog.askopenfilename(defaultextension=\".txt\")", "def menu_open_files(self, event=None):\n self.parentPanel.open(event)", "def _launch_file_b(self):\n types = [\n (\"JPG\", \"*.jpg\"),\n (\"Bitmap\", \"*.bmp\"),\n (\"PNG\", \"*.png\"),\n (\"GIF\", \"*.gif\"),\n (\"All files\", \"*\")]\n dialog = tkFileDialog.Open(self, filetypes = types)\n self._file_path = dialog.show()\n\n self._file_name = self._scrub_name(self._file_path)\n self._move_img()\n return self._file_name", "def send_file_name():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n elif len(entries) != 0:\n messagebox.showinfo(\"Warning!\", 
\"You must first close the current file!\", parent=app_frame)\n return\n\n events = get_file(value.get())\n # Call display_lr_assignments() and send events file to be displayed in the application window\n display_lr_assignments(events)", "def open_file(self, widget, data=None):\n\n #Displays a fiel chooser dialog\n dialog = gtk.FileChooserDialog(\"Open..\",None,\n gtk.FILE_CHOOSER_ACTION_OPEN,\n (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n\n response = dialog.run()\n \n self.drawing.set_initial_values()\n self.drawing.cr.move_to(20,20)\n\n if response == gtk.RESPONSE_OK:\n self.filename = dialog.get_filename() \n self.window.set_title(\"Python Viewer - \" + self.filename )\n\n try: \n ifile = open(self.filename, 'r')\n self.drawing.text = ifile.read().split('\\n')\n #self.drawing.text = ifile.read()\n ifile.close()\n dialog.destroy()\n \n self.drawing.line_count = len(self.drawing.text)\n \n self.drawing.parse_text()\n\n self.drawing.redraw_canvas(0) \n except IOError:\n pass\n \n elif response == gtk.RESPONSE_CANCEL:\n self.window.set_title(\"Python Viewer\")\n dialog.destroy()", "def open_file(self, widget):\n\n\t\tdialog = Gtk.FileChooserDialog(\"Please choose a file\", None,\n\t\t\tGtk.FileChooserAction.OPEN,\n\t\t\t(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n\t\t\t Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n\t\tfile_filters.add_filter_database(dialog)\n\n\t\tresponse = dialog.run()\n\t\tif response == Gtk.ResponseType.OK:\n\t\t\tfile_selected = dialog.get_filename()\n\t\t\ttry:\n\t\t\t\tself.engine = karmaEngine(session_file=file_selected)\n\n\t\t\t\t# update the hostlist\n\t\t\t\tself._clear_workspace()\n\t\t\t\tself._sync(reset=True)\n\t\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e) \n\t\t\t\n\t\telif response == Gtk.ResponseType.CANCEL:\n\t\t\tdialog.destroy()\n\n\t\tdialog.destroy()", "def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)", "def _openButton(self):\n #get the specified file\n selected_file = self.view.list.getSelected()\n\n if selected_file:\n self.model.open(selected_file)\n return\n\n #prompt if they really want to open maya\n dialogs = Dialogs(self.view)\n\n msg = 'No file selected!'\n msg += '\\n\\nAre you sure you want to open maya without a file?'\n dialogs.confirmPrompt(msg)\n\n self.model.open()", "def on_File1_toolButton_clicked(self):\n my_file = QtWidgets.QFileDialog.getOpenFileName(self, u'打开文件', '/')\n if my_file[0]:\n self.File1_lineEdit.setText(my_file[0])\n else:\n QtWidgets.QMessageBox.warning(self, u'警告', u'请选择输入文件')", "def FileToRun(self):\n FileOrFilesToRun = QFileDialog.getExistingDirectory(self, \n \"Select your file or files you wish to run\",\n \"/home\")\n self.FileFolder.setText(FileOrFilesToRun)", "def _open_files(self):\n file_names = filedialog.askopenfilenames(initialdir=self.current_directory, title = \"Select file\")\n if(file_names): self.current_directory = os.path.dirname(file_names[0])\n if(len(file_names) == 1):\n file_names = file_names[0]\n return file_names", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def OnOpen(self, e):\n\t\tsuccess = False\n\t\tdlg = wx.FileDialog(self, \"Choose a file\", self.dirname, \"\", \"*.*\", wx.FD_OPEN)\n\t\tif dlg.ShowModal() == wx.ID_OK:\n\t\t\tsuccess = True\n\t\t\tself.dirname, self.filename = os.path.split(dlg.GetPath())\n\t\tdlg.Destroy()\n\t\tif 
success:\n\t\t\tself.FileText.SetLabel(\"File: \"+self.filename)\n\t\t\tself.raw_file = data.load_data(os.path.join(self.dirname, self.filename))\n\t\t\tself.combine_data()\n\t\t\tself.plot_data()", "def pkg_app_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._pkg_app_path_var.set(filename)", "def onOpenMenu(self, item):\n self.dialog = pyui.dialogs.FileDialog(os.getcwd(), self.onOpenChosen, \".*stk\")\n self.dialog.doModal()\n return 1", "def getFile():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n return(filedialog.askopenfilenames())", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def showOpenImageDialog(self, event):\r\n openImageDialog = wx.FileDialog(self, \"Open\",\r\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\r\n if openImageDialog.ShowModal() == wx.ID_CANCEL:\r\n return\r\n self.setImage(openImageDialog.GetPath())", "def open_app(self, event=None):\n if not self.ask_save():\n return\n default_path = os.path.dirname(common.root.filename or \"\") or self.cur_dir\n infile = wx.FileSelector(_(\"Open file\"),\n wildcard=\"wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt)|*.wgt|\"\n \"XML files (*.xml)|*.xml|All files|*\",\n flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=default_path)\n if not infile: return\n self._open(infile)", "def planet_clicked(self, filename):\n self.chosen_filename = filename\n self.accept()", "def select_file():\n filename = filedialog.askopenfilename(\n initialdir=os.getcwd(), title=\"Select Backup file...\",\n filetypes=((\"JSON Files\", \"*.json\"),\n (\"Text Files\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n self.init_data(filename)", "def loadPathMenuAction(self):\n logger.info('loadPathMenuAction')\n fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', '',\"Files (*.csv *.xlsx)\")\n fname = fname[0] # fname is a tuple\n print(f'fname: \"{fname}\"')\n if os.path.isfile(fname):\n self.loadPath(fname)", "def open_file(event):\r\n\r\n filepath = askopenfilename(\r\n\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\r\n\r\n )\r\n\r\n if not filepath:\r\n\r\n return\r\n\r\n # Si se abre un archivo esta línea borra todo el contenido de el text Area\r\n txt_Area.delete(\"1.0\", tk.END)\r\n\r\n # Se abre el archivo\r\n with open(filepath, \"r\") as input_file:\r\n\r\n # Se lee el archivo\r\n text = input_file.read()\r\n\r\n # Se inserta el archivo\r\n txt_Area.insert(tk.END, text)\r\n\r\n window.title(f\"Simple Text Editor - {filepath}\")", "def openFile(self, index):\n page_name = index.data().toString()\n file_name = self.file_names[str(page_name)]\n self.main_help_window.setHtml(open(file_name, 'r').read())", "def open(self):\n file = askopenfilename(\n initialdir=self.initial_directory,\n filetypes=(\n (\"Audio Video Interleave\", \"*.avi\"),\n (\"Matroska\", \"*.mkv\"),(\"MPEG-4 AVC\",\"*.mp4\"),\n )\n )\n if isinstance(file, tuple):\n return\n if os.path.isfile(file):\n self.play_film(file)", "def file_select(self):\r\n # select ui file and change file extension to .py\r\n self.lineEdit_Ui_file_selection.clear()\r\n self.lineEdit_Py_file_name.clear()\r\n options = QtWidgets.QFileDialog.Options()\r\n options |= QtWidgets.QFileDialog.DontUseNativeDialog\r\n 
self.fileName, _ = QtWidgets.QFileDialog.getOpenFileName(\r\n None,\r\n \"QFileDialog.getOpenFileName()\",\r\n \"\",\r\n \"UI Files (*.ui);;All Files (*)\",\r\n options=options)\r\n py_Filename = self.fileName[:-2]\r\n py_Filename = py_Filename + \"py\"\r\n self.lineEdit_Ui_file_selection.insert( self.fileName )\r\n if self.fileName:\r\n self.lineEdit_Py_file_name.insert( py_Filename )", "def open_file_dialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self,\n \"Select text file\",\n \"\",\n \"Text Files(*);;\",\n options=options)\n if file_name:\n try:\n content = read_file(file_name)\n self.ui.plainTextEdit.setPlainText(\"\".join(content))\n except:\n QMessageBox.question(self, 'Error', \"Chosen file is not text\",\n QMessageBox.Ok | QMessageBox.NoButton)", "def _filename_multi(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilenames(**self._kwargs)", "def handleAddFileButtonClicked(self):\n # Find the directory of the most recently opened image file\n mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )\n if mostRecentImageFile is not None:\n defaultDirectory = os.path.split(mostRecentImageFile)[0]\n else:\n defaultDirectory = os.path.expanduser('~')\n\n # Launch the \"Open File\" dialog\n fileNames = self.getImageFileNamesToOpen(defaultDirectory)\n\n # If the user didn't cancel\n if len(fileNames) > 0:\n PreferencesManager().set('DataSelection', 'recent image', fileNames[0])\n try:\n self.addFileNames(fileNames)\n except RuntimeError as e:\n QMessageBox.critical(self, \"Error loading file\", str(e))", "def browse( self ):\n Tk.Tk().withdraw()\n dirname = askdirectory()\n\n self.set_text( dirname )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def on_lookpushButton_clicked(self):\n # TODO: not implemented yet\n self.openFile()" ]
[ "0.81688225", "0.80503184", "0.78248763", "0.77235496", "0.7679591", "0.76397675", "0.76276994", "0.7585482", "0.7508304", "0.74986696", "0.7496582", "0.7495827", "0.74400616", "0.7432761", "0.7358748", "0.7349472", "0.7333362", "0.73316485", "0.7327523", "0.7324655", "0.72692263", "0.72661793", "0.7195993", "0.7193288", "0.71919584", "0.7189422", "0.71518004", "0.71488786", "0.7123981", "0.70869035", "0.70712817", "0.70297575", "0.7016446", "0.7011079", "0.69754946", "0.6971003", "0.6959194", "0.6953209", "0.69529575", "0.69467825", "0.6930888", "0.6920647", "0.69206", "0.6907717", "0.68819296", "0.6862954", "0.68445706", "0.6843804", "0.68423057", "0.6832814", "0.6823864", "0.6816814", "0.6805153", "0.6795342", "0.6794053", "0.6776275", "0.67702395", "0.675848", "0.6750556", "0.6750556", "0.6750556", "0.6739367", "0.6733198", "0.6731776", "0.67274606", "0.67113316", "0.67091894", "0.6706775", "0.6695964", "0.66746664", "0.6659987", "0.6645203", "0.66372263", "0.6636582", "0.66259164", "0.6623437", "0.6621684", "0.6610731", "0.6603884", "0.6601723", "0.6600491", "0.65974534", "0.65933347", "0.65863836", "0.65840095", "0.65840095", "0.6582098", "0.6568164", "0.6562903", "0.65585166", "0.65527225", "0.65460086", "0.6528116", "0.6527741", "0.65217006", "0.65174073", "0.6504509", "0.64995825", "0.6486807", "0.6472038" ]
0.7110334
29
Control the load button. Load a list of processed images in the database. The user can choose one or more images to download. If only one image is chosen, the resulting plot will be displayed in the GUI. If multiple files are chosen, they will be zipped into a zip archive and saved to a designated path.
def load_function(self):
        self.image_names = client.get_image_list(self.user_name.get())
        # clear listbox
        self.name_list.delete(0, END)
        for i in self.image_names:
            self.name_list.insert(END, i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_function(self):\n # Ask user for directory and user ID\n savepath = filedialog.askdirectory()\n ID = self.user_name.get()\n\n self.msg2.set('Saving files to the designated folder')\n\n # Get selected filenames\n index = self.name_list.curselection()\n select_files = [self.image_names[i] for i in index]\n\n single = check_multi_single(select_files)\n\n if single is True:\n\n filename = select_files[0]\n try:\n pro_img_obj, raw_img_obj, raw_img_name, \\\n pro_hist_obj, raw_hist_obj = get_image_pair(filename, ID)\n except ValueError:\n self.msg2.set(get_image_pair(filename, ID))\n else:\n # Get Image metrics\n self.raw_metrics = client.image_metrics(ID, raw_img_name)\n self.pro_metrics = client.image_metrics(ID, filename)\n\n s = self.raw_metrics['size']\n size = image_size(s)\n\n # display the raw and process image in GUI\n raw_img = ImageTk.PhotoImage(raw_img_obj.resize(size))\n self.raw_img_label.configure(image=raw_img)\n self.raw_img_label.image = raw_img\n\n pro_img = ImageTk.PhotoImage(pro_img_obj.resize(size))\n self.pro_img_label.configure(image=pro_img)\n self.pro_img_label.image = pro_img\n\n # display raw and process histogram in GUI\n raw_hist = ImageTk.PhotoImage(raw_hist_obj.resize([385, 450]))\n self.raw_hist_label.configure(image=raw_hist)\n self.raw_hist_label.image = raw_hist\n\n pro_hist = ImageTk.PhotoImage(pro_hist_obj.resize([385, 450]))\n self.pro_hist_label.configure(image=pro_hist)\n self.pro_hist_label.image = pro_hist\n\n # Save file to a designated folder\n full_name = savepath + '/' + filename + '.' + self.saveas.get()\n pro_img_obj.save(full_name)\n self.msg2.set('Image is saved successfully')\n\n else:\n download_multiple(select_files, savepath, ID, self.saveas.get())\n self.msg2.set('Images are saved successfully '\n 'in \"processed_images.zip\"')", "def execute_file(self, event=None):\n file_list = self.get_path_list()\n print(file_list)\n if not file_list:\n return\n # merge image\n # 修复内存泄露的bug,由于没有清除之前打开的图片,第二次打开的图片仍然为之前的图片\n try:\n self.photos.destroy()\n except:\n pass\n self.photos.imgs = file_list \n merged_photo = self.photos.merge_photos()\n\n # show image\n try:\n window.destroy()\n except:\n import traceback\n traceback.print_exc()\n window.build_img_canvas()\n window.show_img_in_canvas(merged_photo)", "def load_files(self, filenames):\n self.filenames = filenames\n self.slider.setRange(0, len(self.filenames) - 1)\n self.slider.setSliderPosition(0)\n self.update_image()", "def load_file(self):\n extensions = DataReader().get_supported_extensions_as_string()\n file_name, _ = QFileDialog.getOpenFileName(self, \"Open data set\", \"\",\n \"Images (\" + extensions + \")\")\n if not file_name:\n return\n\n self.render_widget.load_file(file_name)\n self.switch_to_simple()", "def download_images(self, im_format: str):\n rows = self.tbl_images.get_selected_rows()\n ids = []\n names = []\n for r in rows:\n ids.append(self.tbl_images.item(r, 0).text())\n names.append(self.tbl_images.item(r, 1).text())\n\n if len(ids) == 1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='Save As..')\n\n dialog.setMimeTypeFilters([\"image/\"+im_format.lower()])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n\n elif len(ids) >= 
1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='222Save As..')\n dialog.setMimeTypeFilters(['application/zip'])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n else:\n return", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def images(datasets, parent, dtype=numpy.single, \n size=(700,700), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER) :\n \n dialog = ExplorerDialog(parent, size=size,style=style)\n dialog.set_datasets(datasets)\n \n # Size must be specified explicitely on Windows\n dialog.SetSize((700,700))\n \n if dialog.ShowModal() != wx.ID_OK :\n dialog.Destroy()\n return []\n dialog.Destroy()\n \n # Get selected series from dialog, load it.\n series = dialog.get_selected_datasets()\n \n periodic_progress_dialog = PeriodicProgressDialog(\n 0.2, \"Loading files\", \"Loading files ...\")\n worker_thread = WorkerThread(\n periodic_progress_dialog, \n target=medipy.io.dicom.load_dicomdir_records, args=(series,))\n worker_thread.start()\n periodic_progress_dialog.start()\n worker_thread.join()\n periodic_progress_dialog.Destroy()\n if worker_thread.exception is not None :\n wx.MessageBox(\n \"Could not load series : %s\"%(worker_thread.exception,), \n \"Could not load series\")\n return []\n \n series = worker_thread.result\n \n # Reconstruct one image per stack\n series = medipy.io.dicom.split.images(series)\n series = medipy.io.dicom.normalize.normalize(series)\n stacks = medipy.io.dicom.stacks(series)\n \n result = []\n periodic_progress_dialog = PeriodicProgressDialog(\n 0.2, \"Reconstructing images\", \n \"Reconstructing images (%i/%i) ...\"%(0, len(stacks)))\n for index, stack in enumerate(stacks) :\n periodic_progress_dialog.Pulse(\n \"Reconstructing images (%i/%i) ...\"%(index+1, len(stacks)))\n \n worker_thread = WorkerThread(periodic_progress_dialog,\n target=medipy.io.dicom.image, args=(stack,))\n worker_thread.start()\n periodic_progress_dialog.start()\n worker_thread.join()\n if worker_thread.exception is not None :\n wx.MessageBox(\n \"Could not reconstruct image : %s\"%(worker_thread.exception,), \n \"Could not reconstruct image\")\n else :\n image = worker_thread.result\n if dtype is not None :\n image.data = image.data.astype(dtype)\n result.append(image)\n periodic_progress_dialog.Destroy()\n \n return result", "def download_images_png(self):\n self.show_as_waiting(True)\n self.download_images('PNG')\n self.show_as_waiting(False)", "def load_button_released(self, event):\r\n if self.winfo_containing(event.x_root, event.y_root) == self.load_button: # If the clicked area contains the\r\n # load button\r\n filename = filedialog.askopenfilename() # A file dialog opens asking the user to select the file\r\n img = cv2.imread(filename) # Image is read from that file location\r\n img = img.astype('float32') # Convert the pixels to 8 bit float to perform float operations on them\r\n\r\n if img is not None: # If image is selected\r\n self.master.filename = filename # Set the filename parameter\r\n self.master.images.append(img) # Append the selected image in the stack\r\n self.master.display_image.display_image(img=img) # Display the image on the window\r", "def 
load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = ['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def plot_dir(main):\n try:\n wd = str(main.lineEdit_8.text())\n if wd == '':\n main.msg(\"Error \"+errorPath+\"plot_dir: Must choose directory first\")\n return\n for fi in os.listdir(wd):\n dataPath = os.path.join(wd, fi)\n main.msg(\"Plotting \"+str(fi))\n img = mpimg.imread(str(dataPath))\n imgObj = Img.Img(img, title=str(fi), filePath=str(dataPath))\n main.imgObjList.append(imgObj)\n func.update(main)\n slider.slider_update(main)\n except:\n main.msg(\"Error \"+errorPath+\"plot_dir: Make sure all files are images (tiff, jpeg, etc.)\")", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)", "def data_processing_interface(root):\n try:\n root.withdraw()\n top = ThemedTk(theme=\"aqua\")\n top.title(\"Data Processing\")\n top.geometry(\"500x300\")\n\n # label at the top of person database name\n my_label = Label(top, text=\"Data Processing\", font=(\"Times New Roman\", 18, \"bold\"))\n my_label.grid(row=0, 
column=1, padx=(50, 50), pady=(10, 20))\n\n def path_of_image():\n \"\"\" open file explorer to select directory \"\"\"\n top.folder_name = filedialog.askdirectory(title=\"select directory\",\n initialdir=\"C:/Users/Ayush sagore/JUPITER NOTEBOOK ML/CNN Model/\"\n \"test_dataset/\")\n path_name.insert(0, top.folder_name)\n\n # label and input field for path and button right to input field for browsing to data source folder\n path_name_label = Label(top, text=\"Source Path :\", font=(\"Times New Roman\", 12, \"bold\"))\n path_name_label.grid(row=1, column=0, padx=(0, 10), pady=(20, 10))\n\n path_name = Entry(top, width=25, font=(\"Times New Roman\", 12, \"bold\"))\n path_name.grid(row=1, column=1, padx=(13, 10), pady=(20, 10))\n\n path_btn = Button(top, text=\"->\", command=path_of_image)\n path_btn.grid(row=1, column=2, columnspan=2, pady=(15, 0), padx=(0, 0), ipady=1, ipadx=6)\n\n # label and input field for folder name\n folder_name_label = Label(top, text=\"Folder Name :\", font=(\"Times New Roman\", 12, \"bold\"))\n folder_name_label.grid(row=2, column=0, padx=(10, 10), pady=(20, 10))\n\n folder_name = Entry(top, width=25, font=(\"Times New Roman\", 12, \"bold\"))\n folder_name.grid(row=2, column=1, padx=20, pady=(20, 10))\n\n def hide_open2():\n root.deiconify()\n top.destroy()\n\n exit2_btn = Button(top, text=\"EXIT\", font=(\"Times New Roman\", 12, \"bold\"), bg=\"gray\", command=hide_open2)\n exit2_btn.grid(row=20, column=1, columnspan=2, pady=25, padx=(100, 0), ipadx=50)\n\n def check_given_data():\n \"\"\" This function check whether data given by user valid or not. \"\"\"\n if os.path.exists(path_name.get()) and folder_name.get() != \"\":\n # if True:\n progress = ThemedTk(theme=\"aqua\")\n progress.title(\"Progress\")\n\n top.withdraw()\n\n info_label = Label(progress, text=\"Attentive set filtering is on progress\", font=(\"Times New Roman\", 12, \"bold\"))\n info_label.pack(pady=10)\n progress_bar = Progressbar(progress, orient=HORIZONTAL, length=220, mode='determinate')\n progress_bar.pack(pady=20)\n filter_dataset(path_name.get(), folder_name.get(), progress_bar, info_label, progress, root)\n\n else:\n messagebox.showerror(\"Message\", \"Please enter valid directory path\\n \\\n and folder name\")\n\n # show and exit button\n next_page_btn = Button(top, text=\"Next Page\", font=(\"Times New Roman\", 12, \"bold\"), bg=\"gray\",\n command=check_given_data) # next_page_interface(top, root)\n next_page_btn.grid(row=20, column=0, columnspan=2, pady=25, padx=(0, 200), ipadx=22)\n\n except Exception as e:\n messagebox.showerror(\"Message\", \"Error in the data processing interface\")", "def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)", "def ImgDirDialog(self):\r\n \r\n self.img_dir = tk.filedialog.askdirectory(title = \"Select Destination Directory for image data\")\r\n self.file_names = [fn for fn in sorted(os.listdir(self.img_dir)) if any(fn.endswith(ext) for ext in file_extensions)]\r\n self.paths = [self.img_dir + '/' + file_name for file_name in self.file_names]\r\n \r\n # Number of labels and paths\r\n self.n_labels = len(self.labels)\r\n self.n_paths = len(self.paths)\r\n \r\n # set image container to first image\r\n self.set_image(self.paths[self.index])\r\n \r\n # if copy_or_move == 'copy':\r\n # try:\r\n # df = pd.read_csv(df_path, header=0)\r\n # # Store configuration file values\r\n # except FileNotFoundError:\r\n # df = pd.DataFrame(columns=[\"im_path\", 'sorted_in_folder'])\r\n # df.im_path = self.paths\r\n # 
df.sorted_in_folder = self.paths\r\n \r\n if copy_or_move == 'move':\r\n self.df = pd.DataFrame(columns=[\"im_path\", 'sorted_in_folder'])\r\n self.df.im_path = self.paths\r\n self.df.sorted_in_folder = self.paths", "def load_analysis_file_clicked(self):\n if len(self._file_table_model._filelist) == 0:\n no_image_loaded_error_msg = QMessageBox(self)\n no_image_loaded_error_msg.setText(\"Can't load analysis file\")\n no_image_loaded_error_msg.setDetailedText(\"Please load image file before loading analysis file.\")\n no_image_loaded_error_msg.show()\n else:\n [tf, filename] = self._main_controller.load_analysis_file()\n if (self._model.has_segmentation_image) and tf:\n self._filter_controller.index_objects()\n self._ui.analysisFileDisplay.setText(filename)", "def start_button_action(self):\n if self.dynamic.output_file.text() and os.path.isdir(\n self.dynamic.output_directory.text()\n ):\n\n additional_settings = {\n \"Save_data\": True,\n \"Filepath\": self.dynamic.output_directory.text(),\n \"Filename\": self.dynamic.output_file.text(),\n \"skip_init\": False,\n }\n\n # Generate a Lookuptable for the plots\n steps = (\n int(\n abs(\n float(self.dynamic.max_voltage_IV.value())\n / float(self.dynamic.voltage_steps_IV.value())\n )\n )\n + 1\n )\n self.cmapLookup = self.cmap.getLookupTable(1.0, 3.0, steps)\n self.variables.reset_plot_data()\n\n self.generate_dynamicwaiting_job(additional_settings)\n # self.variables.reset_plot_data()\n\n else:\n reply = QMessageBox.information(\n None,\n \"Warning\",\n \"Please enter a valid filepath and filename.\",\n QMessageBox.Ok,\n )", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def load_batch(self, request, *args, **kwargs):\n try:\n # get a list of the files in the associated path\n base_path = self.request.user.profile.Window_path\n file_list = listdir(base_path)\n # include only csv files\n file_list = [el[:-4].replace('BF_', '') for el in file_list if ('.tif' in el) and ('.xml' not in el)\n and ('_BF_' in el)]\n # get a list of the existing file names\n existing_rows = [el[0] for el in Window.objects.values_list('slug')]\n # for all the files\n for file in file_list:\n # check if the entry already exists\n if file.lower() in existing_rows:\n # if so, skip making a new one\n continue\n # get the data for the entry\n data_dict = parse_path_image(file, self, 'Window_path')\n print(data_dict)\n # check the paths in the filesystem, otherwise leave the entry empty\n for key, value in data_dict.items():\n if (isinstance(value, str)) and ('Path' in key) and (not exists(value)):\n data_dict[key] = ''\n # create the model instance with the data\n model_instance = Window.objects.create(**data_dict)\n\n # save the model instance\n model_instance.save()\n\n return HttpResponseRedirect('/loggers/window/')\n except:\n return HttpResponseBadRequest('loading file failed, check file names')", "def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()", "def preload_pathimgs(self, pathimgs):\n self.pathimgs = pathimgs\n print('build list images :' + 
self.pathimgs)\n listfiles = self.get_list_files()\n listfiles.sort(key=lambda v: v.upper())\n for imgpath in listfiles:\n if imgpath.endswith('gif'):\n listgif = self.build_list_gif(imgpath)\n self.listimages += listgif * self.passgif\n self.tempo += [self.durationgif] * len(listgif) * self.passgif\n else:\n img = Image.open(imgpath)\n img = img.resize((self.matrix.width, self.matrix.height), Image.ANTIALIAS)\n self.listimages.append(img.convert('RGB'))\n self.tempo += [self.durationimg]\n print(\" duration: {}s, {} Images\".format(int(sum(self.tempo, 0)), len(self.listimages)))", "def load(self, step=0):\n \n # take a step, if requested\n self.step_and_validate(step)\n \n with self.img_output:\n clear_output(wait=True)\n display(Image(self.imgs[self.i], width=850, unconfined=True))", "def load_image(self, **kwargs):\n ...", "def btn_display_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n self.img_displayer.new_display(image_fio, name)\n self.show_as_waiting(False)", "def initGui(self):\n\n icon_path = ''\n self.add_action(\n icon_path,\n text=self.tr(u'圖資操作處理工具v3.0'),\n callback=self.run,\n parent=self.iface.mainWindow())\n \n \n\n #照片EXIF定位ImportPhotos初始參數\n self.clickPhotos = self.add_action(\n icon_path,\n text=self.tr(u'檢視照片'),\n callback=self.mouseClick,\n parent=self.iface.mainWindow())\n \n self.clickPhotos.setCheckable(True)\n self.clickPhotos.setEnabled(True)\n # self.iface.addPluginToMenu(TFB_Tools3.description(), self.action)\n self.listPhotos = []\n self.layernamePhotos = [] \n self.toolMouseClick = MouseClick(self.canvas, self)\n \n\n self.fields = ['ID', 'Name', 'Date', 'Time', 'Lon', 'Lat', 'Altitude', 'North', 'Azimuth', 'Camera Maker',\n 'Camera Model', 'Title', 'Comment', 'Path', 'RelPath', 'Timestamp']\n\n self.extension_switch = {\n \".shp\": \"ESRI Shapefile\",\n \".geojson\": \"GeoJSON\",\n \".gpkg\":\"GPKG\",\n \".csv\": \"CSV\",\n \".kml\": \"KML\",\n \".tab\": \"MapInfo File\"\n }\n\n self.extension_switch2 = {\n \"ESRI Shapefile (*.shp *.SHP)\": \".shp\",\n \"GeoJSON (*.geojson *.GEOJSON)\": \".geojson\",\n \"GeoPackage (*.gpkg *.GPKG)\":\".gpkg\",\n \"Comma Separated Value (*.csv *.CSV)\": \".csv\",\n \"Keyhole Markup Language (*.kml *.KML)\": \".kml\",\n \"Mapinfo TAB (*.tab *.TAB)\": \".tab\"\n }\n\n self.extension_switch_types = {\n \".shp\": \"ESRI Shapefile\",\n \".geojson\": \"GeoJSON\",\n \".gpkg\":\"GPKG\",\n \".csv\": \"CSV\",\n \".kml\": \"KML\",\n \".tab\": \"MapInfo File\"\n }\n\n ############################\n\n self.pointEmitter = QgsMapToolEmitPoint(self.iface.mapCanvas())\n self.iface.mapCanvas().setMapTool( self.pointEmitter )", "def load_images(image_filename):\n\n # Write code here to loop over image data and populate DB.", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def _load_button_cb(self):\n filename = askopenfilename(\n defaultextension='pickle',\n filetypes=((\"Pickle files\", \"*.pickle\"), ('All files', '*.*'))\n )\n\n with 
open(filename, 'rb') as f:\n occupancy_grid = pickle.load(f)\n\n self.resolution = occupancy_grid.info.resolution\n self.origin_x = occupancy_grid.info.origin.position.x\n self.origin_y = occupancy_grid.info.origin.position.y\n\n grid_data = np.array(occupancy_grid.data)\n grid_data = np.reshape(grid_data, (occupancy_grid.info.height, -1))\n # Don't flip the map so it's easier to handle the transformations\n #grid_data = np.flip(grid_data, 0)\n\n map_image = Image.fromarray(np.uint8(grid_data))\n tk_map_image = ImageTk.PhotoImage(map_image)\n\n self.canvas.create_image(0, 0, image=tk_map_image, anchor='nw')\n\n label = tk.Label(image=tk_map_image)\n label.image = tk_map_image", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. \n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def on_extpushButton_clicked(self):\n # TODO: not implemented yet\n self.progressBar.setValue(0)\n py = self.file_path()\n outpath = self.pathlineEdit.text()\n fileList = []\n if not self.malistWidget.count() ==0:\n for i in list(range(1, self.malistWidget.count() + 1)):\n fileList.append(self.malistWidget.item(i - 1).text())\n\n self.progressBar.setMaximum(len(fileList))\n self.staBrowser.setText(u'获取'+str(len(fileList))+u'个文件')\n\n for f in fileList:\n cmd = 'mayapy \"{}\" \"{}\" \"{}\"'.format(py, f, outpath)\n os.chdir(\"C:/Program Files/Autodesk/Maya2017/bin\")\n print cmd\n pro = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = pro.communicate()\n print out\n print err\n # import exportABC\n\n\n self.staBrowser.setText(u'正在输出 ' + f)\n\n # # exportABC.expabc(f,outpath)\n self.progressBar.setValue(fileList.index(f)+1)\n self.staBrowser.setText(f+u'完成')\n self.staBrowser.setText(u'输出完成')", "def show_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n #### added in version 2\r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string 
= self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n #Add Current Label\r\n print(sorting_string)\r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n ####\r\n\r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def addLoadPaths(self):\n #create a static boxsizer\n load_box = wx.StaticBox(self, label=\"Step 1: Input Data\")\n box_sizer = wx.StaticBoxSizer(load_box, wx.VERTICAL)\n fgs = wx.FlexGridSizer(rows=2, cols=2, vgap=10, hgap=10)\n box_sizer.Add(fgs, proportion=1, flag=wx.EXPAND)\n\n #actual data handled by a FGS\n self.data_btn = wx.Button(self, label=\"Load Data\")\n self.data_btn.Bind(wx.EVT_BUTTON, self.onLoadData)\n\n self.assign_btn = wx.Button(self, label=\"Add Assignments\")\n self.assign_btn.Bind(wx.EVT_BUTTON, self.onAddAssignment)\n self.assign_btn.Disable()\n\n pub.subscribe(self.dataLoaded, \"data_loaded\")\n \n fgs.Add(self.data_btn, proportion=1, flag = wx.EXPAND)\n fgs.Add(wx.StaticText(self), proportion=1, flag = wx.EXPAND)\n\n fgs.Add(self.assign_btn)\n btn_label = wx.StaticText(self, label=\"(optional)\")\n new_font = btn_label.GetFont()\n new_font.SetStyle(wx.FONTSTYLE_ITALIC)\n btn_label.SetFont(new_font)\n fgs.Add(btn_label)\n \n \n fgs.Add(wx.StaticText(self), proportion=1, flag = wx.EXPAND)\n\n self.SetSizerAndFit(box_sizer)", "def plot_chosen_data(main, dataPath):\n error = \"Error \"+errorPath+\"plot_chosen_data: Must choose data of proper format (tiff, jpeg, etc.)\"\n try:\n if dataPath == '':\n main.msg('thinks it has nothing')\n main.msg(error)\n return\n data = mpimg.imread(dataPath)\n imgObj = Img.Img(data, title = os.path.basename(dataPath), filePath = dataPath)\n main.imgObjList.append(imgObj)\n main.horizontalSlider.setMaximum(len(main.imgObjList)-1)\n main.horizontalSlider.setValue(main.horizontalSlider.maximum())\n func.plot_img_obj(main, imgObj)\n except:\n main.msg(error)", "def imagePages(files, choice):\n options = [\"Byte\", \"Markov\", \"Hilbert\"]\n type = options[int(ui.prompt(\"Choose a visualization type\", options))]\n\n targets = []\n pageNames = []\n pageSize = 100\n pages = range(math.ceil(len(files)/pageSize))\n for page in pb.progressbar(pages):\n # print(\"\\nPage {}/{}\".format(page+1, len(pages)))\n gc.collect() # Garbage collect\n\n images = []\n start = page*pageSize\n if choice == \"Create\":\n images, targets = buildImages(files[start:start+pageSize], targets, type)\n elif choice == \"Load\":\n images, targets = 
loadImages(files[start:start+pageSize], targets)\n pageNames.append(\"./pages/images_page{}.npy\".format(page))\n np.save(pageNames[-1], images)\n return targets, pageNames", "def help_load_file():\n global picture_lst\n picture_lst = [\"test/load1.png\", \"test/load2.png\", \"test/load3.png\", \"test/load4.png\"]\n help_main()", "def image_viewer():\n key_file = auth_form()\n sg.theme('DarkBlue') \n layout = [\n [\n sg.Text(\"Image File\"),\n sg.Input(size=(25, 1), enable_events=True, key=\"file\"),\n sg.FilesBrowse(file_types=((\"Encrypted Images\", \"*.enc\"),)),\n sg.Button(\"Prev\"),\n sg.Button(\"Next\"),\n sg.Button(\"Export\")\n ],\n [sg.Image(key=\"image\")]\n ]\n window = sg.Window('Encrypted Image Viewer', layout, resizable=True).Finalize()\n window.Maximize()\n images = []\n location = 0\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n sys.exit()\n if event == \"file\":\n images = values[\"file\"].split(';')\n if images:\n image_decrypted = load_image(images[0], window, key_file)\n if event == \"Next\" and images:\n if location == len(images) - 1:\n location = 0\n else:\n location += 1\n image_decrypted = load_image(images[location], window, key_file)\n if event == \"Prev\" and images:\n if location == 0:\n location = len(images) - 1\n else:\n location -= 1\n image_decrypted = load_image(images[location], window, key_file)\n \n if event == \"Export\" and images:\n image_decrypted.show()\n window.close()", "def press(button):\r\n if button == 'Process':\r\n src_file = app.getEntry('Input_File')\r\n dest_dir = app.getEntry('Output_Directory')\r\n out_file = app.getEntry('Output_name')\r\n points = app.getEntry('Points')\r\n out_file = out_file + '.csv'\r\n plot = app.getCheckBox('Plot Results')\r\n iter(src_file, Path(dest_dir, out_file), points,plot)\r\n else:\r\n app.stop()", "def load(self):\n dir = '.'\n ext = '.saved_story'\n saved_stories = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir,f)) and f.endswith(ext)]\n saved_stories.sort()\n \n from direct.gui.DirectGui import DirectScrolledList,DirectButton\n labels = []\n for saved_story in saved_stories:\n filename,ext = os.path.splitext(saved_story)\n l = DirectButton(text=filename, scale=0.05, command=self._load, extraArgs=[saved_story])\n labels.append(l) \n self.load_list = DirectScrolledList(\n decButton_pos= (0.35, 0, 0.53),\n decButton_text = \"/\\\\\",\n decButton_text_scale = 0.04,\n decButton_borderWidth = (0.005, 0.005),\n \n incButton_pos= (0.35, 0, -0.02),\n incButton_text = \"\\\\/\",\n incButton_text_scale = 0.04,\n incButton_borderWidth = (0.005, 0.005),\n \n #frameSize = (0.0, 0.7, -0.05, 0.59),\n #frameColor = (1,0,0,0.5),\n pos = self.load_button.getPos(aspect2d),\n items = labels,\n numItemsVisible = 4,\n forceHeight = 0.11,\n itemFrame_frameSize = (-0.3, 0.3, -0.37, 0.11),\n itemFrame_pos = (0.35, 0, 0.4),\n )", "def download_images_tiff(self):\n self.show_as_waiting(True)\n self.download_images('TIFF')\n self.show_as_waiting(False)", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = file_image.postprocess()\n 
'''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of bilaterally filtered image 
and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def images(self, **kwargs):\n\n raise NotImplementedError", "def _showphotos(self, btn):\n global layout\n global curdir\n\n # Create the layouts.\n layout = GridLayout(cols=5, padding=0, spacing=0, size_hint=(1, None))\n layout.bind(minimum_height=layout.setter(\"height\"))\n\n foldername = btn\n\n # Args is combined with \"curdir\" to load the thumbnails, and add them to the Gridlayout.\n if foldername == \"\":\n pass\n else:\n for filename in sorted(glob(join(curdir, \"thumb\", foldername, \"*\"))):\n try:\n canvas = BoxLayout(size_hint=(1, None))\n im = Image(source=filename)\n canvas.add_widget(im)\n 
layout.add_widget(canvas)\n\n except Exception:\n print(\"Pictures: Unable to load <%s>\" % filename)\n\n return layout", "def filebrowse_png(self, test=False):\n\n if self.timer.isActive():\n # this can be done in parallel if you implement multithreading as done in the dicom editor\n functions.auxiliary.popupmsg(\"please pause video\", \"warning\")\n return\n\n a = QtWidgets.QFileDialog()\n a.setDirectory(\"./data/png/\") #std directory\n\n if test is False:\n path = str(a.getExistingDirectory(MainWindow, 'select folder with pngs'))\n else:\n path = \"/home/jelle/PycharmProjects/UTMR/data/png/0315_moving_blur\"\n # 'get existing directory' never uses the final '/' so you have to manually input it.\n self.lineEdit_importpath.setText(path)\n filelist = os.listdir(path)\n filelist.sort()\n if functions.auxiliary.checkifpng(filelist) == 0:\n functions.auxiliary.popupmsg(\"NO PNG IN FOLDER\", \"warning\")\n self.pb_play.setEnabled(False)\n self.pb_play.setToolTip(\"Try selecting a folder with .png\")\n return\n self.pb_play.setEnabled(True)\n self.pb_play.setToolTip(\"\")\n\n cropsize = [58, 428, 243, 413] # im = im[58:428, 143:513]\n self.imlist = functions.auxiliary.loadin(filelist, path, size=cropsize)\n self.CurMov.get_imlist(imlist=self.imlist)\n self.progress_bar.setMaximum(self.CurMov.maxframes)\n self.update_all_things()\n # print(SliderClass.all_sliders)", "def view_thumbnail_complete():\n curItem = complete_tereeview.focus().strip('#')\n with open(\"images_url_dict.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = \"-\".join(curItem.lower().split())\n try:\n \"\"\"look for entry info from local database\"\"\"\n img_list = imgs_dict[name]\n img_url = img_list[0]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n\n except KeyError:\n print(\"Failed series image list....\")\n with open(\"Movies_List.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = curItem\n try:\n img_list = imgs_dict[name]\n img_url = img_list[1]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n except Exception as error_ml:\n print(\"Failed using movie list Error :: \\n\", error_ml)\n\n\n except Exception as local_excep:\n\n print(\"ERROR :: \" + str(local_excep))", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images 
list.\n return images", "def image_list(path):\n global user_select\n user_list = os.listdir(path)\n print('=======================================')\n print('Listing files...')\n for i, v in enumerate(user_list):\n print(i, v)\n print('=======================================')\n user_select = input_path+'/'+user_list[int(input('Select images which you what to scale: '))]\n print(\"Selected: \", user_select)", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def OnLoadComplete(self, event):\n message = \"[{}] Complete model loading\".format(datetime.datetime.now().strftime(\"%H:%M:%S\"))\n print message\n self.SetStatusText(message)\n print dir_list\n self.__root_panel.SetBackgroundColour('white')\n for p in self.__panels:\n if not p.isTextCustomized():\n path = p.getImagePath()\n cls = getClassIndex(my_model, path)\n text = dir_list[cls]\n p.setPanel(path, text)", "def on_pushButton_2_clicked(self):\n # TODO: not implemented yet\n try:\n str='str.png'\n process_pic.graphics ().process (str)\n self.click=\"process\"\n pixMap = QPixmap(\"temp.png\").scaled(self.label.width(),self.label.height())\n self.label.setPixmap(pixMap)\n except:\n button=QMessageBox.about(self, '注意', '应先向空白处导入图片后再进行处理')\n else:\n pass\n\n\n\n #os.popen('python process_pic.py')", "def accept(self):\n self.selectedoptions = []\n model = self.lstLayers.model()\n for i in range(model.rowCount()):\n item = model.item(i)\n if item.checkState() == Qt.Checked:\n self.selectedoptions.append(item.text())\n # Error message if no layer is selected\n if len(self.selectedoptions) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"No layer selected!\")\n msg.setInformativeText(\"Select at least one layer to generate image of the map\")\n msg.setWindowTitle(\"Error\")\n msg.show()\n msg.exec_()\n else:\n layers = []\n for option in self.selectedoptions:\n layers.append(QgsProject.instance().mapLayersByName(option)[0])\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n # Store string of directory path, use interactive dialog to let user select the directory\n img_path, _ = QFileDialog.getSaveFileName(self, \"Save Image\", option, \"PNG (*.png)\", options=options)\n if img_path.endswith(\".png\"):\n pass\n else:\n img_path += \".png\"\n # render map image\n settings = QgsMapSettings()\n settings.setOutputSize(QSize(512, 512))\n settings.setExtent(layers[0].extent())\n settings.setLayers(layers)\n job = 
QgsMapRendererSequentialJob(settings)\n job.start()\n job.waitForFinished()\n img = job.renderedImage()\n img.save(img_path, \"png\")\n QDialog.accept(self)", "def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFile = file\n self.image.setPixmap(QtGui.QPixmap(file))\n self.image.adjustSize()", "def on_source_img_browse_btn_click(self):\r\n\t\tdlg = QFileDialog()\r\n\t\toptions = dlg.Options()\r\n\t\toptions |= QFileDialog.DontUseNativeDialog\r\n\t\tsource_img_filename, _ = dlg.getOpenFileName(\r\n\t\t\tself,\r\n\t\t\t\"Select Input Numpy Array\",\r\n\t\t\t\".\",\r\n\t\t\t\"NumPy Files (*.npy)\",\r\n\t\t\toptions=options)\r\n\t\tif source_img_filename:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t\tself.refresh_UI()", "def refreshImages(self):\n fileName1 = \"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs", "def load_dataset(display = False, flag=\"train\"):\n\n if flag ==\"train\":\n print(\"Loading training set image X ...\")\n train_X_data = DataUtils(filename=trainfile_X).getImage()\n print(\"Loading training set label y ...\")\n train_y_data = DataUtils(filename=trainfile_y).getLabel()\n print(\"size of training set X = \", train_X_data.shape)\n print(\"size of training set y = \", train_y_data.shape) \n\n if display:\n path_trainset = \"MNIST_dataset/imgs_train\"\n if not os.path.exists(path_trainset):\n os.mkdir(path_trainset)\n outImg(train_X_data, train_y_data, 30, out_path)\n DataUtils(outpath=path_trainset).outImg(train_X_data, train_y_data, 30)\n\n return train_X_data, train_y_data\n \n elif flag == \"test\":\n print(\"Loading test set image X ...\")\n test_X_data = DataUtils(testfile_X).getImage()\n print(\"Loading test set label y ...\")\n test_y_data = DataUtils(testfile_y).getLabel()\n print(\"size of test set X = \", test_X_data.shape)\n print(\"size of test set y = \", test_y_data.shape)\n\n if display:\n path_testset = \"MNIST_dataset/imgs_test\"\n if not os.path.exists(path_testset):\n os.mkdir(path_testset)\n DataUtils(outpath=path_testset).outImg(test_X_data, test_y_data, 30)\n\n return test_X_data, test_y_data", "def open_slot(self):\n caption = 'Open files'\n directory = './'\n filter_mask = \"JPEG File Interchange Format (*.jpg *.jpeg *jfif)|\" + \"*.jpg;*.jpeg;*.jfif\"\n files = QFileDialog.getOpenFileNames(None, caption, directory, filter_mask)[0]\n self._model.set_filenames(files)\n if len(files) > 1:\n self._ui.bt_next.setEnabled(True)\n self._ui.bt_prev.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_left.setEnabled(True)\n elif len(files) == 1:\n self._ui.bt_left.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_next.setEnabled(False)\n 
self._ui.bt_prev.setEnabled(False)\n else:\n self._ui.bt_left.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n\n self.refresh_images()", "def add_images(self, images_list, show_pbar=False):\n filenames = []\n if show_pbar:\n images_list = tqdm(images_list)\n for image in images_list:\n filenames.append(self.add_image(image))\n return filenames", "def handle_result(self):\n if not self.scidb_connector.running and self.scidb_connector.result_queue.qsize() == 0:\n print >> sys.stdout, 'Stopping Image Loader...'\n return\n \n if self.scidb_connector.result_queue.qsize():\n try:\n print >> sys.stdout, 'Loading image...'\n start_time = time.clock()\n \n query_json = self.scidb_connector.result_queue.get(0)\n result = SciDB.SciDB_Result(query_json)\n \n # Make figure\n if query_json['request']['options']['iterative']:\n scatter = scalrr_vis.plot_image(result, ZSCALE, master=self.master, title=\"Iteration \" + str(len(self.iterations) + 1))\n self.master = result.data_array\n \n self.add_graph(scatter)\n else:\n self.master = None\n scatter = scalrr_vis.plot_image(result, ZSCALE, title=query_json['request']['query'])\n \n self.update_graph(scatter)\n \n end_time = time.clock() \n print >> sys.stdout, 'Done... ', end_time-start_time\n \n self.ui_root.after(500, self.handle_result)\n except Queue.Empty:\n pass\n \n else:\n self.ui_root.after(1000, self.handle_result)", "def download_multiple(select_files, savepath, id, ext):\n with zipfile.ZipFile(savepath + '/processed_images.zip', mode='w') as zf:\n\n for file in select_files:\n pro_img, _, _, _, _ = get_image_pair(file, id)\n output = io.BytesIO()\n pro_img.save(output, format=ext)\n filename = file + '.' + ext\n zf.writestr(filename, output.getvalue())", "def load_images(self, filename):\n\n self.images = self.load(filename)\n self.length = len(self.images)\n self.create_teacher()", "def gather_images():\n # Import an empty image\n null_img = Image.open('assests/null/null.png')\n null_img = ImageTk.PhotoImage(null_img.resize((100,100), Image.ANTIALIAS))\n\n # Import image and icon for X\n X_img = Image.open('assests/X_Assets/X.png')\n X_icon = ImageTk.PhotoImage(X_img.resize((15, 12), Image.ANTIALIAS))\n X_img = ImageTk.PhotoImage(X_img.resize((95, 80), Image.ANTIALIAS))\n\n # Import horizontally striked X\n X_hor = Image.open('assests/X_Assets/X_hor.png')\n X_hor = ImageTk.PhotoImage(X_hor.resize((95, 80), Image.ANTIALIAS))\n\n # Import vertically striked X\n X_vert = Image.open('assests/X_Assets/X_vert.png')\n X_vert = ImageTk.PhotoImage(X_vert.resize((95, 80), Image.ANTIALIAS))\n\n # Import diagonally strikedX\n X_diag = Image.open('assests/X_Assets/X_diag.png')\n X_diag = ImageTk.PhotoImage(X_diag.resize((95, 80), Image.ANTIALIAS))\n\n # Import another diagonally striked X\n X_diag2 = Image.open('assests/X_Assets/X_diag2.png')\n X_diag2 = ImageTk.PhotoImage(X_diag2.resize((95, 80), Image.ANTIALIAS))\n\n # Import image and icon for O\n O_img = Image.open('assests/O_Assets/O.png')\n O_icon = ImageTk.PhotoImage(O_img.resize((14, 14), Image.ANTIALIAS))\n O_img = ImageTk.PhotoImage(O_img.resize((90, 90), Image.ANTIALIAS))\n\n # Import horizontally striked O\n O_hor = Image.open('assests/O_Assets/O_hor2.png')\n O_hor = ImageTk.PhotoImage(O_hor.resize((90, 90), Image.ANTIALIAS))\n\n # Import vertically striked O\n O_vert = Image.open('assests/O_Assets/O_vert2.png')\n O_vert = ImageTk.PhotoImage(O_vert.resize((90, 90), Image.ANTIALIAS))\n\n # Import diagonally striked 
O\n O_diag = Image.open('assests/O_Assets/O_diag.png')\n O_diag = ImageTk.PhotoImage(O_diag.resize((90, 90), Image.ANTIALIAS))\n\n # Import another diagonally striked O\n O_diag2 = Image.open('assests/O_Assets/O_diag2.png')\n O_diag2 = ImageTk.PhotoImage(O_diag2.resize((90, 90), Image.ANTIALIAS))\n\n return (null_img, X_icon, X_img, X_hor, X_vert, X_diag, X_diag2, O_icon, O_img, O_hor, O_vert, O_diag, O_diag2)", "def load_image(self):\n # Minimal progress display while image is loaded.\n group = displayio.Group()\n group.append(centered_label('LOADING...', 40, 3))\n #self.rect = Rect(-board.DISPLAY.width, 120,\n # board.DISPLAY.width, 40, fill=0x00B000)\n #group.append(self.rect)\n board.DISPLAY.show(group)\n\n # pylint: disable=eval-used\n # (It's cool, is a 'trusted string' in the code)\n duration = eval(TIMES[self.time]) # Playback time in seconds\n # The 0.9 here is an empirical guesstimate; playback is ever-so-\n # slightly slower than benchmark speed due to button testing.\n rows = int(duration * self.rows_per_second * 0.9 + 0.5)\n # Remap brightness from 0.0-1.0 to brightness_range.\n brightness = (self.brightness_range[0] + self.brightness *\n (self.brightness_range[1] - self.brightness_range[0]))\n try:\n self.num_rows = self.bmp2led.process(self.path + '/' +\n self.images[self.image_num],\n self.tempfile,\n rows, brightness,\n self.loop,\n self.load_progress)\n except (MemoryError, BMPError):\n group = displayio.Group()\n group.append(centered_label('TOO BIG', 40, 3))\n board.DISPLAY.show(group)\n sleep(4)\n\n board.DISPLAY.show(displayio.Group()) # Clear display\n self.clear_strip() # LEDs off", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def prepare_images(self):\n\n qt_original_image = self.convert_image_to_QTformat(self.original_image)\n self.send_original_photo_to_gui.emit(qt_original_image)\n\n self.processed_image = self.procces_image(self.original_image)\n qt_processed_image = self.convert_image_to_QTformat(self.processed_image)\n self.send_processed_photo_to_gui.emit(qt_processed_image)", "def view_thumbnail_break():\n curItem = onbreak_treeview.focus().strip('#')\n with open(\"images_url_dict.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = \"-\".join(curItem.lower().split())\n try:\n \"\"\"look for entry info from local database\"\"\"\n img_list = imgs_dict[name]\n img_url = img_list[0]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n\n except KeyError:\n print(\"Failed series image list....\")\n with open(\"Movies_List.json\", \"r\") as f:\n imgs_dict = json.load(f)\n\n name = curItem\n try:\n img_list = imgs_dict[name]\n img_url = img_list[1]\n print(img_list)\n r = requests.get(img_url, stream=True, headers={'User-agent': 'Mozilla/5.0'})\n if r.status_code == 
200:\n with open(\"thumbnails\\\\{}.jpg\".format(name), 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"Done downloading\")\n image = ImageTk.PhotoImage(Image.open(\"thumbnails\\\\{}.jpg\".format(name)))\n # image = PhotoImage(file='thumbnails\\\\search_ico.png').subsample(12, 12)\n Label.image = image\n preview_box.window_create(index=1.0, window=Label(preview_box, image=image))\n except Exception as error_ml:\n print(\"Failed using movie list Error :: \\n\", error_ml)\n\n\n except Exception as local_excep:\n\n print(\"ERROR :: \" + str(local_excep))", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def updateImages(self, msg, arg2=None):\n\t\tself.picPaths = msg\n\t\tself.totalPictures = len(self.picPaths)\n\t\tself.loadImage(self.picPaths[0])", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def setFilenames(self, filenames):\n\t\tself.filenames = filenames\n\t\tif len(filenames) == 0:\n\t\t\treturn\n\n\t\tif not self.dimensions:\n\t\t\tself.retrieveImageInfo(filenames[0])\n\n\t\tif not self.checkImageDimensions(filenames):\n\t\t\traise Logging.GUIError(\"Image dimensions do not match\", \\\n\t\t\t\t\t\t\t\t\t\"Some of the selected files have differing dimensions, \\\n\t\t\t\t\t\t\t\t\tand cannot be imported into the same dataset.\")\t\t \n\t\tself.getReadersFromFilenames()\n\t\tself.numberOfImages = len(filenames)\n\t\tif self.is3D:\n\t\t\tif self.readers:\n\t\t\t\tself.numberOfImages = 0\n\t\t\t\tfor rdr in self.readers:\n\t\t\t\t\tself.numberOfImages += rdr.GetNumberOfSubFiles()", "def _populateImagesList(self, images_list):\n\n def callback_factory(img_obj):\n def callback(instance):\n self.root.images_gallery.scatter_image.img_obj = img_obj \n self.root.images_gallery.scatter_image.source = img_obj.path\n self.root.images_gallery.scatter_image.parent.rotation = -img_obj._yaw\n return callback\n \n for img_path, img_tn_path, data_path in images_list:\n \n img_obj = AUVSIcv.Image(img_path, data_path)\n self.imgs_list.append(img_obj)\n \n btn = Button(\n size_hint=(None, None),\n size=(100, 75),\n background_normal=img_tn_path,\n border=(0,0,0,0)\n )\n \n btn.bind(on_press=callback_factory(img_obj))\n self.root.images_gallery.stacked_layout.add_widget(btn)", "def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)", "def next_page_interface(source_path_name, dataset_path, attentive, not_attentive, root):\n try:\n next_instance = ThemedTk(theme=\"aqua\")\n next_instance.title(\"Data Processing\")\n next_instance.geometry(\"630x400\")\n\n # label at the top of person database name\n my_label = Label(next_instance, text=\"Data Processing\", font=(\"Times New Roman\", 18, \"bold\"))\n my_label.grid(row=0, column=0, columnspan=2, padx=(35, 0), pady=(10, 30))\n\n # label and input field for image count\n image_count_label = Label(next_instance, text=\"Image Count For Processing:\", font=(\"Times New Roman\", 12, \"bold\"))\n image_count_label.grid(row=14, column=0, padx=(10, 10), pady=(10, 20))\n\n total_count = DoubleVar(next_instance)\n default_len = min(len(attentive), len(not_attentive)) // 2\n 
total_count.set(default_len)\n\n image_count = Scale(next_instance, from_=200, to=default_len, sliderlength=20, length=270, orient=HORIZONTAL,\n variable=total_count)\n image_count.grid(row=14, column=1, padx=(0, 0), pady=(10, 20))\n\n # label and input field for training rate\n options = ['50%', '60%', '70%', '80%']\n\n training_rate_clicked = StringVar(next_instance)\n training_rate_clicked.set(\"50%\")\n\n training_rate_label = Label(next_instance, text=\"Image count For Training :\", font=(\"Times New Roman\", 12, \"bold\"))\n training_rate_label.grid(row=16, column=0, padx=(10, 10), pady=(10, 0))\n\n training_rate = OptionMenu(next_instance, training_rate_clicked, *options)\n training_rate.config(width=40)\n training_rate.grid(row=16, column=1, padx=20, pady=(10, 0))\n\n # label and input field for dimension of images\n options = ['64 x 64', '100 x 100', '128 x 128']\n\n dimension_clicked = StringVar(next_instance)\n dimension_clicked.set(\"100 x 100\")\n\n dimension_label = Label(next_instance, text=\"Dimension :\", font=(\"Times New Roman\", 12, \"bold\"))\n dimension_label.grid(row=18, column=0, padx=(30, 10), pady=(30, 30))\n\n dimension = OptionMenu(next_instance, dimension_clicked, *options)\n dimension.config(width=40)\n dimension.grid(row=18, column=1, padx=20, pady=(30, 30))\n\n def default_setting():\n \"\"\" This function changes total images count, training rate and dimension of images \"\"\"\n total_count.set(default_len)\n training_rate_clicked.set('50%')\n dimension_clicked.set('100 x 100')\n\n default_btn = Button(next_instance, text=\"Default Model\", font=(\"Times New Roman\", 12, \"bold\"), bg=\"gray\",\n command=default_setting)\n default_btn.grid(row=20, column=0, pady=10, padx=10, ipadx=30)\n\n # show and exit button\n process_btn = Button(next_instance, text=\"Process Data\", font=(\"Times New Roman\", 12, \"bold\"),\n command=lambda: loading_data(source_path_name, dataset_path, attentive, not_attentive,\n image_count.get(), training_rate_clicked.get(),\n dimension_clicked.get(), next_instance, root))\n process_btn.grid(row=20, column=1, pady=10, padx=10, ipadx=30)\n\n def hide_open2():\n root.deiconify()\n next_instance.destroy()\n\n exit2_btn = Button(next_instance, text=\"EXIT\", font=(\"Times New Roman\", 12, \"bold\"), bg=\"gray\",\n command=hide_open2)\n exit2_btn.grid(row=22, column=0, columnspan=2, pady=10, padx=10, ipadx=60)\n\n except:\n messagebox.showerror(\"Message\", \"Error in the data processing interface\")", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def handleAddFileButtonClicked(self):\n # Find the directory of the most recently opened image file\n mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )\n if mostRecentImageFile is not None:\n defaultDirectory = os.path.split(mostRecentImageFile)[0]\n else:\n defaultDirectory = os.path.expanduser('~')\n\n # Launch the \"Open File\" dialog\n fileNames = self.getImageFileNamesToOpen(defaultDirectory)\n\n # If the user didn't cancel\n if len(fileNames) > 0:\n PreferencesManager().set('DataSelection', 'recent image', fileNames[0])\n try:\n self.addFileNames(fileNames)\n except RuntimeError as e:\n QMessageBox.critical(self, \"Error loading file\", str(e))", "def 
help_download_file():\n global picture_lst\n picture_lst = [\"test/download1.png\", \"test/download2.png\", \"test/download3.png\", \"test/download4.png\",\n \"test/download5.png\", \"test/download6.png\", \"test/download7.png\", \"test/download8.png\",\n \"test/download9.png\", \"test/download10.png\", \"test/download11.png\", \"test/download12.png\",\n \"test/download13.png\"]\n help_main()", "def show_files(file_locations):\n for file_loc in file_locations:\n show_image(file_loc)", "def plot_list_image_path(list_image_path, log_image_path=False):\n i = 1\n nb_img = len(list_image_path)\n plt.figure(figsize=(10, 2 * nb_img))\n for image_path in list_image_path:\n if not os.path.isfile(image_path):\n continue\n img = load_img(image_path)\n plt.subplot(math.ceil(nb_img/3) + 1, 3, i)\n i += 1\n if log_image_path:\n plt.title(image_path)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def displayPicture(self):\n size = 0\n image = None\n for url in self.__imageList:\n im = Image.open(requests.get(url, stream=True).raw)\n height, weight = im.size\n imgSize = height * weight\n\n print(url)\n print(size)\n if imgSize > size:\n image = im\n # if image:\n # image.show()", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def load_source_png_images(self, num_slice):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n data = [] \n for l in self.locations.LABELS:\n slice_file = self.locations.SOURCE_PNG % (l, num_slice)\n \n #print 'Loading Input Image \\t\\t%s'%slice_file \n slice_data = misc.imread(slice_file) \n data.append(slice_data)\n \n return data #images in the same order as labels", "def move_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n # if 'OCT_V2' in sorting_string:\r\n # cat_string = 'Unlabelled'\r\n # else:\r\n # cat_string = \r\n \r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def __on_pre_processing_images_started(self):\n\n self.progress_window.show_pre_process_images_animation()", "def __row_loading(self, filename=None, PC=-1, top=50, bot=50, label_key=None, all=False, **kargs):\n assert not self.rowwise, \"loading(): You probably don't mean to use this when rowwise=True\"\n assert PC >= 0, \"loading(): PC of <1 specified\"\n \n if \"aspect\" not in kargs:\n kargs[\"aspect\"] = \"long\"\n \n data = self.__u[:,PC]\n labs = 
self.parent[label_key]\n packed_data = [{label_key: i[0], \"l\": i[1]} for i in zip(labs, data)]\n \n sorted_data = sorted(packed_data, key=itemgetter(\"l\"))\n data = [i[\"l\"] for i in sorted_data]\n labs = [i[label_key] for i in sorted_data]\n \n if all:\n data = data\n labs = labs\n else:\n if bot > 0 and top > 0: # data[-0:] returns the entire list and data[0:0] returns [] !\n data = data[0:top] + data[-bot:]\n labs = labs[0:top] + labs[-bot:]\n elif top > 0:\n data = data[0:top]\n labs = labs[0:top] \n elif bot > 0:\n data = data[-bot:]\n labs = labs[-bot:] \n \n if filename:\n fig = self.__draw.getfigure(**kargs)\n ax = fig.add_subplot(111)\n ax.set_position([0.3,0.03,0.6,0.96])\n \n x = numpy.arange(len(data))\n ax.barh(x-0.4, data, ec=\"black\", color=\"grey\")\n ax.set_ylabel(\"Rows\")\n ax.set_xlabel(\"Loading\")\n ax.set_yticklabels(labs)\n ax.set_yticks(x)\n ax.set_ylim([-0.5, len(data)-0.5])\n [t.set_fontsize(6) for t in ax.get_yticklabels()]\n \n self.__draw.do_common_args(ax, **kargs)\n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"loading(): Saved PC loading '%s'\" % real_filename)\n \n # work out the list to return\n newgl = genelist()\n newgl.load_list([{label_key: i[0], \"pc_loading\": i[1]} for i in zip(labs, data)]) # relist it so that top bot are used\n newexpn = newgl.map(genelist=self.parent, key=label_key, greedy=False)\n newexpn.sort(\"pc_loading\")\n return(newexpn)", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def on_action_3_triggered(self):\n png, pictuer=QFileDialog.getOpenFileName(self, '打开图片', '/', '*.jpg *.png')\n self.path=str(png)\n print(self.path)\n#此处开始是保存到本地的操作\n pic=open(self.path, 'rb')\n g=pic.read()\n f=open(str(os.path.abspath('.'))+\"\\str.png\",'wb')\n f.write(g)\n pic.close()\n f.close()\n#此次保存到本地完成\n pixMap = QPixmap(str(png)).scaled(self.label.width(),self.label.height())\n self.label.setPixmap(pixMap)", "def launch_image_manager(self):\n \n self._image_manager_view = ImageManagerView(self._file_table_model, self._image_manager_controller)\n self._image_manager_view.show()", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n image = read_tiff(self.filenames[pos])\n self.image_item.setImage(image)", "def image_selection_change():\n\n def return_image(val, model_id, message_name, field_name, mime, sind):\n \"\"\"Returns a URL resolvable by the probe\"\"\"\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url\n\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n image_field = d.get_model_by_name(IMAGE_SELECTION).value.split(\" :\")[0]\n mime = d.get_model_by_name(MIME_SELECTION).value\n\n if image_field != DEFAULT_UNSELECTED and mime != DEFAULT_UNSELECTED:\n plot = figure(plot_width=500, plot_height=500, title=\"\", x_range=Range1d(start=0, end=1), y_range=Range1d(start=0, end=1), name=FIGURE_MODEL)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name, image_field + mime)\n\n _install_callback_and_cds(sind, model_id, message_name,\n {image_field: [return_image, {\"model_id\": model_id,\n \"message_name\": message_name,\n \"field_name\": image_field,\n \"mime\": mime,\n \"sind\": sind}]},\n stream_limit=1)\n plot.image_url(url=image_field, x=0, y=1, h=1, w=1, 
source=d.get_model_by_name(sind))\n d.add_root(plot)" ]
[ "0.70843863", "0.64637136", "0.6282635", "0.61812353", "0.6077754", "0.60075426", "0.60023975", "0.5985294", "0.59840703", "0.59838974", "0.59453404", "0.5897838", "0.5861998", "0.5781273", "0.5778588", "0.5764681", "0.5759482", "0.5740892", "0.57235175", "0.57107615", "0.5680758", "0.56615114", "0.5654651", "0.56318796", "0.5623019", "0.56193423", "0.5619226", "0.5617869", "0.56177264", "0.5617669", "0.5598565", "0.5596666", "0.55956334", "0.559422", "0.55868685", "0.5583254", "0.5578989", "0.5578511", "0.557725", "0.55756086", "0.5560043", "0.55539125", "0.5551807", "0.5540827", "0.5529114", "0.5520673", "0.55181175", "0.5516363", "0.55095726", "0.55077094", "0.54955727", "0.5490303", "0.54898363", "0.54825366", "0.5472559", "0.5467801", "0.5464916", "0.5461663", "0.5459871", "0.5450944", "0.5448517", "0.54470414", "0.54460275", "0.5443355", "0.54429364", "0.54425645", "0.5433542", "0.54330194", "0.54224217", "0.54219186", "0.5419525", "0.54150236", "0.54057586", "0.5405259", "0.5402638", "0.5382118", "0.53657746", "0.536231", "0.53599423", "0.5358117", "0.53440464", "0.53357494", "0.5332954", "0.5323437", "0.53169256", "0.53136045", "0.53101766", "0.5309631", "0.53040206", "0.53040206", "0.53040206", "0.5301797", "0.53002787", "0.5300166", "0.5299085", "0.5294403", "0.52938384", "0.5283012", "0.52827185", "0.527235" ]
0.595108
10
Callback function that controls the download button
def download_function(self): # Ask user for directory and user ID savepath = filedialog.askdirectory() ID = self.user_name.get() self.msg2.set('Saving files to the designated folder') # Get selected filenames index = self.name_list.curselection() select_files = [self.image_names[i] for i in index] single = check_multi_single(select_files) if single is True: filename = select_files[0] try: pro_img_obj, raw_img_obj, raw_img_name, \ pro_hist_obj, raw_hist_obj = get_image_pair(filename, ID) except ValueError: self.msg2.set(get_image_pair(filename, ID)) else: # Get Image metrics self.raw_metrics = client.image_metrics(ID, raw_img_name) self.pro_metrics = client.image_metrics(ID, filename) s = self.raw_metrics['size'] size = image_size(s) # display the raw and process image in GUI raw_img = ImageTk.PhotoImage(raw_img_obj.resize(size)) self.raw_img_label.configure(image=raw_img) self.raw_img_label.image = raw_img pro_img = ImageTk.PhotoImage(pro_img_obj.resize(size)) self.pro_img_label.configure(image=pro_img) self.pro_img_label.image = pro_img # display raw and process histogram in GUI raw_hist = ImageTk.PhotoImage(raw_hist_obj.resize([385, 450])) self.raw_hist_label.configure(image=raw_hist) self.raw_hist_label.image = raw_hist pro_hist = ImageTk.PhotoImage(pro_hist_obj.resize([385, 450])) self.pro_hist_label.configure(image=pro_hist) self.pro_hist_label.image = pro_hist # Save file to a designated folder full_name = savepath + '/' + filename + '.' + self.saveas.get() pro_img_obj.save(full_name) self.msg2.set('Image is saved successfully') else: download_multiple(select_files, savepath, ID, self.saveas.get()) self.msg2.set('Images are saved successfully ' 'in "processed_images.zip"')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_with_callback(self, url, path=None, filename=None, headers=None, force=False, func=None):", "def click_download_button(self):\n self._basket.click_download_button()", "def download_files(self):", "def download(self):\n pass", "def download(self):\n pass", "def download_file(self):\r\n file_path = filedialog.askopenfilename(initialdir=\"/\",\r\n title=\"Select a File\",\r\n filetypes=((\"Torrent files\", \"*.torrent\"), (\"all files\", \"*.*\")))\r\n\r\n if file_path:\r\n download_bar = DownloadProgressBar(self.scroll_frame.interior, file_path)\r\n download_bar.pack()\r\n download_bar.start()", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def download_file(self, parsed_event, input_dir_path):", "def download(self,**attrs):\n\t\treturn super().download(**attrs)", "def on_DownloadTools_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n rootdir = os.getcwd()\n print(f\"We will download all file in {rootdir}{os.sep}Download/ Directory\")\n self.OnlyDisplay(f\"autoDownloadGman {self.url}\")\n self.MainFile = autoDownloadGman(self.url)\n\n self.OnlyDisplay(f\"autoDeployGman {self.MainFile}\")\n autoDeployGman(self.MainFile)", "def download(all):\n print(\"Downloading\")", "def download():\n raise NotImplementedError", "def download_finish(self, cloud_file):", "def download(self, download_path):\n return", "def download_progress(self, cloud_file, size, downloaded):", "def download(ctx: click.Context, **kwargs):\n root_commands.cmd_download(ctx.obj, **kwargs)", "def download_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_cwrexport_change', args=(obj.id,))\n url += '?download=true'\n return mark_safe('<a href=\"{}\">Download</a>'.format(url))", "def download(self, *args, **kwargs):\n return wb.download(*args, **kwargs)", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def _(event):\n\t\t\tif event.target is not self.download_thread:\n\t\t\t\treturn\n\t\t\tself.download_thread = None\n\t\t\tif self.continued_failure >= len(mission_manager.get_all(\"view\", lambda m: m.state != \"FINISHED\")):\n\t\t\t\tprint(f\"連續失敗 {self.continued_failure} 次,停止下載\")\n\t\t\t\treturn\n\t\t\tself.start_download(continued=True)", "def onContentDownload(self, fetcher, numBytes): #$NON-NLS-1$\r", "def download(self, download_request):\n raise NotImplementedError", "def menu_download_blender(self, event=None):\n self.link('http://www.blender.org/download/get-blender')", "def keil_download(button_download=None,md5='',build=False,nircmd=None):\t\r\n\tU,T,N,F=py.importUTNF() \r\n\tfrom qgb import Win\r\n\tbutton_download=button_download or U.get(keil_download.__name__+'Download')\r\n\tbutton_build=\t\t\t\t\t U.get(keil_download.__name__+'Build')\r\n\tif not button_download or not button_build:\r\n\t\tes=get_keil_es()\r\n\t\tbs=[e for e in es if py.getattr(e,'friendlyclassname',0)=='Button']\r\n\t\t# if build:return es,bs\r\n\t\t# bs=[]\r\n\t\t# for i in range(9):\r\n\t\t\t# print(U.stime(),'wait bs',len(bs))\r\n\t\t\t# U.sleep(0.5)\r\n\t\t# if not bs:return es\r\n\t\tbutton_download=[e for e in bs if e.texts()==['Download']][0]\r\n\t\tbutton_build\t=[e for e in bs if e.texts()==['Build']][0]\r\n\t\t\r\n\tU.set(keil_download.__name__+'Download',button_download)\r\n\tU.set(keil_download.__name__+'Build',button_build)\r\n\t\r\n\tif md5:\r\n\t\t# 
md5=md5.replace(py.chr(0x0a),T.eol)\r\n\t\tms=[i for i in md5.splitlines() if '.elf' in i]\r\n\t\tmd5=ms[0][:32]\r\n\t\t\r\n\t\tt=button_download.parent().parent().parent().texts()[0]\r\n\t\tsp=T.subLast(t,'','\\\\')\r\n\t\tname=T.subLast(t,'\\\\','.uvprojx')\r\n\t\tif sp and name:\r\n\t\t\tsp=f'{sp}/Objects/{name}.axf'\r\n\t\t\tif md5==U.md5(file=sp):\r\n\t\t\t\timport win32gui\r\n\t\t\t\th=win32gui.GetForegroundWindow()\r\n\t\t\t\tbutton_download.click()\r\n\t\t\t\tU.nircmd('win activate stitle tmux')\r\n\t\t\t\tU.nircmd('win max stitle tmux')\t\r\n\t\t\t\t# for i in range(3):\r\n\t\t\t\t\t# print(Win.GetForegroundWindow())\r\n\t\t\t\t\t#win32gui.SetForegroundWindow(h)\r\n\t\t\t\t\t# U.sleep(0.5)\r\n\t\t\t\t\r\n\t\t\t\treturn [U.StrRepr('='*122+T.eol*3),'Success keil_download !',md5,sp,\r\n\t\t\t\th,get_title(h),\r\n\t\t\t\tU.stime(),U.StrRepr(T.eol*2+'='*122)]\r\n\t\t\t\r\n\t\treturn U.StrRepr('#'*122+T.eol*3),'check failed !!!',md5,sp,U.md5(file=sp),U.StrRepr(T.eol*2+'#'*122)\r\n\t\t\r\n\tif build:\r\n\t\t# print(U.stime(),button_build)\r\n\t\tbutton_build.click()\r\n\t\tprint(U.stime(),button_build)\r\n\t\tU.set('keil.log',U.stime())\r\n\t\tlog=''\r\n\t\twhile ' Error(s)' not in log:\r\n\t\t\tlog=get_keil_log(-10)\r\n\t\t\tU.sleep(0.6)\r\n\t\tif '- 0 Error(s)' not in log:\r\n\t\t\tprint(U.stime(),log)\r\n\t\t\tlog=get_keil_log()\r\n\t\t\tU.set('keil.log',log)\r\n\t\t\treturn py.No(log)\r\n\t\t\t\r\n\tbutton_download.click()\r\n\t\r\n\tif nircmd:\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\t\r\n\treturn button_download", "def open_download_delete_file(self):\n self._unable_open_option()\n self._tap_on_confirm_button(yes=True, msg=\"Delete button\")\n self._tap_on_confirm_button(yes=True, msg=\"Confirm Delete button\")", "def _(event):\n\t\t\tself.continued_failure = 0\n\t\t\t\n\t\t\tif event.target is not self.download_thread:\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tcmd = event.data.module.config.get(\"runafterdownload\")\n\t\t\tdefault_cmd = setting.get(\"runafterdownload\")\n\t\t\t\n\t\t\tcommands = []\n\t\t\t\n\t\t\tif cmd:\n\t\t\t\tcommands.append(cmd)\n\t\t\t\t\n\t\t\tif default_cmd and default_cmd not in commands:\n\t\t\t\tcommands.append(default_cmd)\n\t\t\t\n\t\t\tdef run_command():\n\t\t\t\tfor command in commands:\n\t\t\t\t\ttarget = quote(path_join(\n\t\t\t\t\t\tprofile(event.data.module.config[\"savepath\"]),\n\t\t\t\t\t\tsafefilepath(event.data.title)\n\t\t\t\t\t))\n\t\t\t\t\tif \"{target}\" in command:\n\t\t\t\t\t\tcommand = command.format(target=target)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommand += \" \" + target\n\t\t\t\t\tprint(f\"run command: {command}\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\tawait_(subprocess.call, command, shell=True) # nosec\n\t\t\t\t\texcept (OSError, subprocess.SubprocessError):\n\t\t\t\t\t\ttraceback.print_exc()\n\n\t\t\tasync_(run_command)", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def _return_file(self, btn):\n del btn\n if self.action:\n self.action(self.file)", "def button_download(args):\n cell_source = args[\"cell_source\"]\n repo = get_arg_or_fail(args[\"user\"], \"repo\", \"<org/name>\")\n repo_name = pathlib.Path(repo.split(\"/\")[1])\n docs_dir, rel_path = split_doc_path(args[\"path\"])\n\n if \"r1\" in rel_path.parts:\n return True # No download button for TF 1.x docs.\n\n # Buttons use OSS URLs.\n if str(docs_dir) == \"g3doc/en\":\n docs_dir = pathlib.Path(\"site/en\")\n\n this_url = urllib.parse.urljoin(\n 
\"https://storage.googleapis.com\",\n str(f\"tensorflow_docs/{repo_name}\" / docs_dir / rel_path))\n\n if is_button_cell_re.search(cell_source) and cell_source.find(this_url) != -1:\n return True\n else:\n fail(\n f\"Download button URL doesn't match: {this_url}\",\n fix=fix.regex_between_groups_replace_all,\n fix_args=[r\"(href.*)http.*?(\\\\\\\".*download_logo_32px.png)\", this_url])", "def download():\n \n browser.find_element_by_xpath('//*[@id=\"ctl00_contentPlaceHolder_divAllVariablesPerYear2012\"]/div[2]/div[2]/div[1]/a').click()", "def cb(complete,total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def __handleDownload(self,block):\n self.file.write(block)", "def post_download(self, remote_files):\n pass", "def cb_new(self, button):\n print(\"New File callback\")\n self.cb_save_as(button)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download_link(self, handle):\n return None", "def download_file(self, filename: str, save_dir: str) -> None:\n raise NotImplementedError()", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def run(self):\n download(self.attempt)", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def onContentDownloadComplete(self, 
fetcher, connectionResp): #$NON-NLS-1$\r", "def cb(complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))", "def download(self, url_match):\n pass", "def download():\n\treturn response.download(request, db)", "def download_link(request, job_id, filename):\n template_values = remote_view_util.fill_job_values(request, job_id)\n template_values = remote_view_util.fill_template_values(request, **template_values)\n template_values = catalog_view_util.fill_template_values(request, **template_values)\n template_values['title'] = 'Download area'\n template_values['file_name'] = filename\n return render_to_response('catalog/download_link.html',\n template_values)", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def download_report(self):\n self.ensure_one()\n if self.attachment_id:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/content/%s?download=true' % self.attachment_id.id,\n 'target': 'download',\n }\n return True", "def click_download_archive_of_fpos_link(self):\n self._basket.click_download_archive_of_fpos_link()", "def tool_dl2flc_clicked(self, widget, data=None):\n self._download_to_flc()", "def cb_save(self, button):\n print(\"Save File callback\")\n\n if self.filename:\n with open(self.filename, \"w\") as fout:\n fout.write(self.main_data)\n else:\n # If self.flename is blank then call the Save_As method.\n self.cb_save_as(button)", "def test_download(self):\n pass", "def job_download(self, job_id):\n\n target = QFileDialog.getExistingDirectory(self, 'Where to save the resulting files?')\n if target:\n paths = self.backend.job_result_download(job_id, target)\n info(self.iface, \"Successfully Downloaded to {}\".format(paths))", "def start_download(self) -> NoReturn:\n if self.threaded:\n self.threaded_download()\n else:\n self.regular_download()", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def export_file_complete_sig_handler(self):\n # Increment the index\n self.export_file_index += 1\n\n # Move the state\n self.scanFilesProgressBar.setValue(self.scanFilesProgressBar.value() + 1)\n\n # Check if we have exported all the files\n if self.export_file_index >= len(self.analzye_results):\n # Show a dialog box that everything is exported and complete\n QMessageBox.question(self.parent, \"Export Complete\", \"All files have been exported.\", QMessageBox.Ok)\n else:\n # Export the next file\n self.export_file(self.analzye_results[self.export_file_index])", "def exportCDLButton():\n \n # Grab the parent nodes current file string\n cdlfilename=nuke.thisNode().knob(\"cdlfile\").getValue()\n\n # Check file exists, ask if ok to overwrite then call relevant parsing modules dependant on selected \n # file's extension. 
\n if os.path.exists(cdlfilename) == True:\n cdlfileext=os.path.splitext(cdlfilename)[1].lstrip('.')\n # TODO: pop up panel to check overwrite ok\n \n if cdlfileext == 'ccc':\n # Simple colour correction container\n import J_Ops.J_3Way.parseCCC\n pass\n #J_Ops.J_3Way.exportCCC().parse(cdlfilename)\n\n elif cdlfileext == 'cdl': \n # Colour decision list\n import J_Ops.J_3Way.parseCDL\n pass\n #J_Ops.J_3Way.exportCDL().parse(cdlfilename)\n \n # Implement others here.\n \n else:\n nuke.message(\"Parser does not yet exist for filetype: \" + cdlfileext + \".\\n\"\n \"Check out the manual for information on implementing a parser inside the J_3Way framework\")\n \n return", "def file_download_link(filename):\n location = f\"/{UPLOAD_DIRECTORY}/{filename}\"\n return html.A(filename, href=location)", "def download_file():\n\n if 'POST' == request.method:\n file_id = request.form['file_id']\n else:\n file_id = request.args.get('file_id')\n\n # 1 ==> example_1.tgz\n file_path = file_manager.get_file_path_from_id(file_id)\n print \"serving file: \" + file_path\n return send_file(file_path, as_attachment=True)", "def open_download_keep_file(self):\n self._unable_open_option()\n self._tap_on_confirm_button(yes=False, msg=\"Keep file button\")", "def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def onContentDownloadStart(self, fetcher, contentLength): #$NON-NLS-1$\r", "def dowload_vt():\n print get_date_time_now() + \" ==> Download VT Samples started!\"\n print get_date_time_now() + \" ==> Nothing downloaded\"" ]
[ "0.7018457", "0.69721246", "0.69597185", "0.6953735", "0.6953735", "0.6816827", "0.6681164", "0.663355", "0.66132164", "0.65275913", "0.6489063", "0.6469115", "0.64611053", "0.6418321", "0.6399592", "0.637393", "0.6368756", "0.6359143", "0.63438714", "0.6293492", "0.62759054", "0.6265195", "0.62626225", "0.6254334", "0.6243844", "0.6231575", "0.62272936", "0.6140875", "0.61268973", "0.61003697", "0.60880375", "0.6083114", "0.6070835", "0.6027082", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.6025012", "0.5997854", "0.5986547", "0.5975167", "0.5975167", "0.5975167", "0.5975167", "0.5975167", "0.5975167", "0.5975048", "0.59696716", "0.59683603", "0.596577", "0.5964993", "0.5949334", "0.5927596", "0.59127784", "0.5876034", "0.58740395", "0.58682275", "0.5849485", "0.58345664", "0.5827955", "0.58267534", "0.5820645", "0.58074754", "0.579865", "0.5790439", "0.5788562", "0.5769855", "0.5752795", "0.5739805", "0.57271945" ]
0.6004468
68
Determine the image size of the images to be displayed in the GUI. The original width-to-height ratio will be preserved. Max width/height is set to 300, and the other dimension will be adjusted accordingly.
def image_size(size): l_max = max(size) if l_max > 300: num = l_max/300 else: num = 1 w = round(size[0] / num) h = round(size[1] / num) new_size = [w, h] return new_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_size(self):", "def scale_widget_to_image_size(self):\n if self._image is not None:\n im = self._image.make_image()\n self.width = im.shape[1]\n self.height = im.shape[0]", "def format_img_size(self, img, C):\n img_min_side = float(C.im_size)\n (height,width,_) = img.shape\n\n if width <= height:\n ratio = img_min_side/width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side/height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def resizeImage(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n maxWidth = 300\n maxHeight = int(300 / ratio)\n else:\n maxWidth = int(300 / ratio)\n maxHeight = 300\n img = self.qIma.toImage().scaled(maxWidth, maxHeight, QtCore.Qt.KeepAspectRatio)\n return img", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def _get_image_size(self):\n return (3, 224, 224)", "def size(img):\n\treturn img.size", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def ratio(self):\n return float(self.max_width) / self.max_height", "def input_image_size(interpreter):\n _, height, width, channels = interpreter.get_input_details()[0]['shape']\n return width, height, channels", "def get_new_img_size(w, h, img_min_side = 600):\n if w <= h:\n f = float(img_min_side) / w\n resized_h = int(f * h)\n resized_w = img_min_side\n else:\n f = float(img_min_side) / h\n resized_w = int(f * w)\n resized_h = img_min_side\n \n return resized_w, resized_h", "def format_img_size(img, C):\n img_min_side = float(C.im_size)\n (height, width, _) = img.shape\n\n if width <= height:\n ratio = img_min_side / width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side / height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def format_img_size(img, C):\n img_min_side = float(C.im_size)\n (height, width, _) = img.shape\n\n if width <= height:\n ratio = img_min_side / width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side / height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), 
interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def calculate_image_sizes(self, pil_image):\n # TODO this should look at the style on the img tag\n # resize the image so it fits on portrait page\n # A4 is just shy of 11906x16838\n # Taking out the margins\n # TODO since we are in portrait, just care about the width\n # which is 8306 twips, or 151,900,130,000 emus\n # which should be a little over 1400 pixels, but isn't\n width, height = pil_image.size\n if width > 800:\n ratio = float(800)/width\n width = int(width * ratio)\n height = int(height * ratio)\n return width, height", "def update_size(self):\n self.size = self.image.size\n self.width, self.height = self.size", "def _get_image_dimensions(self):\n\t\timageWidth = int(self.labels['IMAGE']['LINE_SAMPLES'])\n\t\timageHeight = int(self.labels['IMAGE']['LINES'])\n\t\treturn imageWidth, imageHeight", "def _image_resolution(image_filename):\n img = mpimg.imread(image_filename)\n return img.shape", "def camera_image_size():\n camera = GigE_camera(parameter(\"camera.IP_addr\"))\n width,height = camera.width,camera.height\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n orientation %= 360\n if orientation == 90 or orientation == 270: width,height = height,width\n return width,height", "def get_image_size(self, **kwargs):\n fov_height = np.abs(self.fov_pitch[1] - self.fov_pitch[0])\n fov_width = np.abs(self.fov_yaw[1] - self.fov_yaw[0])\n height = np.ceil(fov_height * self.res_pitch).astype(int)\n width = np.ceil(fov_width * self.res_yaw).astype(int)\n\n return height, width", "def img_scale(self):\n return min(400, abs(self.size))", "def size(self):\n return self._image_size", "def get_grid_size(self, img):\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width", "def get_size_inches(self):\n width, height = self.figure.get_size_inches()\n bbox = self.get_position()\n width = width * abs(bbox.width)\n height = height * abs(bbox.height)\n return width, height", "def output_image_size(n_patches_x, n_patches_y, patch_size):\n width = n_patches_x * patch_size\n height = n_patches_y * patch_size\n return width, height", "def get_size(image):\n width, height = image.size\n\n return (width, height)", "def getSize(self):\n return self.__width * self.__height;", "def sizeHint(self):\n pixmap = self._pixmap\n if pixmap is not None:\n return pixmap.size()\n return super(QImageView, self).sizeHint()", "def size(self):\n if self._size and not self._pil_image:\n return self._size\n else:\n return self.pil_image.size", "def setImageDimensions(*args):", "def compute_size(requested_width, requested_height, rev_width, time_height):\n pic_width = 0\n pic_height = 0\n if (requested_width is not None and requested_height is not None):\n pic_height = requested_height\n pic_width = requested_width\n \n elif (requested_width is not None):\n pic_width = requested_width\n pic_height = pic_width * (float(time_height) / rev_width)\n \n elif (requested_height is not None):\n pic_height = requested_height\n pic_width = pic_height * (float(rev_width) / time_height)\n \n else:\n pic_height = 800\n pic_width = max(rev_width*3\n , pic_height * (float(rev_width) / time_height))\n \n return (pic_width, pic_height)", "def deside_figure_size(self):\n # HEIGHT >\n self.figure_height = FIGURE_HEIGHT\n\n av = self.canvas.height() / FIGURE_HEIGHT\n left_over = self.canvas.height() - 
(FIGURE_HEIGHT * math.floor(av))\n\n if left_over > av:\n self.figure_height += math.floor(left_over / math.floor(av))\n self.figure_height = int(self.figure_height)\n\n self.figure_height -= 3 # gives geometry.height() breathing room\n\n # WIDTH >\n self.figure_width = self.figure_height * 0.6\n av = math.floor(self.canvas.width() / self.figure_width)\n left_over = self.canvas.width() - (self.figure_width * math.floor(av))\n if left_over > av:\n self.figure_width += math.floor(left_over / math.floor(av))\n self.figure_width = int(self.figure_width)\n\n self.figure_width -= 3 # gives geometry.width() breathing room", "def resizePreview(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n width = 300\n height = int(float(width) / ratio)\n else:\n height = 170\n width = int(float(height) / ratio)\n if 'prodManager' in os.path.basename(self._ima):\n width = 300\n height = 170\n self.lPreview.setMinimumSize(width, height)\n self.lPreview.setMaximumSize(width, height)", "def _calc_figure_size(self):\n\n figheight_default = 6.125\n minsize = 0.025 * figheight_default # minimum size of an axis\n\n axheight = min(minsize, self.maxheight * figheight_default)\n\n w = 8. # inches\n # colorbar, gap, title + individual axes\n h = self.cb_height * figheight_default * 3. + self.get_n() * axheight\n\n return (w, h)", "def recommended_size(img_shape):\r\n new_width = 512\r\n new_height = img_shape[0] / img_shape[1] * 512\r\n new_height = round(new_height / 32) * 32\r\n return new_width, new_height", "def findWidthHeight():\n\n for f in os.listdir(\"%s/train/images/\" % args.dataset):\n if f.endswith(\".jpeg\"):\n imf = \"%s/train/images/%s\" % (args.dataset, f)\n try:\n im = Image.open(imf)\n except:\n print \"Could not open training image %s to read its size.\" %imf\n usage()\n break\n \n width = int(im.size[0])\n height = int(im.size[1])\n \n nwidth = width\n nheight = height\n if args.width:\n nwidth = args.width\n if args.height:\n nheight = args.height\n\n return width, height, nwidth, nheight, not(width == nwidth and height == nheight)", "def scale_image_to_frame(self, width, height):\n aspect = width / height\n # Image is landscape.\n if aspect >= 1:\n scale = self.popup_width / width\n return (self.popup_width, int(scale * height))\n # Otherwise image is portrait.\n scale = self.popup_width / height\n return (int(scale * width), self.popup_width)", "def get_image_sizes():\n widths = []\n heights = []\n\n from settings import folders_location\n for individual_folder_name in listdir(folders_location):\n individual_training_folder_path = folders_location + individual_folder_name + \"/training/\"\n\n image_paths = listdir(individual_training_folder_path)\n for image_path in image_paths:\n img = cv2.imread(individual_training_folder_path + image_path)\n\n height, width, channel = img.shape\n widths.append(width)\n heights.append(height)\n\n print(individual_training_folder_path + image_path)\n\n print(\"Min: %s, Max: %s\" % (np.min(widths), np.max(widths)))\n print(\"Average: %s\" % (np.average(widths)))\n\n return widths", "def _set_pixel_size(self) -> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res", "def resize(img, output_size=(350, 350)):\n\n if img and hasattr(img, \"url\"):\n image = Image.open(img)\n m_width = float(output_size[0])\n m_height = 
float(output_size[1])\n w_k = image.size[0] / m_width\n h_k = image.size[1] / m_height\n if output_size < image.size:\n if w_k > h_k:\n new_size = (m_width, image.size[1] / w_k)\n else:\n new_size = (image.size[0] / h_k, m_height)\n else:\n new_size = image.size\n new_size = tuple(map(int, new_size))\n return new_size\n return None", "def GetBestSize(self):\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())", "def size(self):\n return self.__image.size", "def checkFigureSize(self, figure):\n # scale factor\n f = 3\n width, height = self.getWidgetDims(figure)\n\n if (width < 100) and (height < 100):\n return figure.scaled(width*f, height*f, self.ratioOption, QtCore.Qt.SmoothTransformation)\n elif width<100:\n return figure.scaled(width*f, self.height, self.ratioOption, QtCore.Qt.SmoothTransformation)\n\n elif height<100:\n return figure.scaled(self.width, height*f, self.ratioOption, QtCore.Qt.SmoothTransformation)\n else:\n return figure.scaled(self.width, self.height, self.ratioOption, QtCore.Qt.SmoothTransformation)", "def maxSize():\n rect = pf.app.desktop().availableGeometry()\n maxh,maxw = rect.width(),rect.height()\n return maxh,maxw", "def _get_current_size(self, name):\n logger.debug(\"Getting size: '%s'\", name)\n if not self._previewtrain.get(name, None):\n return None\n img = self._previewtrain[name][1]\n if not img:\n return None\n logger.debug(\"Got size: (name: '%s', width: '%s', height: '%s')\",\n name, img.width(), img.height())\n return img.width(), img.height()", "def _SetSize(self, pixels = None):\n if not pixels:\n pixels = self.GetClientSize()\n self.canvas.SetSize(pixels)\n self.figure.set_size_inches(pixels[0]/self.figure.get_dpi(),\n pixels[1]/self.figure.get_dpi())", "def get_image_size(path : str):\n from PIL import Image\n im = Image.open(path)\n return im.size # W, H", "def get_height():\n return resize.transforms[1].size", "def size(self):\n return self.width, self.height", "def getimagesize(filename):\n img = Image.open(filename)\n (w,h) = img.size\n t = \"IMAGETYPE_%S\" % img.format\n a = \"width=\\\"%d\\\" height=\\\"%d\\\"\" % img.size\n return (w,h,t,a)", "def recommended_size(img_shape):\n new_width = 512\n new_height = img_shape[0] / img_shape[1] * 512\n new_height = round(new_height / 32) * 32\n return new_width, int(new_height)", "def _calc_fig_size(self, image_height, dpi, y_range, slice_window, fs):\n duration = slice_window / fs\n width_mm = duration * self.MM_IN_SEC\n height_mm = abs(y_range.max - y_range.min) * self.MM_IN_MV\n\n height_inch = image_height / dpi\n width_inch = height_inch * width_mm / height_mm\n\n return (width_inch, height_inch)", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def get_image_size(image_name='default.png'):\n\timg = Image.open(image_name)\n\treturn (img.width, img.height)", "def get_dimensions(view: View, path: str):\n\n # Allow max automatic detection and remove gutter\n max_width, max_height = view.viewport_extent()\n max_width *= 0.75\n max_height *= 0.75\n max_ratio = max_height / max_width\n\n # Get image dimensions\n try:\n width, height, _ = get_image_size(path)\n except UnknownImageFormat:\n return -1, -1\n\n # First check height since it's the smallest vector\n if height / width >= max_ratio and height > max_height:\n ratio = max_height / height\n width *= ratio\n height *= ratio\n elif height / width <= max_ratio and width > max_width:\n ratio = max_width / width\n width *= ratio\n height *= ratio\n\n return width, height", "def _figsize(profiles, 
height):\n shape = profiles.data.shape[1:]\n count = profiles.data.shape[0]\n hw_ratio = shape[1] / shape[0]\n width = height * hw_ratio * count\n return (width, 1.1 * height)", "def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height", "def getHeight(self):\n return _tkCall(self.image.height)", "def getImageSize(language=None):", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def GetDimensions(filename):\n with Image(filename=filename) as img:\n dimensions = (img.width, img.height)\n return(dimensions)", "def getGridSize(self):\n # This is set by the mosaic module, but other modules need to\n # know the values to take the proper size grid.\n return self.grid_size", "def getSize(self):\n return GDimension(frameWidth, frameHeight)", "def get_size(self, index):\n return self.image_sizes[index]", "def CalculateLineHeight(self):\r\n\r\n dc = wx.ClientDC(self)\r\n self._lineHeight = dc.GetCharHeight() \r\n\r\n if self._imageListNormal:\r\n \r\n # Calculate a self._lineHeight value from the normal Image sizes.\r\n # May be toggle off. Then CustomTreeCtrl will spread when\r\n # necessary (which might look ugly).\r\n n = self._imageListNormal.GetImageCount()\r\n\r\n for i in xrange(n):\r\n \r\n width, height = self._imageListNormal.GetSize(i)\r\n\r\n if height > self._lineHeight:\r\n self._lineHeight = height\r\n \r\n if self._imageListButtons:\r\n \r\n # Calculate a self._lineHeight value from the Button image sizes.\r\n # May be toggle off. Then CustomTreeCtrl will spread when\r\n # necessary (which might look ugly).\r\n n = self._imageListButtons.GetImageCount()\r\n\r\n for i in xrange(n):\r\n \r\n width, height = self._imageListButtons.GetSize(i)\r\n\r\n if height > self._lineHeight:\r\n self._lineHeight = height\r\n\r\n if self._imageListCheck:\r\n \r\n # Calculate a self._lineHeight value from the check/radio image sizes.\r\n # May be toggle off. Then CustomTreeCtrl will spread when\r\n # necessary (which might look ugly).\r\n n = self._imageListCheck.GetImageCount()\r\n\r\n for i in xrange(n):\r\n \r\n width, height = self._imageListCheck.GetSize(i)\r\n\r\n if height > self._lineHeight:\r\n self._lineHeight = height\r\n\r\n if self._imageListLeft:\r\n \r\n # Calculate a self._lineHeight value from the leftmost image sizes.\r\n # May be toggle off. 
Then CustomTreeCtrl will spread when\r\n # necessary (which might look ugly).\r\n n = self._imageListLeft.GetImageCount()\r\n\r\n for i in xrange(n):\r\n \r\n width, height = self._imageListLeft.GetSize(i)\r\n\r\n if height > self._lineHeight:\r\n self._lineHeight = height\r\n \r\n if self._lineHeight < 30:\r\n self._lineHeight += 2 # at least 2 pixels\r\n else:\r\n self._lineHeight += self._lineHeight/10 # otherwise 10% extra spacing\r", "def max_size(self):\n max_size = self.widget.maximumSize()\n return Size(max_size.width(), max_size.height())", "def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()", "def _print_img_size(self, img):\n width, height = img.size\n print('{}, {}'.format(width, height))", "def calc_size(self):\r\n self.height = HEIGHT_STATUS", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def calc_thumbnail_size(img):\n width, length = img.size\n ratio = width / length\n\n # for some reason, if it's exactly 224, then thumnailed image is 223\n dim = 224 + 1 # output dim\n if ratio > 1:\n size = (dim * ratio, dim)\n else:\n size = (dim, dim / ratio)\n# print(size)\n return size", "def pix_size(self):\n return self._pix_size", "def picture_size(self):\n w,h=self.width,self.height\n if self.picture is None:\n return False\n else: \n size_pic,size_crop,margin_pic=self.picture.resize(width=w,height=h)\n self.size_pic = size_pic\n self.size_crop = size_crop\n self.margin_pic = margin_pic\n return True", "def storage_size( self ):\n if self.max_height+1 <= 8:\n return 1\n elif self.max_height+1 <= 16:\n \treturn 2\n else:\n return 3 # Max 24 pixels height", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def get_num_of_images(self):", "def height(self) -> int:\n return self._image_data.height", "def resize_image(self, width=200):\n self.new_width = width\n aspect_ratio = self.original_height/float(self.original_width)\n self.new_height = int(aspect_ratio * self.new_width)\n\n resized_image = self.image.resize((self.new_width, self.new_height), Image.BILINEAR)\n return resized_image", "def calculate_minimum_height_width(image_width, image_height, desired_width, desired_height):\n image_width, image_height = float(image_width), float(image_height)\n desired_width, desired_height = float(desired_width), float(desired_height)\n\n # resize the width and height to match the desired height, while maintaining ratio\n scaled_width = desired_height / image_height * image_width\n scaled_height = desired_height\n\n # if the new width is below the desired width, scale up to match width\n if scaled_width < desired_width:\n scaled_height = desired_width / scaled_width * scaled_height\n scaled_width = desired_width\n\n scaled_width, scaled_height = int(scaled_width), int(scaled_height)\n return scaled_width, scaled_height", "def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n 
self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "def autoscale(self, img: Image, max_height: int, max_width: int):\n height = img.bgr.shape[0]\n width = img.bgr.shape[1]\n\n diff_height = max_height / height\n diff_width = max_width / width\n\n diff = min(diff_height, diff_width)\n\n height = int(height * diff)\n width = int(width * diff)\n\n return img.rescale_image(height, width)", "def get_image_characteristics(self):\r\n self.image_height, self.image_width, self.image_channels = self.image.shape\r\n\r\n # Estimate the cell size to be around a ninth of the width of the screenshot area\r\n self.cell_size = int(self.image_width / 9) | 1\r\n\r\n # Cell size should be at most a ninth of the width and at least a twentieth of the width of the screenshot\r\n # Since a typical grid is 9x9, so it should be at most a ninth of the image width, and it shouldn't be too small\r\n self.min_cell_size = int(self.image_width / 20 * self.image_width / 20)\r\n self.max_cell_size = int(self.image_width / 9 * self.image_width / 9)", "def image_size(cls):\n return random.randint(250000, 80000000000)", "def _SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def DoGetBestSize(self):\n best = wx.Size(0,0)\n if not self._windows:\n best = wx.Size(10,10)\n\n sashsize = self._GetSashSize()\n if self._orient == wx.HORIZONTAL:\n for win in self._windows:\n winbest = win.GetEffectiveMinSize()\n best.width += max(self._minimumPaneSize, winbest.width)\n best.height = max(best.height, winbest.height)\n best.width += sashsize * (len(self._windows)-1)\n\n else:\n for win in self._windows:\n winbest = win.GetEffectiveMinSize()\n best.height += max(self._minimumPaneSize, winbest.height)\n best.width = max(best.width, winbest.width)\n best.height += sashsize * (len(self._windows)-1)\n \n border = 2 * self._GetBorderSize()\n best.width += border\n best.height += border\n return best", "def actual_size(self, size, mode='normal', state='on'):\n qt_size = QSize(*size)\n qt_mode = MODE_MAP.get(mode, QIcon.Normal)\n qt_state = STATE_MAP.get(state, QIcon.On)\n size = self._qicon.actualSize(qt_size, qt_mode, qt_state)\n return (size.width(), size.height())", "def calculate_max_height_width(imgs):\n h_w_map = np.zeros((len(imgs), 2), dtype=np.int32)\n for index, img in enumerate(imgs):\n h_w_map[index, 0], h_w_map[index, 1], _ = img.shape\n max_val = h_w_map.argmax(axis=0)\n max_height, max_width = h_w_map[max_val[0], 0], h_w_map[max_val[1], 1]\n return max_height, max_width", "def pixel_size_ratio(self):\n return 
2**(self.levels[-1] - self.levels[0])", "def _window_size(self):\n width = self.cv.winfo_width()\n if width <= 1: # the window isn't managed by a geometry manager\n width = self.cv['width']\n height = self.cv.winfo_height()\n if height <= 1: # the window isn't managed by a geometry manager\n height = self.cv['height']\n return width, height", "def getDimensions():", "def FlyResize( image, log_mess, nimages, method = Image.ANTIALIAS ):\n oldw, oldh = image.size\n resl = [8, 10, 14, 16, 20, 22, 24, 32, 40, 48, 64, 96, 128, 256]\n \n if oldw > 256 or oldh > 256:\n newsiz = min(resl, key = lambda x:abs(x - max(oldw, oldh)))\n image.thumbnail((newsiz, newsiz), method)\n neww, newh = image.size\n log_mess += ' and new size scaled = %s x %s' %(neww, newh)\n elif nimages > 1:\n log_mess += ' and size = %s x %s' %(oldw, oldh)\n \n return oldw, oldh, image, log_mess", "def image_size():\n return eval(subprocess(\"print camera_image_size()\"))", "def _image_resize_keep_ratio(self, image, max_w, max_h, crop=False):\n if crop:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'outer')\n image = image.resize((width, height), Image.ANTIALIAS)\n image = image.crop(sizing.new_size_by_croping(image.size, (max_w, max_h)))\n else:\n width, height = sizing.new_size_keep_aspect_ratio(image.size, (max_w, max_h), 'inner')\n image = image.resize((width, height), Image.ANTIALIAS)\n return image, image.size[0], image.size[1]", "def get_dimensions(image_path):\n with Image.open(image_path) as img:\n return img.size", "def get_image_size(fname):\r\n \r\n logging.debug('get_image_size({})'.format(fname))\r\n\r\n with open(fname, 'rb') as fhandle:\r\n head = fhandle.read(24)\r\n if len(head) != 24:\r\n return\r\n if imghdr.what(fname) == 'png':\r\n check = struct.unpack('>i', head[4:8])[0]\r\n if check != 0x0d0a1a0a:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n width, height = struct.unpack('>ii', head[16:24])\r\n elif imghdr.what(fname) == 'gif':\r\n width, height = struct.unpack('<HH', head[6:10])\r\n elif imghdr.what(fname) == 'jpeg':\r\n try:\r\n fhandle.seek(0) # Read 0xff next\r\n size = 2\r\n ftype = 0\r\n while not 0xc0 <= ftype <= 0xcf:\r\n fhandle.seek(size, 1)\r\n byte = fhandle.read(1)\r\n while ord(byte) == 0xff:\r\n byte = fhandle.read(1)\r\n ftype = ord(byte)\r\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\r\n # We are at a SOFn block\r\n fhandle.seek(1, 1) # Skip `precision' byte.\r\n height, width = struct.unpack('>HH', fhandle.read(4))\r\n except Exception: #IGNORE:W0703\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n else:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n logging.debug('get_image_size - width, height = {}, {}'.format(width, height))\r\n return width, height", "def get_pixel_size(self):\n raise NotImplementedError" ]
[ "0.7428963", "0.72670555", "0.7153747", "0.7006132", "0.6961046", "0.69432396", "0.678366", "0.6766124", "0.6737722", "0.66931385", "0.6661223", "0.6660257", "0.6642184", "0.6614157", "0.66024864", "0.66024864", "0.65730685", "0.656699", "0.6548179", "0.6547853", "0.65275776", "0.6506463", "0.65021527", "0.64996475", "0.6485617", "0.6435002", "0.6407626", "0.63895375", "0.6358188", "0.63461334", "0.63334304", "0.6325478", "0.63250923", "0.6324781", "0.62895626", "0.6275207", "0.62720996", "0.62516594", "0.6246235", "0.6245509", "0.624483", "0.6240051", "0.622978", "0.6221994", "0.62176883", "0.6216795", "0.6210791", "0.6205821", "0.61972684", "0.6184422", "0.61843646", "0.61822355", "0.6175964", "0.6174342", "0.6173761", "0.6171146", "0.6130832", "0.61308205", "0.6116049", "0.6109189", "0.6093346", "0.6093344", "0.6093087", "0.60915995", "0.6059022", "0.605447", "0.60511255", "0.6044813", "0.6038955", "0.6034798", "0.60328686", "0.60321987", "0.60086846", "0.60086274", "0.60085666", "0.6001343", "0.599991", "0.5991928", "0.59904796", "0.5989432", "0.5983794", "0.5981038", "0.5971581", "0.5963932", "0.59522957", "0.59499437", "0.59444326", "0.59391534", "0.59384423", "0.59295255", "0.59244597", "0.59182715", "0.59101564", "0.59035206", "0.5893843", "0.5889003", "0.5877858", "0.5873497", "0.58714855", "0.5871065" ]
0.6792414
6
Communicate with the server and database to get the original and processed images, along with their pre- and post-processing histograms.
def get_image_pair(filename, ID): r = client.get_image_file(ID, filename) try: check_r_type(r) except TypeError: return r else: pro_img_arr, method = client.get_image(r) pro_img = Image.fromarray(pro_img_arr) raw_img_name = filename.replace('_' + method, "") r_raw = client.get_image_file(ID, raw_img_name) try: check_r_type(r) except TypeError: return r else: raw_img_arr, _ = client.get_image(r_raw) raw_img = Image.fromarray(raw_img_arr) pro_hist = Image.fromarray(client.get_histogram(ID, filename)) raw_hist = Image.fromarray(client.get_histogram(ID, raw_img_name)) return pro_img, raw_img, raw_img_name, pro_hist, raw_hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def process(self, image):", "def client_handler(inbound_socket, addr, job_queue, result_queue):\n global last_pic\n print(inbound_socket)\n\n def draw_boxes(boxes):\n mask = Image.new('RGBA', picSize, (255, 255, 255, 0))\n d = ImageDraw.Draw(mask)\n fnt = ImageFont.truetype(p.FONT_PATH, 12)\n txt_offset_x = 0\n txt_offset_y = 20\n for box in boxes:\n p_coords = [box.coords[0]*picSize[0],\n box.coords[1]*picSize[1],\n box.coords[2]*picSize[0],\n box.coords[3]*picSize[1]]\n d.rectangle(p_coords, outline='red')\n print('drawing box at ', end='')\n # print([x for x in box.coords])\n textpos = (p_coords[0] - txt_offset_x, p_coords[1] - txt_offset_y)\n d.text(textpos, 'Class %s at %s confidence' %\n (box.classification, box.confidence), font=fnt, fill='red')\n\n return mask\n try:\n camera_socket = socket.socket()\n camera_socket.connect(('dronepi.local', 8000))\n camera_connection = camera_socket.makefile('rwb')\n\n client_connection = inbound_socket.makefile('rwb')\n image_stream = io.BytesIO()\n char_len = struct.calcsize('<c')\n long_len = struct.calcsize('<L')\n while True:\n t = time.time()\n command = struct.unpack('<c', client_connection.read(char_len))[0]\n t = time_op(t, 'recv command')\n if command != b'':\n if command == b'p':\n last_pic.save(image_stream,\n format='jpeg',\n quality=85,\n thumbnail=None)\n t = time_op(t, 'save pic')\n header = struct.pack('<L', image_stream.tell())\n client_connection.write(header)\n t = time_op(t, 'send header')\n # Rewind the stream and send the image data over the wire\n image_stream.seek(0)\n client_connection.write(image_stream.read())\n client_connection.flush()\n t = time_op(t, 'send pic')\n # reset stream\n image_stream.seek(0)\n image_stream.truncate()\n\n elif command == b'c':\n camera_connection.write(b'p')\n camera_connection.flush()\n t = time_op(t, 'send cam request')\n image_len_raw = camera_connection.read(long_len)\n image_len = struct.unpack('<L', image_len_raw)[0]\n t = time_op(t, 'recv header')\n if not image_len:\n print('Received image length of 0, quitting!')\n break\n # Construct a stream to hold the image data and\n # read the image data from the connection\n image_stream.write(camera_connection.read(image_len))\n t = time_op(t, 'recv pic')\n # Rewind the stream, open it as an image with PIL and\n # do some processing on it\n image_stream.seek(0)\n image = Image.open(image_stream)\n\n t = time_op(t, 'open pic & process')\n job_queue.put(image)\n job_queue.join()\n t = time_op(t, 'NN')\n\n image_stream.seek(0)\n image_stream.truncate()\n\n bboxes = result_queue.get(False)\n box_pickle = pickle.dumps(bboxes, protocol=3)\n pickle_size = len(box_pickle)\n t = time_op(t, 'pickle')\n client_connection.write(struct.pack('<L', pickle_size))\n client_connection.write(box_pickle)\n client_connection.flush()\n t = time_op(t, 'send pickle')\n\n last_pic = image\n\n elif command == b'd':\n 
camera_connection.write(b'p')\n camera_connection.flush()\n t = time_op(t, 'send cam request')\n image_len_raw = camera_connection.read(long_len)\n image_len = struct.unpack('<L', image_len_raw)[0]\n t = time_op(t, 'recv header')\n if not image_len:\n print('Received image length of 0, quitting!')\n break\n # Construct a stream to hold the image data and read\n # the image data from the connection\n\n image_stream.write(camera_connection.read(image_len))\n t = time_op(t, 'recv pic')\n # Rewind the stream, open it as an image with PIL and\n # do some processing on it\n image_stream.seek(0)\n image = Image.open(image_stream)\n\n t = time_op(t, 'open pic & process')\n job_queue.put(image)\n job_queue.join()\n t = time_op(t, 'NN')\n\n image_stream.seek(0)\n image_stream.truncate()\n\n bboxes = result_queue.get(False)\n\n box_count = len(bboxes)\n client_connection.write(struct.pack('<L', box_count))\n for box in bboxes:\n data = [box.coords[0],\n box.coords[1],\n box.coords[2],\n box.coords[3],\n box.confidence,\n box.classification]\n #print(data)\n client_connection.write(struct.pack('<ffffff',\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5]))\n client_connection.flush()\n t = time_op(t, 'send tuples')\n\n last_pic = image\n except:\n print('Error: %s' % sys.exc_info()[0], flush=True)\n print('Error: %s' % sys.exc_info()[1], flush=True)\n print('Error: %s' % sys.exc_info()[2], flush=True)\n client_connection.close()\n camera_connection.close()\n inbound_socket.close()\n camera_socket.close()\n return 0", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def run(self):\n \n count = 0\n while True:\n self.__ticker.tick()\n\n self.update()\n img = self.get_new_image()\n if img == None:\n print \"not receiving images yet...\"\n else:\n if self.verbose:\n cv.ShowImage(\"SnapShotSaver\", img)\n cv.WaitKey(10)\n cv.SaveImage(\"%s/%s_%d.png\" % (self.destination, self.prefix, count), img)\n count += 1", "def make_reply(self,request,nreplies):\n #print(\"DummyPyWorker. Sending client message back\")\n self._log.debug(\"received message with {} parts\".format(len(request)))\n\n if not self.is_model_loaded():\n self._log.debug(\"model not loaded for some reason. 
loading.\")\n\n try:\n import torch\n except:\n raise RuntimeError(\"could not load pytorch!\")\n\n # message pattern: [image_bson,image_bson,...]\n\n nmsgs = len(request)\n nbatches = nmsgs/self.batch_size\n\n if not self._still_processing_msg:\n self._next_msg_id = 0\n\n # turn message pieces into numpy arrays\n img2d_v = []\n sizes = []\n frames_used = []\n rseid_v = []\n for imsg in xrange(self._next_msg_id,nmsgs):\n try:\n compressed_data = str(request[imsg])\n data = zlib.decompress(compressed_data)\n c_run = c_int()\n c_subrun = c_int()\n c_event = c_int()\n c_id = c_int()\n img2d = larcv.json.image2d_from_pystring(data,\n c_run, c_subrun, c_event, c_id )\n except:\n self._log.error(\"Image Data in message part {}\\\n could not be converted\".format(imsg))\n continue\n self._log.debug(\"Image[{}] converted: {}\"\\\n .format(imsg,img2d.meta().dump()))\n\n # check if correct plane!\n if img2d.meta().plane()!=self.plane:\n self._log.debug(\"Image[{}] is the wrong plane!\".format(imsg))\n continue\n\n # check that same size as previous images\n imgsize = (int(img2d.meta().cols()),int(img2d.meta().rows()))\n if len(sizes)==0:\n sizes.append(imgsize)\n elif len(sizes)>0 and imgsize not in sizes:\n self._log.debug(\"Next image a different size. \\\n we do not continue batch.\")\n self._next_msg_id = imsg\n break\n img2d_v.append(img2d)\n frames_used.append(imsg)\n rseid_v.append((c_run.value,c_subrun.value,c_event.value,c_id.value))\n if len(img2d_v)>=self.batch_size:\n self._next_msg_id = imsg+1\n break\n\n\n # convert the images into numpy arrays\n nimgs = len(img2d_v)\n self._log.debug(\"converted msgs into batch of {} images. frames={}\"\n .format(nimgs,frames_used))\n np_dtype = np.float32\n img_batch_np = np.zeros( (nimgs,1,sizes[0][1],sizes[0][0]),\n dtype=np_dtype )\n\n for iimg,img2d in enumerate(img2d_v):\n meta = img2d.meta()\n img2d_np = larcv.as_ndarray( img2d )\\\n .reshape( (1,1,meta.cols(),meta.rows()))\n\n img2d_np=np.transpose(img2d_np,(0,1,3,2))\n img_batch_np[iimg,:] = img2d_np\n\n # print(\"shape of image: \",img2d_np.shape)\n\n\n # now make into torch tensor\n img2d_batch_t = torch.from_numpy( img_batch_np ).to(self.device)\n # out_batch_np = img2d_batch_t.detach().cpu().numpy()\n # out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n print(\"shape of image: \",img2d_batch_t.shape)\n with torch.set_grad_enabled(False):\n out_batch_np = self.model.forward(img2d_batch_t).detach().cpu().numpy()\n out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n\n\n # compression techniques\n ## 1) threshold values to zero\n ## 2) suppress output for non-adc values\n ## 3) use half\n\n # suppress small values\n out_batch_np[ out_batch_np<1.0e-3 ] = 0.0\n\n # threshold\n # for ich in xrange(out_batch_np.shape[1]):\n # out_batch_np[:,ich,:,:][ img_batch_np[:,0,:,:]<10.0 ] = 0.0\n\n # convert back to full precision, if we used half-precision in the net\n\n self._log.debug(\"passed images through net. 
output batch shape={}\"\n .format(out_batch_np.shape))\n # convert from numpy array batch back to image2d and messages\n reply = []\n for iimg in xrange(out_batch_np.shape[0]):\n img2d = img2d_v[iimg]\n rseid = rseid_v[iimg]\n meta = img2d.meta()\n\n out_np = out_batch_np[iimg,0,:,:]\n # print(\"out_np\",type(out_np))\n # print(\"meta\",type(meta))\n out_img2d = larcv.as_image2d_meta( out_np, meta )\n bson = larcv.json.as_pystring( out_img2d,\n rseid[0], rseid[1], rseid[2], rseid[3] )\n compressed = zlib.compress(bson)\n reply.append(compressed)\n\n if self._next_msg_id>=nmsgs:\n isfinal = True\n self._still_processing_msg = False\n else:\n isfinal = False\n self._still_processing_msg = True\n\n self._log.debug(\"formed reply with {} frames. isfinal={}\"\n .format(len(reply),isfinal))\n return reply,isfinal", "def handle_result(self):\n if not self.scidb_connector.running and self.scidb_connector.result_queue.qsize() == 0:\n print >> sys.stdout, 'Stopping Image Loader...'\n return\n \n if self.scidb_connector.result_queue.qsize():\n try:\n print >> sys.stdout, 'Loading image...'\n start_time = time.clock()\n \n query_json = self.scidb_connector.result_queue.get(0)\n result = SciDB.SciDB_Result(query_json)\n \n # Make figure\n if query_json['request']['options']['iterative']:\n scatter = scalrr_vis.plot_image(result, ZSCALE, master=self.master, title=\"Iteration \" + str(len(self.iterations) + 1))\n self.master = result.data_array\n \n self.add_graph(scatter)\n else:\n self.master = None\n scatter = scalrr_vis.plot_image(result, ZSCALE, title=query_json['request']['query'])\n \n self.update_graph(scatter)\n \n end_time = time.clock() \n print >> sys.stdout, 'Done... ', end_time-start_time\n \n self.ui_root.after(500, self.handle_result)\n except Queue.Empty:\n pass\n \n else:\n self.ui_root.after(1000, self.handle_result)", "def handle(self):\n try:\n conn = sqlite.connect(\"temp.db\")\n while True:\n data = self.request.recv(48)\n if not data:\n break\n parts = struct.unpack(\"dddddd\", data)\n print_datapoint(parts)\n store_datapoint(conn, parts)\n except KeyboardInterrupt:\n pass\n finally:\n conn.close()", "def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n # Data sequence is unimportant, simply write it out and proceed\n self.writePairs(data)\n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()\n self.closeFileHandles()", "def image_server():\n yield from http_server_thread(ImageHandler)", "def btn_display_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n del image_fio\n del img_hist_fio\n self.show_as_waiting(False)", "def handler(self):\n msg = self.create_http_request()\n self.send(msg)\n\n if self.file_name == \"/\":\n self.file_name = \"/index.html\"\n else:\n self.file_name = 
HttpClient.create_file_location(self.file_name)\n\n if self.http_command == \"HEAD\":\n recv_raw, _ = self.recv_header()\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n elif self.http_command == \"PUT\":\n recv_raw = self.recv_all_data()\n if recv_raw != b'':\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n elif self.http_command == \"POST\":\n recv_raw = self.recv_all_data()\n if recv_raw != b'':\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n else: # http_command == \"GET\" or it is a bad request\n recv_raw = self.recv_all_data()\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n\n self.disconnect()\n print(\"[CONNECTION] Client terminated\")", "def prepare_images(self):\n\n qt_original_image = self.convert_image_to_QTformat(self.original_image)\n self.send_original_photo_to_gui.emit(qt_original_image)\n\n self.processed_image = self.procces_image(self.original_image)\n qt_processed_image = self.convert_image_to_QTformat(self.processed_image)\n self.send_processed_photo_to_gui.emit(qt_processed_image)", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def process_data(request):\n mongodb = get_db() \n start_time = time.time()\n record_segments(mongodb)\n record_heatmaps(mongodb)\n result = sys._getframe().f_code.co_name, \"COMPLETED\", (time.time() - start_time), \"seconds\"\n print result\n return result", "def _inst_get_img_info_from_db(self):\r\n docs = self._mongo_api.inst_get_no_replied_data()\r\n if docs is None:\r\n print(\"No replied documents in instagram collection...\")\r\n else:\r\n for doc in docs:\r\n obj_id = doc['_id']\r\n post_id = doc['post_id']\r\n full_name = doc['full_name']\r\n img_url = doc['img_url']\r\n\r\n img = self._download_img_from_url(img_url)\r\n img = self._cnn_model.preprocess_img(img)\r\n prediction = self._cnn_model.predict(img)\r\n\r\n if self._inst_reply_post(post_id=post_id, full_name=full_name, prediction=prediction):\r\n self._mongo_api.inst_update_doc_after_replied(obj_id)\r\n print(\"Instagram post have answered...\")\r\n print(\"Instagram post have updated...\")\r\n else:\r\n print(\"Instagram post haven't replied...\")", "def new_live_image():\n\n parsed_args = live_parser.parse_args()\n image = 
parsed_args['image']\n filename = str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')[:-3]) + '.jpg'\n db_image = new_image(image, filename)\n\n #latest_clf = ClassifierStats.query.order_by(ClassifierStats.date.desc()).first()\n #if latest_clf:\n recognition_manager.add_image(db_image=db_image)\n return jsonify({'message': 'Image processed'}), 200\n #else:\n # return jsonify({'message': 'No classifier present!'}), 500", "def download_function(self):\n # Ask user for directory and user ID\n savepath = filedialog.askdirectory()\n ID = self.user_name.get()\n\n self.msg2.set('Saving files to the designated folder')\n\n # Get selected filenames\n index = self.name_list.curselection()\n select_files = [self.image_names[i] for i in index]\n\n single = check_multi_single(select_files)\n\n if single is True:\n\n filename = select_files[0]\n try:\n pro_img_obj, raw_img_obj, raw_img_name, \\\n pro_hist_obj, raw_hist_obj = get_image_pair(filename, ID)\n except ValueError:\n self.msg2.set(get_image_pair(filename, ID))\n else:\n # Get Image metrics\n self.raw_metrics = client.image_metrics(ID, raw_img_name)\n self.pro_metrics = client.image_metrics(ID, filename)\n\n s = self.raw_metrics['size']\n size = image_size(s)\n\n # display the raw and process image in GUI\n raw_img = ImageTk.PhotoImage(raw_img_obj.resize(size))\n self.raw_img_label.configure(image=raw_img)\n self.raw_img_label.image = raw_img\n\n pro_img = ImageTk.PhotoImage(pro_img_obj.resize(size))\n self.pro_img_label.configure(image=pro_img)\n self.pro_img_label.image = pro_img\n\n # display raw and process histogram in GUI\n raw_hist = ImageTk.PhotoImage(raw_hist_obj.resize([385, 450]))\n self.raw_hist_label.configure(image=raw_hist)\n self.raw_hist_label.image = raw_hist\n\n pro_hist = ImageTk.PhotoImage(pro_hist_obj.resize([385, 450]))\n self.pro_hist_label.configure(image=pro_hist)\n self.pro_hist_label.image = pro_hist\n\n # Save file to a designated folder\n full_name = savepath + '/' + filename + '.' + self.saveas.get()\n pro_img_obj.save(full_name)\n self.msg2.set('Image is saved successfully')\n\n else:\n download_multiple(select_files, savepath, ID, self.saveas.get())\n self.msg2.set('Images are saved successfully '\n 'in \"processed_images.zip\"')", "def __image_request_handler(self):\n self.__logger.info(\"Image Request Handling Thread started\")\n ticker = Ticker(2)\n while self._running:\n timeout = ticker.end_tick(False)\n try:\n task = self.__image_queue.get(True, timeout)\n except Queue.Empty:\n ticker.start_tick()\n continue\n\n # There is a task to process\n ticker.start_tick()\n source, connection = task\n\n # Check if the connection has been closed. 
If it was,\n # do not bother processing the request.\n if not connection.connected():\n self.__logger.info(\"Skipping request for image of source %s\" \\\n \" because requesting client disconnected\" \\\n % source)\n self.__image_queue.task_done()\n continue \n\n # Obtain new image\n error = \"No image available\"\n image = None\n mtime = time.time()\n if source in self.__video_modules:\n try:\n mtime, image = self.__get_image(source)\n except Exception as err:\n error = \"Obtaining image failed: %s\" % repr(err)\n else:\n error = \"Video source %s has not been started\" % source\n\n if connection.connected():\n if image:\n # Valid image was obtained\n img_str = image.tostring()\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'shape': (image.width, image.height),\n 'depth': image.depth,\n 'nChannels': image.nChannels}\n else:\n # An error occured, notify the vision module\n self.__logger.info(\"Failed to obtain image for source %s. \"\\\n \" Error message: %s\" % (source, error))\n img_str = \"\"\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'error': error}\n # Send the data to the vision module.\n if not connection.sendall(data, img_str):\n self.__logger.warning(\"Failed to send data to client. \" \\\n \"Probably disconnected\")\n else:\n self.__logger.info(\"Image of source %s obtained but not \" \\\n \"sending because requesting client \" \\\n \"disconnected\" % source)\n self.__image_queue.task_done()\n self.__logger.info(\"Image Request Handling Thread ended\")", "def process_image(self):\n pass", "def do_all(self):\r\n self.frame_gen.start()\r\n\r\n while True:\r\n msg = self.rec_queue.get()\r\n if msg[0] == 'sync':\r\n self.send_queue.put(('sync', time.time()))\r\n continue\r\n if msg[0] == 'finish':\r\n break\r\n if msg[0] != 'img':\r\n raise ValueError(f'strange msg: {msg}')\r\n\r\n frame_num = msg[1]\r\n time_ms = self.ms_per_frame * frame_num\r\n rawimg = self.frame_gen.generate_at(time_ms)\r\n self.img_queue.put((frame_num, rawimg))\r\n self.send_queue.put(('post', frame_num))\r\n rawimg = None\r\n\r\n self.frame_gen.finish()\r\n\r\n self.img_queue.close()\r\n self.rec_queue.close()\r\n self.send_queue.close()", "def run(self):\n self.sock = self.set_up_socket()\n if self.output_file is None:\n cv2.namedWindow('Video Preview')\n else:\n self.video_writer = cv2.VideoWriter(self.output_file, \n self.output_format, \n self.fps, \n self.output_size\n )\n start_time = time.time()\n self.loop()\n run_time = time.time() - start_time\n self.output_statistics(run_time)\n self.cleanup()", "def run(self):\r\n self.collect_data()", "def process(self):", "def process(self):", "def process(self):", "def _main_loop(self):\n while not rospy.is_shutdown():\n # Check for reconfiguration data\n if self._transfer_reconfigure_data is not None:\n # Copy reconfigure data from shared memory\n with self._transfer_reconfigure_data_mutex:\n reconfigure_data = deepcopy(self._transfer_reconfigure_data)\n self._transfer_reconfigure_data = None\n # Run vision reconfiguration\n self._configure_vision(*reconfigure_data)\n # Check for new image\n elif self._transfer_image_msg is not None:\n # Copy image from shared memory\n with self._transfer_image_msg_mutex:\n image_msg = self._transfer_image_msg\n self._transfer_image_msg = None\n # Run the vision pipeline\n self._handle_image(image_msg)\n # Now the first image has been processed\n self._first_image_callback = False\n else:\n try:\n self._rate.sleep()\n except rospy.exceptions.ROSTimeMovedBackwardsException:\n pass", 
"def process():", "def start(self) :\n\t\t# Create the image buffer \n\t\tself.histogram_buffer = PicoHarp.AllocateBuffer()\n\t\tp = PicoHarp(self.child_connection, self.histogram_buffer)\n\t\tp.start()\n\t\treturn p", "def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n output_compressed = set()\n output_normal = set()\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n for pDict in data:\n if pDict['gzipped']:\n for filename in pDict['files']:\n output_compressed.add(filename)\n else:\n for filename in pDict['files']:\n output_normal.add(filename)\n for filename in pDict['files']:\n self.outputfiles.add(filename)\n \n self.integrateStats(data)\n self.Counter.value += len(data)\n\n # Now concatenate any output files together\n if self.heartbeat is not None:\n self.heartbeat.message(\"Beginning file block merging..\", True)\n\n fcount = 0\n blkavg = 0\n for extension in ('sam', \n 'pp.sam',\n '1.fastq',\n '2.fastq',\n 'pp.1.fastq',\n 'pp.2.fastq',\n 'sh.fastq',\n 'sh.pp.fastq'):\n fc,ba = self.concatenate(output_compressed, extension, do_gzip=True)\n fcount += fc\n blkavg += ba\n fc,ba = self.concatenate(output_normal, extension, do_gzip=False)\n fcount += fc\n blkavg += ba\n\n if self.heartbeat is not None and fcount > 0:\n self.heartbeat.message(\n \"Merged %d blocks (avg) in each of %d output files\" % \n (int(round(blkavg * 1.0 / fcount)), fcount), True)\n \n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()", "def handle_req( self, req ):\n start_time_handle = time.time()\n stamp = req.stamp.data\n\n cv_image = None\n for i in range(3):\n cv_image, fail = self.pop_image_by_timestamp(stamp)\n if cv_image is None and fail == 0:\n rospy.logerr(\"Unable find image swarm loop too slow!\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n else:\n if fail == 1:\n print(\"Wait 0.02 sec for image come in and re find image\")\n rospy.sleep(0.02)\n cv_image = self.pop_image_by_timestamp(stamp)\n else:\n break\n\n if cv_image is None:\n rospy.logerr(\"Unable to find such image\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n\n\n # print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\\ta=', req.a, '\\tt=', stamp )\n if len(cv_image.shape)==2:\n # print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'\n cv_image = np.expand_dims( cv_image, -1 )\n elif len( cv_image.shape )==3:\n pass\n else:\n assert False\n\n\n assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \\\n \"\\n[whole_image_descriptor_compute_server] Input shape of the image \\\n does not match with the allocated GPU memory. Expecting an input image of \\\n size %dx%dx%d, but received : %s\" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )\n\n ## Compute Descriptor\n start_time = time.time()\n i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]\n print( 'Prepare in %4.4fms' %( 1000. 
*(time.time() - start_time_handle) ) )\n\n # u = self.model.predict( i__image )\n with self.sess.as_default():\n with self.sess.graph.as_default():\n # u = self.model.predict( i__image )\n u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})\n\n print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )\n # print( '\\tinput_image.shape=', cv_image.shape, )\n # print( '\\tinput_image dtype=', cv_image.dtype )\n # print( tcol.OKBLUE, '\\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )\n # print( '\\tdesc.shape=', u.shape, )\n # print( '\\tdesc minmax=', np.min( u ), np.max( u ), )\n # print( '\\tnorm=', np.linalg.norm(u[0]) )\n # print( '\\tmodel_type=', self.model_type )\n\n\n\n ## Populate output message\n result = WholeImageDescriptorComputeTSResponse()\n # result.desc = [ cv_image.shape[0], cv_image.shape[1] ]\n result.desc = u[0,:]\n result.model_type = self.model_type\n print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n return result", "def run(self):\n # Wait for the 'shot' message ready\n self.wait_for_messages()\n # Send the initial states to the server\n self.send_shape_and_states()\n # Wait for the 'method' message ready\n self.wait_for_messages()\n\n # Send the measurement angles to the server\n for y in range(self.__depth):\n self.send_angle_bulks(y)\n\n # Obtain the measurement outcomes\n result = self.get_classical_output()[::-1]\n self.send_back(\n 'local',\n self.__wrap_shot_message(\n 'setResult',\n {'result': result, 'shot': self.__shots},\n )\n )", "def handle(self):\n self.request.recv(1024)\n self.request.sendall(pickle.dumps(self.server.lymphocytes_getter()))", "def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass", "def Run(self, args):\n # Make sure we limit the size of our output\n if args.length > constants.CLIENT_MAX_BUFFER_SIZE:\n raise RuntimeError(\"Can not read buffers this large.\")\n\n data = vfs.ReadVFS(\n args.pathspec,\n args.offset,\n args.length,\n progress_callback=self.Progress)\n result = rdf_protodict.DataBlob(\n data=zlib.compress(data),\n compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)\n\n digest = hashlib.sha256(data).digest()\n\n # Ensure that the buffer is counted against this response. 
Check network\n # send limit.\n self.ChargeBytesToSession(len(data))\n\n # Now return the data to the server into the special TransferStore well\n # known flow.\n self.grr_worker.SendReply(\n result, session_id=rdfvalue.SessionID(flow_name=\"TransferStore\"))\n\n # Now report the hash of this blob to our flow as well as the offset and\n # length.\n self.SendReply(\n rdf_client.BufferReference(\n offset=args.offset, length=len(data), data=digest))", "def start(self) -> None:\n data = b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def procesarImagen(frame):\n gris = convertirGris(frame)\n noruido = filtroBilateral(gris)\n mejorado = histograma(noruido) \n #mejorado=gris\n return mejorado", "def _handle_connection(self, conn):\n conn.serve_all()", "def histogram(self, ctr1, ctr2):\n # check for the current status of the viewer\n # (tiling, aligned by wcs)\n if self.run('tile', via='get') == 'yes':\n allframes = True\n frames = self.run('frame active', via='get').split()\n else:\n allframes = False\n frames = [self.run('frame', via='get')]\n if self.run('wcs align', via='get') == 'yes':\n cs = 'wcs'\n else:\n cs = 'image'\n\n # get any currently available regions\n all_regions = self.run(f'regions -system {cs}',\n allframes=allframes, via='get')\n if not allframes:\n all_regions = [all_regions]\n\n param = self.plot_parameters\n for frame in frames:\n log.info('')\n if allframes:\n self.run('frame ' + frame)\n # check for loaded data\n if not self._loaded_data():\n continue\n\n try:\n results = self.retrieve_data(ctr1, ctr2, photometry=False)\n except (ValueError, TypeError) as err:\n log.debug(f'Error in retrieving Frame {frame} data: {err}')\n continue\n fulldata = results['fulldata']\n data = results['data']\n wdw = results['window']\n hwcs = results['wcs']\n xctr = results['xctr']\n yctr = results['yctr']\n filename = results['filename']\n\n # get file and ext name if possible\n log.info(f'Frame {frame}: {filename}')\n\n log.info(f'Histogram at x={ctr1}, y={ctr2} '\n f'(in {cs} coordinates)')\n\n # get data from region mask or window\n mask = self._region_mask(cs, all_regions, xctr, yctr, hwcs)\n if mask is None:\n if param['window'] is None:\n log.info('Using the full image')\n reg_name = 'full image'\n short_reg_name = 'full'\n hist_data = fulldata\n else:\n log.info(f'Using the analysis window '\n f'(width: {wdw} pixels)')\n reg_name = f'{wdw} pixel window'\n short_reg_name = f'x={xctr:.0f} y={yctr:.0f} 
{wdw}pix'\n hist_data = data\n else:\n reg_name = 'DS9 region'\n short_reg_name = f'x={xctr:.0f} y={yctr:.0f} region'\n hist_data = mask.multiply(fulldata)\n if hist_data is None: # pragma: no cover\n # condition occasionally but unreliably encountered\n # in testing\n log.warning('Region is too small; skipping histogram')\n continue\n hist_data[hist_data == 0] = np.nan\n\n hist_data = hist_data.ravel()\n hist_minmax = (np.nanmin(hist_data), np.nanmax(hist_data),\n np.nansum(hist_data))\n hist_stats = (np.nanmean(hist_data),\n np.nanmedian(hist_data),\n np.nanstd(hist_data))\n nnan = np.isfinite(hist_data)\n clip_stats = stats.sigma_clipped_stats(hist_data[nnan])\n text_stats = [f'Total pixels: {np.sum(nnan)}',\n f'Min, max, sum: '\n f'{hist_minmax[0]:.5g}, {hist_minmax[1]:.5g}, '\n f'{hist_minmax[2]:.5g}',\n f'Mean, median, stddev: '\n f'{hist_stats[0]:.5g}, {hist_stats[1]:.5g}, '\n f'{hist_stats[2]:.5g}',\n f'Clipped mean, median, stddev: '\n f'{clip_stats[0]:.5g}, {clip_stats[1]:.5g}, '\n f'{clip_stats[2]:.5g}']\n for t in text_stats:\n log.info(t)\n\n title = f'Frame {frame}, x={xctr:.0f} y={yctr:.0f} in {reg_name}'\n l1 = f'F{frame} {short_reg_name}'\n hist_kwargs = {'bins': param['bin'], 'label': l1, 'alpha': 0.8}\n if param['hist_limits'] is not None:\n hist_kwargs['range'] = (param['hist_limits'][0],\n param['hist_limits'][1])\n new_hist = {'plot_type': 'histogram', 'args': [hist_data],\n 'kwargs': hist_kwargs}\n\n if param['separate_plots'] or len(self.histogram_data) < 1:\n # summary stat (mean, median, clipped mean, or clipped median)\n summary_stat = str(param.get('summary_stat', 'mean')).lower()\n if 'clip' in summary_stat:\n se = clip_stats[2]\n if 'median' in summary_stat:\n ss = clip_stats[1]\n ss_label = 'Clipped median'\n else:\n ss = clip_stats[0]\n ss_label = 'Clipped mean'\n else:\n se = hist_stats[2]\n if 'median' in summary_stat:\n ss = hist_stats[1]\n ss_label = 'Median'\n else:\n ss = hist_stats[0]\n ss_label = 'Mean'\n l2 = f'{ss_label} {ss:.3g} +/- {se:.3g}'\n\n overplots = [new_hist]\n vlines = [ss, ss - se, ss + se]\n vlabels = [l2, None, None]\n vstyles = ['-', ':', ':']\n for vdata, vlabel, vstyle in zip(vlines, vlabels, vstyles):\n overplots.append({'plot_type': 'vline',\n 'args': [vdata],\n 'kwargs': {'label': vlabel,\n 'color': 'gray',\n 'linewidth': 1,\n 'linestyle': vstyle}})\n overplots.append({'plot_type': 'legend', 'args': []})\n\n plot_data = {'args': [],\n 'kwargs': {'title': title,\n 'xlabel': 'Flux',\n 'ylabel': 'Count',\n 'colormap': param['color']},\n 'plot_kwargs': {},\n 'overplot': overplots}\n self.histogram_data.append(plot_data)\n else:\n # append new histogram to existing ones\n plot_data = self.histogram_data[-1]\n overplots = []\n for plot in plot_data['overplot']:\n if plot['plot_type'] == 'histogram':\n overplots.append(plot)\n overplots.append(new_hist)\n overplots.append({'plot_type': 'legend', 'args': []})\n plot_data['overplot'] = overplots\n plot_data['kwargs']['title'] = 'All histogram regions'\n\n if self.signals is not None:\n self.signals.make_histogram_plot.emit()", "def process(image):\n pass", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n 
hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()", "def process_images(img_xy, img_z):\n logging.info(\"paired {} and {}\".format(img_xy.ts, img_z.ts))\n for item in xy_imgs:\n assert(item.ts >= img_xy.ts)\n for item in z_imgs:\n assert(item.ts >= img_z.ts)\n\n xy_data = np.asarray(img_xy.data, dtype='uint8')\n z_data = np.asarray(img_z.data, dtype='uint8')\n\n xy_tracker.run_tracking(xy_data)\n z_tracker.run_tracking(z_data)\n\n try:\n x, y1 = xy_tracker.get_avg().astype(float)\n z, y2 = z_tracker.get_avg().astype(float)\n msg = dict(x=x, y=y1, z=z)\n msg = json.dumps(msg)\n send_socket_msg(msg)\n except Exception:\n pass", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def run(self):\n\n try:\n self.parent.setEnabled(False)\n\n # Run DataLab processing; compute and write requested logger statistics and spectrograms\n if self.processing_mode == \"screening\":\n self.processing_hub.run_screening()\n elif self.processing_mode == \"integration\":\n self.processing_hub.run_ts_integration()\n\n # Emit processed results to outside worker to present in gui\n self.signal_screening_output_to_gui.emit(self.processing_hub)\n except ValueError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except TypeError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except ZeroDivisionError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except Exception as e:\n msg = \"Unexpected error during processing\"\n self.signal_error.emit(f\"{msg}:\\n{e}\\n{sys.exc_info()[0]}\")\n logging.exception(e)\n finally:\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def process(self):\n if len(self.inputs):\n self._process_input()\n while len(self.servers) > 0:\n self._process_input()\n self._write_file()", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def btn_display_color_hist_callback(self):\n self.show_as_waiting(True)\n ids = self.tbl_images.get_selected_ids()\n names = self.tbl_images.get_selected_names()\n\n for id, name in zip(ids, names):\n ret = api.get_single_image(id, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_fio = b64s_to_fio(ret['data'])\n img_hist_fio = img_proc.fio_color_hist_fio(image_fio)\n self.img_displayer.new_display(\n img_hist_fio, name + ' Histogram')\n self.show_as_waiting(False)", "def raw_image_callback(self, msg):\n if self.pictures_to_take and not self.detection_to_receive:\n self.pictures_to_take -= 1\n # so let's analyse it here and then delete the subscription\n rows = msg.height\n step = msg.step\n cols = msg.width\n dim = int(step / cols)\n pixels = msg.data # of size (steps, nrows)\n # save the image (later we will 
need to analyse it)\n vision_utils.save_picture(pixels, rows, cols, dim, self.name, FOLDER)", "def process(self):\n pass", "def run(self):\n\t\tlogger.info(\"Uploading data... @ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()", "def __do_analysis(self):\n #Step 1: connect to mongodb and pick a streamer\n dbclient = db_connect.DBClient()\n streamer_data = dbclient.analyze_number_of_stream_viewers(self.streamer)\n streamer_messeges_data = dbclient.analyzeStream(self.streamer)\n\n timearr = []\n messagesarr = []\n streamer_timearr = []\n num_chattersarr = []\n\n #create time and messages array for plotting purposes\n for entry in streamer_messeges_data:\n timearr.append(entry['start_time'])\n messagesarr.append(entry['messeges_count'] * entry['messeges_count'])\n #print(entry['start_time'])\n\n #create time and chatters array for plotting purposes\n for entry in streamer_data:\n streamer_timearr.append(entry['deltatime_from_start_of_clip'])\n num_chattersarr.append(entry['num_viewers'])\n\n # print('start time: ' + str(timearr[0]))\n # print('end time: ' + str(timearr[-1]))\n # print('duration: ' + str(timearr[-1] - timearr[0]))\n # print('average views/min = ' + str(sum(messagesarr) / len(messagesarr)))\n\n average_message_count = sum(messagesarr) / len(messagesarr)\n\n averagearr = []\n plotting_time_arr = []\n labelarr = []\n\n for i in range(len(timearr)):\n averagearr.append(average_message_count*1.8)\n #print(str(timearr[i]) + ' converts to ' + str(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i]))\n plotting_time_arr.append(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i])\n labelarr.append(str(i))\n\n plotting_streamer_timearr = []\n for i in range(len(streamer_timearr)):\n plotting_streamer_timearr.append(datetime.datetime(2020, 1, 1, 0, 0) + streamer_timearr[i])\n\n #plot messages and cuttoff\n messeges_over_time_fig = pyplot.figure(1)\n messeges_over_time_fig.set_figheight(15)\n messeges_over_time_fig.set_figwidth(30)\n messeges_over_time_fig.suptitle(self.streamer + \"'s video data\")\n messeges_over_time_sub = messeges_over_time_fig.add_subplot(211)\n\n pyplot.plot(plotting_time_arr,messagesarr,label='messages/min')\n dots = pyplot.plot(plotting_time_arr,messagesarr,'bo',label='messages/min')\n\n #label dots\n count = 0\n last_entry_was_above_line = False\n for i in range(len(plotting_time_arr)):\n #print(str(count) +': comparing ' + str(messagesarr[i]) + ' with ' + str(averagearr[i]))\n if(messagesarr[i] > averagearr[i]):\n if(last_entry_was_above_line):\n #Don't increment the count because this is part of the same clip\n count = count\n else:\n #new clip above the line, increment clip count\n count = count + 1\n messeges_over_time_sub.annotate(count,xy=(plotting_time_arr[i],messagesarr[i]))\n last_entry_was_above_line = True\n else:\n last_entry_was_above_line = False\n # messeges_over_time_sub.annotate('NA',xy=(plotting_time_arr[i],messagesarr[i]))\n\n #finish plotting\n pyplot.plot(plotting_time_arr, averagearr,'',label='average')\n pyplot.gcf().autofmt_xdate()\n pyplot.ylabel('Messeges*Messeges')\n pyplot.xlabel('Time')\n\n viewers_over_time_sub = messeges_over_time_fig.add_subplot(212)\n\n pyplot.plot(plotting_streamer_timearr,num_chattersarr,label='num chatters')\n pyplot.ylabel('Chatters')\n pyplot.xlabel('Time')\n\n pyplot.tight_layout()\n pyplot.savefig(output_file_location+self.streamer+'.png')\n print('saved chart to ' + output_file_location+self.streamer+'.png')\n # pyplot.show()\n return average_message_count, streamer_messeges_data", "def 
update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def connectionMade(self):", "def analyze(self):\n # turn off all indicator lights\n self._stop_all()\n \n # run, but catch exceptions and abort if necessary\n try:\n # setup\n self.analysis_led[1].blink\n ims_left = self.num_images\n fluid_left = True\n \n data_session = Data(self.data_path)\n \n # run motor & imaging\n while self.power.update() and ims_left > 0:\n # run pump\n self.motor.run(self.pump_runtime)\n \n if not self.power.update():\n break\n \n # image\n time.sleep(self.rest_time)\n self.cam_led.on\n self.camera.capture()\n data_session.fetch_data()\n self.cam_led.off\n \n # subtract from remaining images every cycle\n # if the fluid sensor turns off, set remaining\n # images to the maximum possible remaining\n ims_left -= 1\n if fluid_left and \\\n not self.fluid.update() and \\\n ims_left > self.samps_after_sensor_off:\n fluid_left = False\n ims_left = self.samps_after_sensor_off\n \n # change indicator lights, given complete or power off\n if ims_left == 0:\n # set analysis to green\n self.analysis_led[1].off\n self.analysis_led[0].on\n else:\n # set analysis to solid red\n self.analysis_led[1].on\n \n # transmit data whether or not power switched off\n self.data_led.blink\n data = data_session.prepare_broadcast()\n broadcast_session = Broadcast(self.peer_ip)\n broadcast_session.broadcast_data(data)\n self.data_led.off\n \n except:\n # turn on error indicator and turn off all else\n # do not transmit data\n self._stop_all()\n self.error.on", "def callback(self,data):\n self.cvtImage(data)\n\n \"\"\" Do some image processing; flip, resize, and etc\"\"\"\n self.imgProcessing()\n\n \"\"\" displaying an OpenCV image \"\"\"\n cv2.imshow(self.cv_window_name, self.cv_image)\n cv2.waitKey(1)\n# ------------------------------------------------------------------------------\n\n try:\n \"\"\" coverting the uint8 OpenCV image to ROS image data \"\"\"\n \"\"\" Publisher.publish() -- explicit way \"\"\"\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=hallo user=hallo password=hallo\")\n cur = 
conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()", "def post_handler(_dev_idx, raw_res, _captured_frames_path):\n global CUR_IMAGE\n if CUR_IMAGE % 2:\n print(\"\\nFox classification\")\n else:\n print(\"\\nCat classification\")\n\n scores, _labels = postprocess(raw_res[0])\n top_indices(scores[0], 3)\n\n CUR_IMAGE += 1", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])", "def gen(self):\n\n # context = zmq.Context()\n # receiver = context.socket(zmq.PULL)\n self.receiver.connect(inference_url())\n\n while self.is_opened:\n ret = self.receiver.recv_pyobj()\n\n nparr = np.frombuffer(np.array(ret['data']), np.uint8)\n\n # logger.warning('Receive: %s', ret['ts'])\n # logger.warning('Time elapsed: %s', (time.time()-self.keep_alive))\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # ret2 = receiver.recv_pyobj()\n # logger.warning(ret2['ts'])\n # logger.warning(ret2['shape'])\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n cv2.imencode('.jpg', img)[1].tobytes() + b'\\r\\n')\n self.receiver.close()", "def process(self):\n return self.output_image", "def main():\n # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n # compression time. If read raw images during training, use 0 for faster IO speed.\n\n # create output folders if they don't already exist\n for dir in [save_folder, save_mask_folder,save_hist_plot_folder]:\n if dir != None:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('mkdir [{:s}] ...'.format(dir))\n\n else:\n # print('Folder [{:s}] already exists. 
Exit...'.format(save_folder))\n # sys.exit(1)\n pass # uncomment above two lines for ease of working, if necessary\n\n img_list = []\n for root, dirsfoo, file_list in sorted(os.walk(input_folder)): # +'/*SR.tif'\n for x in file_list: # assume only images in the input_folder\n if x.endswith(\"SR.tif\"):\n path = os.path.join(root, x) \n img_list.append(path)\n break\n # img_list = ['/data_dir/Scenes/20190619_191648_25_106f_3B_AnalyticMS_SR.tif'] # for testing\n def update(arg):\n pbar.update(arg)\n # img_list=img_list[238:270] # for testing\n pbar = ProgressBar(len(img_list))\n pool = Pool(n_thread) # (n_thread)\n for path in img_list:\n if input_mask_folder==None:\n path_mask=None\n else:\n path_mask=name_lookup(path) # lookup mask path\n pool.apply_async(worker,\n args=(path, save_folder, crop_sz, step, thres_sz, compression_level, path_mask, save_mask_folder),\n callback=update)\n pool.close()\n pool.join()\n print('All subprocesses done.')", "def _image_callback(self, msg):\n\n try:\n cv_image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n dil_size = self._sliderDil.value()\n eros_size = self._sliderEros.value()\n\t self.cv_image = self._image_widget.calc_bbox(cv_image, dil_size, eros_size)\n self.image = self._image_widget.set_image(cv_image)\n\n if self.save:\n\t\tif self.counter == 5:\n \t self.numImg += 1\n \t self._imgNum_label.setText(str(self.numImg))\n \t self.store_image(self._image_widget.get_image(), self._image_widget.get_bbox(), self.cls_id, self._image_widget.get_mask())\n\t\t self.counter = 0\n\t \telse:\n\t\t self.counter += 1\n except CvBridgeError as e:\n rospy.logerr(e)", "def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def execute(self):\n self.advertise_and_connect()\n self.display_data()\n self.disconnect()", "def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n 
self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"", "def run(self):\n\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n orderedID = 0\n\n# TO DO - perhaps treat empty results differently? Place the ID in a \"discard\"\n# list and do the stats update immediately. Then when incrementing the \n# orderedID, if the updated value is in the discard list, increment again..\n # Begin procesing results queue\n ID = None\n data = None\n c = 0\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n c += 1\n ID, data = result\n while self.clients and ID != orderedID:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n c += 1\n self.cacheIDX.append(ID)\n self.cache.append(data)\n ID, data = result\n\n # Data is next in sequence, write it out and proceed\n self.writePairs(data)\n orderedID += 1\n while orderedID in self.cacheIDX:\n idx = self.cacheIDX.index(orderedID)\n self.cacheIDX.pop(idx)\n data = self.cache.pop(idx)\n self.writePairs(data)\n orderedID += 1\n\n # Processing is completed but the cache may not be empty. Drain it\n # now (it should contain any missing objects at this point)\n if len(self.cacheIDX):\n while orderedID in self.cacheIDX:\n idx = self.cacheIDX.index(orderedID)\n self.cacheIDX.pop(idx)\n data = self.cache.pop(idx)\n self.writePairs(data)\n orderedID += 1\n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()\n self.closeFileHandles()", "def image_cb(self, msg):\n rospy.logdebug(\"TLDetector.image_cb\")\n self.__has_image = True\n self.__camera_image = msg\n\n cv_image = self.__bridge.imgmsg_to_cv2(msg, \"bgr8\")\n light_wp, state = self.__process_traffic_lights()\n if self.__mode == LABEL_MODE and not self.__classification_done and state != 4:\n self.__classification_done = self.__light_classifier.save_image(\n cv_image, state\n )\n if self.__classification_done:\n rospy.loginfo(\"TLDetector.image_cb: Done generating labels.\")\n\n \"\"\"\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n \"\"\"\n self.__publish_traffic_light_state(light_wp, state)", "def main():\n\n # Configure logging\n module = os.path.basename(__file__).strip('.py')\n setup_logging(module)\n\n # Parse arguments\n args = parse_args()\n\n database_interface.base.metadata.create_all()\n\n # Query the outputs table for a list of lightcurves\n lightcurves = get_lightcurves(args.product_type)\n\n # For each lightcurve, compute statistics and update the database\n logging.info('{} datasets to process'.format(len(lightcurves)))\n for dataset in lightcurves:\n logging.info('Processing {}'.format(dataset))\n stats_dict = get_stats(dataset)\n update_stats_table(stats_dict, dataset)\n\n logging.info('Processing complete')", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.state(args)", "def main():\n\n # Load the API credentials\n with open('./flickr_api.txt') as f:\n keys = yaml.safe_load(f)\n\n # Set the API credentials\n flickr = flickrapi.FlickrAPI(keys['key'], keys['secret'])\n\n # Load the data\n df = pd.read_csv('./beauty-icwsm15-dataset.tsv', sep=\"\\t\", index_col=False)\n total_images = df.shape[0] * 1.0\n df['downloaded'] = None\n\n query_counter = 0.0\n for i, photo_id in enumerate(df['#flickr_photo_id']):\n if query_counter % 100.0 == 0:\n print(str(i) + '/' + str(total_images) + ' images (i.e. ' +\n str(np.round(i / total_images, 3) * 100) + \"%) complete.\")\n time.sleep(15)\n path = OUTPUT_FOLDER + str(photo_id) + \".jpg\"\n if os.path.exists(path):\n df.ix[i, 'downloaded'] = True\n continue\n try:\n query_counter += 1.0\n photo_response = flickr.photos.getInfo(photo_id=photo_id)\n download_photo(photo_id, photo_response)\n df.ix[i, 'downloaded'] = True\n except flickrapi.exceptions.FlickrError:\n df.ix[i, 'downloaded'] = False\n continue\n\n df.to_csv('./download_summary.tsv', sep=\"\\t\", index=False)", "def beforeLoop(self):\n\t\tself.loadInputFiles()\n\t\tself.loadOutputFile()\n\t\tself.addHistogram1D(\"True_Enu\", \"True Neutrino Energy [GeV]\", 100, 0.0, 10.0)#Histogram of neutrino energy\n\t\tself.addHistogram1D(\"True_Enu_Delta\", \"True Neutrino Energy from Delta producing events [GeV]\", 100, 0.0, 10.0)#Histogram of neutrino energy from Delta producing events\n\t\tself.addHistogram1D(\"Interaction_Mode_Delta\", \"NEUT interaction codes of Delta producing events\", 53, 0, 53)#Histogram of interaction modes of Delta producing events\n\t\tself.addHistogram2D(\"Vertex_Location_XY\", \"Location of interaction vertices in the X-Y plane of the detector\",100,-3000,3000,100,-3000,3000)#Histogram of vertex location in XY plane\n\t\tself.addHistogram2D(\"Vertex_Location_YZ\", \"Location of interaction vertices in the Y-Z plane of the detector\",100,-3000,3000,100,-3000,3000)#Histogram of vertex location in YZ plane\n\t\tself.addHistogram1D(\"True_Enu_Delta_inFGD\", \"Neutrino energies of FGD Delta producing events (GeV)\",100,0,10)#Histogram of neutrino energy of Deltas produced in the FGD\n\t\tself.addHistogram1D(\"Delta_Momentum\", \"Momentum of Delta baryons (GeV/c)\",100,0,5)#Histogram of neutrino energy of Deltas produced in the FGD\n\t\tself.addHistogram1D(\"Proton_Momentum\", \"Momentum of Protons from Delta decays (GeV/c)\",100,0,4)#Histogram of proton momentum from Delta decays\n\t\tself.addHistogram1D(\"Pion_Momentum\", \"Momentum of Pions from Delta decays (GeV/c)\",100,0,4)#Histogram of pion momentum from Delta decays", "def _handle_image(self, image_msg):\n # converting the ROS image message to CV2-image\n image = 
self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')\n\n # Skip if image is None\n if image is None:\n rospy.logdebug(\"Image content is None :(\", logger_name=\"vision\")\n return\n\n # Check if its the first image callback\n if self._first_image_callback:\n # Check if a cap may be on the camera\n self._handle_forgotten_camera_cap(image)\n\n # Instances that should be notified with the new image\n internal_image_subscribers =[\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n self._field_boundary_detector,\n self._obstacle_detector,\n self._red_obstacle_detector,\n self._blue_obstacle_detector,\n self._goalpost_detector,\n self._line_detector,\n self._ball_detector,\n self._debug_image_creator,\n ]\n\n # Distribute the image to the detectors\n # Iterate over subscribers\n for vision_object in internal_image_subscribers:\n # Send image\n vision_object.set_image(image)\n\n # Check if the vision should run the conventional and neural net part parallel\n if self._config['vision_parallelize']:\n # Create and start threads for conventional calculation and neural net\n #fcnn_thread = Thread(target=self._ball_detector.compute)\n\n conventional_thread = Thread(target=self._conventional_precalculation())\n\n conventional_thread.start()\n #fcnn_thread.start()\n\n # Wait for both threads\n conventional_thread.join()\n #fcnn_thread.join()\n else:\n # Calc conventional calculation and neural net\n self._ball_detector.compute()\n self._conventional_precalculation()\n\n ########\n # Ball #\n ########\n\n # Get a number of top balls under the field boundary, which have an high enough rating\n all_balls = self._ball_detector.get_top_candidates(count=self._max_balls)\n balls_under_field_boundary = \\\n self._field_boundary_detector.candidates_under_convex_field_boundary(\n all_balls,\n self._ball_candidate_y_offset)\n top_balls = candidate.Candidate.rating_threshold(\n balls_under_field_boundary,\n self._ball_candidate_threshold)\n # check whether there are ball candidates\n if top_balls:\n # Convert ball cancidate list to ball message list\n list_of_balls = map(ros_utils.build_ball_msg, top_balls)\n # Create balls msg with the list of balls\n balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)\n # Publish balls\n self._pub_balls.publish(balls_msg)\n\n # Debug draw all ball candidates\n self._debug_image_creator.draw_ball_candidates(\n all_balls,\n (0, 0, 255))\n # Debug draw possible ball candidates under the field boundary\n self._debug_image_creator.draw_ball_candidates(\n balls_under_field_boundary,\n (0, 255, 255))\n # Debug draw top ball candidate\n self._debug_image_creator.draw_ball_candidates(\n top_balls,\n (0, 255, 0),\n thickness=2)\n\n #############\n # Obstacles #\n #############\n\n # Init list for obstacle msgs\n list_of_obstacle_msgs = []\n # Add red obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,\n self._red_obstacle_detector.get_candidates()))\n # Add blue obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,\n self._blue_obstacle_detector.get_candidates()))\n # Add UFO's (Undefined Found Obstacles)\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,\n self._unknown_obstacle_detector.get_candidates()))\n # Build obstacles msgs containing all obstacles\n obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, 
list_of_obstacle_msgs)\n # Publish obstacles\n self._pub_obstacle.publish(obstacles_msg)\n\n # Debug draw unknown obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._unknown_obstacle_detector.get_candidates(),\n (0, 0, 0),\n thickness=3)\n # Debug draw red obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._red_obstacle_detector.get_candidates(),\n (0, 0, 255),\n thickness=3)\n # Debug draw blue obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._blue_obstacle_detector.get_candidates(),\n (255, 0, 0),\n thickness=3)\n\n ########\n # Goal #\n ########\n\n # Get all goalposts under field boundary\n goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(\n self._goalpost_detector.get_candidates(),\n self._goal_post_field_boundary_y_offset)\n\n # Get goalpost msgs and add them to the detected goal posts list\n goal_post_msgs = ros_utils.build_goal_post_msgs(goal_posts)\n # Create goalposts msg\n goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)\n # Check if there is a goal\n if goal_posts_msg:\n # If we have a goal, lets publish it\n self._pub_goal_posts.publish(goal_posts_msg)\n\n # Debug draw all goal posts\n self._debug_image_creator.draw_obstacle_candidates(\n self._goalpost_detector.get_candidates(),\n (180, 180, 180),\n thickness=3)\n # Debug draw goal posts which start in the field\n self._debug_image_creator.draw_obstacle_candidates(\n goal_posts,\n (255, 255, 255),\n thickness=3)\n\n #########\n # Lines #\n #########\n if self._use_line_points:\n # Build a LineSegmentInImage message for each linepoint\n line_points = self._line_detector.get_linepoints()\n # Create line segments\n line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)\n # Create line msg\n line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)\n # Publish lines\n self._pub_lines.publish(line_msg)\n\n # Draw debug line points\n self._debug_image_creator.draw_points(\n line_points,\n (0, 0, 255))\n\n if self._use_line_mask:\n # Define detections (Balls, Goal Posts) that are excluded from the line mask\n excluded_objects = top_balls + goal_posts\n # Get line pixel mask\n line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)\n # Create line mask message\n line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')\n # Publish line mask\n self._pub_line_mask.publish(line_mask_message)\n\n # Draw debug line mask\n self._debug_image_creator.draw_mask(\n line_mask,\n color=(255, 0, 0),\n opacity=0.8)\n\n ##################\n # Field boundary #\n ##################\n\n # Get field boundary msg\n convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()\n # Build ros message\n convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)\n # Publish field boundary\n self._pub_convex_field_boundary.publish(convex_field_boundary_msg)\n\n # Debug draw convex field boundary\n self._debug_image_creator.draw_field_boundary(\n convex_field_boundary,\n (0, 255, 255))\n # Debug draw field boundary\n self._debug_image_creator.draw_field_boundary(\n self._field_boundary_detector.get_field_boundary_points(),\n (0, 0, 255))\n\n #########\n # Debug #\n #########\n '''\n if self._config['neural_network_type'] == 'fcnn':\n # Publish fcnn output for the region of interest under the field boundary (for the world model)\n if 
self._ball_fcnn_publish_output:\n roi_msg = ros_utils.build_fcnn_region_of_interest(\n self._ball_detector.get_fcnn_output(),\n self._field_boundary_detector,\n image_msg.header,\n self._config['ball_fcnn_publish_field_boundary_offset'])\n self._pub_ball_fcnn.publish(roi_msg)\n\n # Publish whole fcnn output for debug purposes\n if self._publish_fcnn_debug_image:\n self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())\n '''\n # Check, if HSV mask images should be published\n if self._publish_HSV_mask_image:\n # Mask images\n white_mask = self._white_color_detector.get_mask_image()\n red_mask = self._red_color_detector.get_mask_image()\n blue_mask = self._blue_color_detector.get_mask_image()\n\n # Publish mask images\n self._pub_white_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))\n self._pub_red_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))\n self._pub_blue_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))\n\n # Check, if field mask image should be published\n if self._publish_field_mask_image:\n if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):\n # Mask image\n dyn_field_mask = self._field_color_detector.get_mask_image()\n static_field_mask = self._field_color_detector.get_static_mask_image()\n # Publish mask image\n self._pub_dynamic_color_lookup_table_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))\n else:\n # Mask image\n field_mask = self._field_color_detector.get_mask_image()\n # Publish mask image\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))\n\n # Check if we should draw debug image\n if self._debug_image_creator.active:\n # publish debug image\n self._pub_debug_image.publish(\n ros_utils.build_image_msg(\n image_msg.header,\n self._debug_image_creator.get_image(),\n 'bgr8'))", "def start_processing(self):", "def caption_image():\n image_format = \"not jpeg\"\n\n st = current_time()\n # get beam_size\n beam_size = int(request.args.get(\"beam_size\", \"3\"))\n # get max_caption_length\n max_caption_length = int(request.args.get(\"max_caption_length\", \"20\"))\n # get image_data\n if request.method == 'POST':\n image_data = request.get_data()\n else:\n url = request.args.get(\"url\")\n c_type, image_data = get_remote_file(url)\n if not image_data:\n return Response(status=400, response=jsonify(error=\"Could not HTTP GET %s\" % url))\n if 'image/jpeg' in c_type:\n image_format = \"jpeg\"\n\n # use c_type to find whether image_format is jpeg or not\n # if jpeg, don't convert\n if image_format == \"jpeg\":\n jpg_image = image_data\n # if not jpeg\n else:\n # open the image from raw bytes\n image = Image.open(BytesIO(image_data))\n # convert the image to RGB format, otherwise will give errors when converting to jpeg, if the image isn't RGB\n rgb_image = image.convert(\"RGB\")\n # convert the RGB image to jpeg\n image_bytes = BytesIO()\n rgb_image.save(image_bytes, format=\"jpeg\", quality=95)\n jpg_image = image_bytes.getvalue()\n image_bytes.close()\n\n read_time = current_time() - st\n # restart counter\n st = current_time()\n\n generator = caption_generator.CaptionGenerator(app.model,\n app.vocab,\n beam_size=beam_size,\n max_caption_length=max_caption_length)\n captions = generator.beam_search(app.sess, 
jpg_image)\n\n captioning_time = current_time() - st\n app.logger.info(\"Captioning time : %d\" % captioning_time)\n\n array_captions = []\n for caption in captions:\n sentence = [app.vocab.id_to_word(w) for w in caption.sentence[1:-1]]\n sentence = \" \".join(sentence)\n array_captions.append({\n 'sentence': sentence,\n 'confidence': math.exp(caption.logprob)\n })\n\n response = {\n 'beam_size': beam_size,\n 'max_caption_length': max_caption_length,\n 'captions': array_captions,\n 'time': {\n 'read': read_time,\n 'captioning': captioning_time,\n 'units': 'ms'\n }\n }\n return Response(response=json.dumps(response), status=200, mimetype=\"application/json\")", "def main_loop(self):\n # Get current image frame from the camera\n frame = self.camera.get_frame()\n self.h,self.w,_c= frame.shape\n \n self.current_centroid = centroid(np.sum(frame,2)/3)\n \n #display unaltered frame\n #imshow(\"Original\",frame)\n \n #collect the output frame for display\n output_frame = process_image(frame,self.current_centroid,self.centroid_1,self.centroid_2,self.centroid_1_active,self.centroid_2_active)\n \n #show the processed/annotated output frame\n imshow(\"Processed\",output_frame)\n \n #handle any key presses\n self.key_handler()", "def server():", "def server():", "def main():\n SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n SOCKET.bind(settings.STASH_ADDRESS)\n SOCKET.listen(1)\n\n while 1:\n connection, address = SOCKET.accept()\n command, key, value = pickle.loads(connection.recv(4096).decode())\n\n if command == 'STATS':\n response = handle_stats()\n elif command in ('GET', 'INCREMENT', 'DELETE'):\n response = COMMAND_HANDLERS[command](key)\n elif command in ('PUT', 'APPEND'):\n response = COMMAND_HANDLERS[command](key, value)\n else:\n response = (False, 'Unknown command type [{}]'.format(command))\n\n update_stats(command, response[0])\n connection.sendall(pickle.dumps(response))\n connection.close()\n\n SOCKET.shutdown(socket.SHUT_RDWR)\n SOCKET.close()", "def doAllPlots ():\n #df = processIp (\"18-06-01-1-attack.pcap\", \"ec:1a:59:79:f4:89\")\n #df.to_csv (\"df.csv\", index=False)\n df = pd.read_csv (\"df.csv\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropyWithThreshold (df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n \"\"\"\n Traffic flow graph\n \"\"\"\n #df = processTrafficFlow (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotTrafficFlow (df)\n\n \"\"\"\n Entropy for source port\n \"\"\"\n #df = processSrcPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df)\n\n \"\"\"\n Entropy for destination port\n \"\"\" \n #df = processDstPort (\"18-06-01-short.pcap\", \"ec:1a:59:79:f4:89\")\n #plotEntropy (df) \n\n \"\"\"\n It will be implemented next day\n df = processPorts (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n\n df = processProtocols (\"18-06-01.pcap\", \"ec:1a:59:79:f4:89\")\n attack_df = parseAnnotation (\"ec1a5979f489.csv\")\n ret = plotThresholds (df, attack_df)\n plotEntropy (df, attack_df, ret[2])\n createUtilityHistogram (ret[0], ret[1]) \n \"\"\"\n return", "def handle_analyzer(f_name, img_name, out_fname):\n # Counting tweets\n tweet_arr, handle_imp = load_pickle(f_name)['dat'], Counter()\n logging.info('Going through tweets now')\n for tweet in 
tweet_arr:\n handle_imp[tweet['handle']] += 1\n plot_save_dat(handle_imp, out_fname, img_name, 'Number of tweets', 'Probablity')\n logging.info('Saved histogram with number of tweets from handle vs. freq to: {}'.format(img_name))", "def process_and_save(db: Broker, uid, tiff_path: str, data_key: str) -> None:\n run = db[uid]\n dk_uid = run.start.get(\"sc_dk_field_uid\", \"\")\n dk_run = db[dk_uid] if dk_uid else None\n dk_image = _mean(dk_run.data(data_key)) if dk_run else None\n image = _mean(run.data(data_key))\n image -= dk_image\n tw = TiffWriter(tiff_path)\n tw.write(image)\n return", "def event_loop(self):\n self.timer.blockSignals(True) # prevent runaway condition if timer adds more events to loop during processing\n QCoreApplication.processEvents() # process all incoming signals before going onto next image\n \"\"\" \n During the processEvents call, all incoming signals will be processed \n before moving onto the next image. Events processed here include:\n - updating the new ROIs with self.update_roi_coords()\n - recieving new images and adding them to the process queue with self.recieve_image()\n - updating the number of ROI groups with self.update_num_roi_groups()\n - updating the number of ROIs per group with self.update_num_rois_per_group()\n \"\"\"\n\n self.process_next_image() # processes one image from the queue if it is not empty\n self.save() # only saves data if queue is empty and self.should_save = True\n\n self.timer.blockSignals(False) # allow the timer to trigger the event loop again", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def processing(request):\n image = get_processing_image(str(settings.BASE_DIR) + request.session['image'])\n image_name = str(request.session['image']).split('/')[-1]\n db_object = ImageProc.objects.get(id=request.session['id'])\n\n # create no illumination image\n removed_illumination = remove_uneven_illumination(image)\n removed_illumination_pil = get_pil_image(removed_illumination)\n db_object.image_illumination_removed.save('no_ilumination ' + image_name,\n ContentFile(removed_illumination_pil), save=False)\n\n # create contours\n image_contours = create_contours_image(image.copy())\n image_contours_pil = get_pil_image(image_contours)\n db_object.image_contours.save('contours ' + image_name, ContentFile(image_contours_pil), save=False)\n\n # create 
axes\n image_axes = create_axes_image(image.copy())\n image_axes_pil = get_pil_image(image_axes)\n db_object.image_axes.save('axes ' + image_name, ContentFile(image_axes_pil), save=False)\n\n # create CLAHE\n image_clahe = clahe_image(image.copy())\n image_clahe_pil = get_pil_image(image_clahe)\n db_object.image_clahe.save('clahe ' + image_name, ContentFile(image_clahe_pil), save=False)\n\n colour_features = colour_quantification(image)\n\n db_object.white_color = colour_features['WHITE']\n db_object.red_color = colour_features['RED']\n db_object.light_brown_color = colour_features['LIGHT_BROWN']\n db_object.dark_brown_color = colour_features['DARK_BROWN']\n db_object.blue_gray_color = colour_features['BLUE_GRAY']\n db_object.black_color = colour_features['BLACK']\n\n asymmetry_features = asymmetry_quantification(image, enable_processing_features=True)\n\n db_object.a_p_feature = asymmetry_features['a_p']\n db_object.b_p_feature = asymmetry_features['b_p']\n db_object.a_b_feature = asymmetry_features['a_b']\n db_object.b_b_feature = asymmetry_features['b_b']\n db_object.area_p_feature = asymmetry_features['A_p']\n db_object.area_c_feature = asymmetry_features['A_c']\n db_object.solidity_feature = asymmetry_features['solidity']\n db_object.extent_feature = asymmetry_features['extent']\n db_object.equivalent_diameter_feature = asymmetry_features['equivalent diameter']\n db_object.circularity_feature = asymmetry_features['circularity']\n db_object.p_p_feature = asymmetry_features['p_p']\n db_object.b_p_a_p_feature = asymmetry_features['b_p/a_p']\n db_object.b_b_a_b_feature = asymmetry_features['b_b/a_b']\n db_object.entropy_feature = asymmetry_features['entropy']\n\n db_object.save()\n\n request.user.profile.processed_images += 1\n request.user.save()\n\n return redirect('processing_app:results', request.session['id'])", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 
64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n rospy.loginfo(\"image info: {}\".format(image.numpy().shape))\n\n # Run pose estimation\n boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)\n\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n\n # Convert detected boxes to ROS type and publish\n ros_boxes = self.bridge.to_ros_boxes(boxes)\n if self.bbox_publisher is not None:\n self.bbox_publisher.publish(ros_boxes)\n rospy.loginfo(\"Published face boxes\")\n\n # Annotate image and publish result\n # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,\n # only used to test the corresponding bridge methods\n odr_boxes = self.bridge.from_ros_boxes(ros_boxes)\n image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)\n if self.image_publisher is not None:\n message = self.bridge.to_ros_image(np.uint8(image))\n self.image_publisher.publish(message)\n rospy.loginfo(\"Published annotated image\")", "def main():\n\n database = MongoDbUtil('ro').database()\n\n tag = 'Px1id'\n daemons = ['daq_files_watcher', 'jobs_validator', 'submitter']\n colls = ['%s_%s'%(coll, tag) for coll in daemons]\n\n datas = []\n for daemon, coll in zip(daemons, colls):\n last_doc = database[coll].find().skip(database[coll].count()-1)[0]\n accum_stats = last_doc['accum_stats']\n\n vals = {}\n timestamps = []\n for key in accum_stats.keys():\n vals[key] = []\n\n for doc in database[coll].find():\n timestamps.append(doc['date'])\n for key in vals:\n vals[key].append(doc['accum_stats'][key])\n\n urls = []\n for key in vals:\n urls.append(draw(timestamps, vals[key], daemon, key))\n\n datas.append({'title': daemon, 'urls': urls})\n\n make_index_file(tag, datas)", "def application(environ, start_response):\n\n output 
= []\n\n if environ['REQUEST_METHOD'] == 'GET':\n return bad_request(start_response)\n\n ##### parameters are never safe\n try:\n content_length = int(environ['CONTENT_LENGTH'])\n except ValueError:\n return bad_request(start_response)\n\n # maximum file length is 5MiB\n if content_length > 5*1024*1024:\n return bad_request(start_response)\n \n user_agent = environ.get('HTTP_USER_AGENT', '')\n remote_addr = environ.get('REMOTE_ADDR', '')\n \n # add CONTENT_TYPE check\n # FieldStorage is not the best solution because it reads the entire thing\n # into memory; what I need to do is get parse_headres and parse_multipart\n # working.\n post_env = environ.copy()\n post_env['QUERY_STRING'] = ''\n post = \\\n FieldStorage(\n fp=environ['wsgi.input'],\n environ=post_env,\n keep_blank_values=True)\n\n raw_data, processed_post = handle_new_post(post, user_agent, remote_addr)\n \n if raw_data is None: # if data is fine, processed_post is fine.\n return bad_request(start_response)\n\n images = adjust_image_resolution(raw_data)\n\n if images is None: # should all be good.\n bad_request(start_response)\n return output\n\n entry = insert_post_into_db(processed_post)\n if entry is None:\n return bad_request(start_response)\n\n success = insert_data_into_storage(str(entry), images)\n if success is False:\n # need to delete the database entry.\n return bad_request(start_response)\n\n output.append(dumps({\"id\" : str(entry)}, indent=4))\n\n # send results\n output_len = sum(len(line) for line in output)\n start_response('200 OK',\n [('Content-type', 'application/json'),\n ('Content-Length', str(output_len))])\n\n return output", "def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.image_sub.unregister()\n\n except CvBridgeError as e:\n rospy.logerr(e)\n (rows, cols, channels) = cv_image.shape\n #result = cv2.fastNlMeansDenoisingColored(cv_image, None, 20, 10, 7, 21)\n image = cv_image\n # Resize a 720x1280 image to 360x640 to fit it on the screen\n \"\"\"resized_image = cv2.resize(image, (720 / 2, 1280 / 2))\n cv2.imshow(\"/eyrc/vb/camera_1/image_raw\", resized_image)\n rospy.loginfo(self.get_qr_data(image))\"\"\"\n _,threshold = cv2.threshold(image, 70, 255, cv2.THRESH_TRUNC)\n self.get_qr_data(threshold)\n cv2.waitKey(3)", "def handle(self):\n while True:\n try:\n chunk = self.connection.recv(4)\n if len(chunk) < 4:\n break\n slen = struct.unpack(\">L\", chunk)[0]\n chunk = self.connection.recv(slen)\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk))\n obj = self.unPickle(chunk)\n msg = obj['msg']\n if type(msg) is str:\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)\n else:\n self.statsThread.addRecord(msg)\n timeDict = msg['time'] \n if timeDict['total'] > LOG_THRESHOLD: \n #obj['msg'] = 'Processed ' + msg['request'] + ' on ' + msg['file'] + ' in ' + ('%.3f' % msg['time']['total']) + ' seconds'\n logMsg = 'Processed ' + msg['request'] + ' on ' + msg['file'] + '. 
Timing entries in seconds: '\n addComma=False\n for SECTION in self.SECTION_KEYS:\n timeKey=SECTION.strip()\n if timeDict.has_key(timeKey):\n if addComma:\n logMsg += ','\n else:\n addComma = True\n logMsg += ' ' + timeKey + ' ' + ('%.3f' % timeDict[timeKey])\n \n obj['msg'] = logMsg\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)\n except Exception, e:\n import sys, traceback, string\n t, v, tb = sys.exc_info()\n print string.join(traceback.format_exception(t, v, tb))", "def main_recognition():\n if request.method == 'POST':\n # print(request.url)\n # stream = BytesIO(request.data)\n # image = Image.open(stream).convert(\"RGBA\")\n # path = 'C:/Users/13/Documents/FRS_v1/path.png'\n # image = image.save(path)\n # stream.close()\n #df = faces_info_export(path)\n print(request.url)\n stream = BytesIO(request.data)\n img_pil=Image.open(stream).convert(\"RGB\")\n stream.close()\n img_cv=np.array(img_pil)\n try:\n df = faces_info_export(img_cv)\n return df.to_json(orient='index')\n except SystemError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n except AttributeError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n if request.method == 'GET':\n # ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n df = faces_info_export(\"C:/Users/13/Documents/FRS_v1/test_image.jpg\")\n return df.to_json(orient='index')", "def process_output(\n self,\n input_data: List[np.ndarray],\n output_data: List[Any]):\n assert len(input_data) == 1\n assert len(output_data) == 1\n img = input_data[0]\n output_data = output_data[0]\n\n if self.input_memory_layout == 'NCHW':\n img = img.transpose(1, 2, 0)\n\n if self.input_color_format == 'BGR':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2RGBA)\n\n img = cv2.resize(img, (self.width, self.height))\n\n img = self.visualize_output(img, output_data)\n\n dpg.set_value(f'input_image_texture_{self.id}', img)\n\n self._draw_fps_counter(dpg.get_frame_rate())\n\n if self.id == 0:\n dpg.render_dearpygui_frame()\n self.swap_layers()", "def main():\n time_start = perf_counter()\n\n args = parse_args(sys.argv[1:]).ordered()\n _, opts = next(args)\n log_level = 0\n try:\n log_level = (0, 20, 10)[opts.verbosity]\n mpl_log_level = log_level + 10 if log_level > 0 else log_level\n except IndexError:\n log_level = 10\n mpl_log_level = log_level\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n # set level for all loggers\n # separate log level for matplotlib because it's so verbose\n for logger in loggers:\n if logger.name.startswith(\"matplotlib\"):\n logger.setLevel(mpl_log_level)\n else:\n logger.setLevel(log_level)\n\n LOG.debug(\"Program opts:\\n%s\", pformat(vars(opts)))\n\n # main vars\n inputs = []\n processed = []\n # im: Optional[Image.Image] = None\n im: Image.Image | np.ndarray | None = None\n in_file_path: Optional[str]\n in_image_size = Size(0, 0)\n in_file_size = 0\n in_dpi = 0\n in_exif: Optional[dict] = None\n out_exif: bytes = b\"\"\n out_exif_size = 0\n out_file_path = None\n out_image_size = Size(0, 0)\n out_file_size = 0\n no_op = False\n\n for cmd, arg in args:\n LOG.debug(\"Processing command %s with args:\\n%s\", cmd, pformat(vars(arg)))\n\n if cmd == \"open\":\n in_file_path = arg.input.name\n in_file_size = os.path.getsize(in_file_path) # type: ignore\n im = Image.open(arg.input)\n in_image_size = Size(*im.size)\n LOG.info(\"Input dims: %s\", in_image_size)\n try:\n in_exif = piexif.load(in_file_path)\n del in_exif[\"thumbnail\"]\n # 
LOG.debug(\"Exif: %s\", in_exif)\n in_dpi = im.info[\"dpi\"]\n except KeyError:\n pass\n LOG.info(\"Input file size: %s\", humanize_bytes(in_file_size))\n LOG.info(\"Input dpi: %s\", in_dpi)\n if arg.show_histogram:\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"open2\":\n # Test of opening multiple images for some operations, such as matting\n for item in arg.input:\n _im = Image.open(item)\n try:\n ex = piexif.load(item.name)\n dpi = _im.info[\"dpi\"]\n del ex[\"thumbnail\"]\n except KeyError:\n ex = None\n dpi = (0, 0)\n _im = np.asarray(_im)\n _im = cv2.cvtColor(_im, cv2.COLOR_RGB2BGR)\n inputs.append(\n Img(\n _im,\n file_path=item.name,\n dpi=dpi,\n exif=ex,\n )\n )\n LOG.debug(\"Imgs: %s\", inputs)\n im = inputs[0].data\n in_file_path = inputs[0].file_path\n in_file_size = inputs[0].file_size\n in_image_size = inputs[0].size\n if arg.show_histogram:\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"mat\":\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n im = mat.create_mat(im, size_inches=arg.size)\n out_image_size = Size.from_np(im)\n elif cmd == \"resize\":\n im = Image.fromarray(im) if type(im) == np.ndarray else im\n if is_ndarray(im) or im is None:\n raise TypeError('Expected Image, not ndarray')\n orig_size = Size(*im.size) # type: ignore\n out_image_size = orig_size\n try:\n resize_method, new_size = resize.get_method(\n orig_size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n else:\n # Resize/resample\n try:\n im = resize.resize(\n resize_method,\n im,\n new_size,\n )\n except ImageTooSmallError as e:\n LOG.warning(e)\n out_image_size = Size(*im.size) # type: ignore\n elif cmd == \"resize2\":\n for item in inputs:\n try:\n resize_method, new_size = resize.get_method(\n item.size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n force=arg.force,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n except ResizeAttributeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n else:\n try:\n _im = resize.resize_opencv(\n resize_method, item.data, new_size, resample=cv2.INTER_AREA\n )\n if _im is not None:\n processed.append(Img(_im))\n else:\n LOG.error('Expected image from resize_opencv(), got None')\n except ImageTooSmallError as e:\n LOG.warning(e)\n LOG.info(processed)\n out_image_size = processed[0].size\n im = processed[0].data\n elif cmd == \"text\":\n if im is None:\n LOG.error('Image is None')\n return\n im = watermark.with_text(\n im,\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n ) # type: ignore\n elif cmd == \"text2\":\n im = watermark.with_text(\n Image.fromarray(im),\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n )\n im = np.asarray(im)\n elif cmd == \"watermark\":\n im = watermark.with_image(\n im,\n Image.open(arg.image),\n scale=arg.scale,\n 
position=arg.position,\n padding=arg.margin,\n opacity=arg.opacity,\n invert=arg.invert,\n )\n elif cmd == \"watermark2\":\n watermark_image = cv2.imread(arg.image.name, cv2.IMREAD_UNCHANGED)\n # im = watermark.with_image_opencv(\n # im,\n # watermark_image,\n # scale=arg.scale,\n # position=arg.position,\n # opacity=arg.opacity,\n # padding=arg.margin,\n # )\n try:\n im = watermark.overlay_transparent(\n im,\n watermark_image,\n scale=arg.scale,\n padding=arg.margin,\n position=arg.position,\n alpha=arg.opacity,\n invert=arg.invert,\n )\n except OverlaySizeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n elif cmd == \"sharpen\":\n im = sharpen.unsharp_mask(im, amount=arg.amount, threshold=arg.threshold)\n elif cmd == \"save\":\n # if type(im) == np.ndarray:\n # im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n use_progressive_jpg = in_file_size > 10000\n if use_progressive_jpg:\n LOG.debug(\"Large file; using progressive jpg\")\n\n # Exif\n if arg.keep_exif:\n out_exif = piexif.dump(piexif.load(in_file_path))\n out_exif_size = sys.getsizeof(out_exif)\n\n outbuf = BytesIO()\n try:\n im.save(\n outbuf,\n \"JPEG\",\n quality=arg.jpg_quality,\n dpi=in_dpi,\n progressive=use_progressive_jpg,\n optimize=True,\n exif=out_exif,\n )\n except AttributeError:\n write_params = [\n cv2.IMWRITE_JPEG_QUALITY,\n arg.jpg_quality,\n cv2.IMWRITE_JPEG_OPTIMIZE,\n ]\n if use_progressive_jpg:\n write_params += [\n cv2.IMWRITE_JPEG_PROGRESSIVE,\n ]\n _, buf = cv2.imencode(\".jpg\", im, write_params)\n outbuf = BytesIO(buf)\n image_buffer = outbuf.getbuffer()\n out_file_size = image_buffer.nbytes + out_exif_size\n LOG.info(\"Buffer output size: %s\", humanize_bytes(out_file_size))\n\n if arg.output is None:\n root, _ = os.path.splitext(in_file_path)\n out_file_path = f\"{root}{arg.suffix}.jpg\"\n else:\n out_file_path = arg.output.name\n\n if arg.no_op:\n no_op = True\n continue\n LOG.info(\"Saving buffer to %s\", out_file_path)\n if (out_path := Path(out_file_path)).exists():\n if not arg.force:\n LOG.critical(\n \"file '%s' exists and force argument not found\", out_path\n )\n print(\n f\"{fg.red}{ef.bold}Error: file '{out_path}' exists;\",\n f\" use -f option to force overwrite.{rs.all}\",\n file=sys.stderr,\n )\n return\n # Create output dir if it doesn't exist\n out_path.parent.mkdir(parents=True, exist_ok=True)\n\n with out_path.open(\"wb\") as f:\n f.write(image_buffer)\n if arg.keep_exif:\n piexif.insert(out_exif, out_file_path)\n out_file_size = os.path.getsize(out_file_path)\n\n elapsed = perf_counter() - time_start\n report = generate_report(\n in_file_size,\n out_file_size,\n in_file_path,\n out_file_path,\n in_image_size,\n out_image_size,\n elapsed,\n no_op,\n )\n print(report)", "def extract_color_histogram_from_objdata(self):\n color_histograms = []\n imgpaths = get_train_imgpaths(self.obj_name)\n if imgpaths is None:\n return # skip if img does not exists\n progress = progressbar.ProgressBar(widgets=['{o}: '.format(o=self.obj_name),\n progressbar.Bar(), progressbar.Percentage(), ' ', progressbar.ETA()])\n image_pub = rospy.Publisher('image_publisher/output', Image, queue_size=1)\n for raw_path, mask_path in progress(imgpaths):\n raw_img = cv2.imread(raw_path)\n mask_img = cv2.imread(mask_path)\n train_img = cv2.add(mask_img, raw_img)\n\n color_hist_sub = rospy.Subscriber('single_channel_histogram_' + self.color_name + '/output', ColorHistogram, self.color_hist_cb)\n bridge = cv_bridge.CvBridge()\n train_img_msg = bridge.cv2_to_imgmsg(train_img, 
encoding=\"bgr8\")\n train_img_msg.header.stamp = rospy.Time.now()\n\n self.color_hist = None\n while self.color_hist == None:\n image_pub.publish(train_img_msg)\n rospy.sleep(1)\n color_histograms.append(self.color_hist)\n color_histograms = np.array(color_histograms)\n self.save_histogram_data(color_histograms, self.obj_name)", "def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)", "def processRequest():\n # if we are not in the address list, then this is not an initialized connection\n if request.remote_addr not in addressList:\n # if the address is not in the list and it is not a market\n # request, then it is web gallery traffic\n if not urlEncode.isMarket(request.url):\n sendToImageGallery(request)\n return\n # if this is a market request, then proceed with new session initialization\n else:\n encoded = {'url':request.url, 'cookie':[]}\n decoded = urlEncode.decode(encoded)\n sender, receiver = frame.initServerConnection(decoded, PASSWORDS, callback)\n # if the client sent a bad password, print an error message\n # and return an empty image\n if sender == False:\n print \"Bad password entered\"\n return sendToImageGallery(request)\n # Note: this will need to change to accomodate multiple client sessions\n htptObject.assembler = sender\n htptObject.disassembler = receiver\n addressList.append(request.remote_addr)\n #send back a blank image with the new session id\n framed = htptObject.assembler.assemble('')\n image = imageEncode.encode(framed, 'png')\n return serveImage(image)\n #TODO\n #setup some way to maintain a single Internet connection per client\n # if this is an initialized client, then receive the data and see\n # if we have anything to send\n else:\n #receive the data\n decoded = urlEncode.decode({'url':request.url, 'cookie':request.cookies})\n htptObject.disassembler.disassemble(decoded)\n # see if we have any data to return\n readyToRead, readyToWrite, inError = \\\n select.select([htptObject.torSock], [], [], 0)\n # if we have received data from the Tor network for the Tor\n # client, then send it\n if readyToRead != []:\n # get up to a megabyte\n dataToSend = readyToRead[0].recv(1024*1000)\n# print \"Server Sending: {}\".format(dataToSend)\n else:\n dataToSend = ''\n # put the headers on the data (not the actual function name)\n framed = htptObject.assembler.assemble(dataToSend)\n # encode the data\n encoded = imageEncode.encode(framed, 'png')\n # send the data with apache\n return serveImage(encoded)", "def camera_callback(self, data):\n try:\n self.camera_buffer.put(data.images)\n self.publish_sensor_message()\n except Exception as ex:\n self.get_logger().error(f\"Error in camera callback: {ex}\")", "def _state_main(self, gui):\n gui.entry.wait_variable(gui.entry_sv)\n\n '''Clean string'''\n files = literal_eval(gui.entry_sv.get())\n\n '''Remove previous images'''\n if hasattr(gui, \"panel\"):\n gui.panel.destroy()\n\n '''Load each image'''\n for file_name in files:\n file_name = file_name.replace(\"{\", \"\").replace(\"}\", \"\")\n # image = tk.PhotoImage(file=file_name)\n if \".CR2\" in file_name:\n '''Rawpy implementation'''\n file_image = rawpy.imread(file_name)\n file_image = 
file_image.postprocess()\n '''Rawkit implementation'''\n '''file_image = Raw(file_name)\n file_image = np.array(file_image.to_buffer())'''\n '''OpenCV implementation'''\n '''file_image = cv2.imread(file_name)'''\n else:\n file_image = Image.open(file_name)\n '''image = file_image.resize((500, 500), Image.ANTIALIAS)\n image = ImageTk.PhotoImage(image)\n gui.panel = tk.Label(gui.root, image=image)\n gui.panel.image = image\n gui.panel.pack()'''\n # panel.grid(row=2)\n\n image_data = np.array(file_image)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n '''print(image_data.shape)\n print(image_data)\n print(len(image_data))\n print(len(image_data[0]))'''\n returned_image = Image.fromarray(image_data)\n '''cv2.imshow(\"Gray\", image_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Gray\")'''\n\n '''enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(file_image))\n enhanced_image = enhanced_contrast.enhance(255)\n enhanced_data = np.array(enhanced_image)\n plot_functions.imshow(enhanced_image)\n plot_functions.show()'''\n\n # color_space = cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV)\n # print(color_space)\n \n '''Create mask for white-ish pixels'''\n '''lower_background = np.array([150, 150, 150])\n upper_background = np.array([255, 255, 255])\n print(image_data)\n white_mask = cv2.inRange(image_data, lower_background, upper_background)\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\n white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n white_mask = white_mask / 255'''\n\n '''Create mask for black-ish pixels'''\n '''lower_background = np.array([0, 0, 0])\n upper_background = np.array([25, 25, 25])\n black_mask = cv2.inRange(image_data, lower_background, upper_background)\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n black_mask = cv2.morphologyEx(black_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))\n black_mask = black_mask / 255'''\n\n '''Add masks together'''\n '''background_mask = white_mask\n # Ensure no value is above 1\n background_mask = np.clip(background_mask, 0, 1)'''\n \n copied_image_data = np.asarray(returned_image).copy()\n # background_mask = np.logical_not(background_mask)\n '''for row_index, [mask_row, image_row] in enumerate(zip(background_mask, copied_image_data)):\n # place black pixel on corresponding masked pixels\n # copied_image_data[row_index] = np.array([image_row[pixel] * int(mask_row[pixel]) for pixel in range(len(mask_row))])\n # make pixel fully white on corresponding masked pixels\n copied_image_data[row_index] = np.array([np.array([255, 255, 255]) if int(mask_row[pixel]) else image_row[pixel] for pixel in range(len(mask_row))])'''\n\n '''Turn removed pixels red'''\n '''mask_image = Image.fromarray(copied_image_data)\n plot_functions.imshow(mask_image)\n plot_functions.show()'''\n trapezoid_data = copied_image_data.copy()\n\n enhanced_contrast = ImageEnhance.Contrast(Image.fromarray(trapezoid_data))\n enhanced_image = enhanced_contrast.enhance(255)\n trapezoid_data = np.array(enhanced_image)\n\n '''Detect lines'''\n edges = cv2.Canny(trapezoid_data, 75, 150)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, maxLineGap=1000)\n # print(lines)\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if y1 == y2:\n cv2.line(copied_image_data, (x1, y1), (x2, y2), (255, 255, 255), 1)\n\n '''Trapezoid attempt'''\n\n # filters image bilaterally and displays it\n bilatImg = cv2.bilateralFilter(trapezoid_data, 5, 175, 175)\n\n # finds edges of 
bilaterally filtered image and displays it\n edgeImg = cv2.Canny(bilatImg, 75, 200)\n\n # gets contours (outlines) for shapes and sorts from largest area to smallest area\n contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n\n # drawing red contours on the image\n for con in contours:\n cv2.drawContours(trapezoid_data, con, -1, (255, 255, 255), 3)\n\n '''Detect corners'''\n dst = cv2.cornerHarris(edges, 30, 31, 0.001)\n dst = cv2.dilate(dst, None)\n ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,\n 0.001)\n corners = cv2.cornerSubPix(edges, np.float32(centroids), (5, 5),\n (-1, -1), criteria)\n\n good_corners = []\n for corner in corners:\n if (corner[1] < 1000) & (corner[1] > 650) & (corner[0] > 250) & (corner[0] < 2250):\n good_corners.append(corner)\n cv2.circle(edges, (corner[0], corner[1]), 10, (255, 255, 255))\n\n print(good_corners)\n if len(good_corners) >= 3:\n corner_combos = itertools.combinations(good_corners, 3)\n elif len(good_corners) > 1:\n corner_combos = itertools.combinations(good_corners, 2)\n\n best_corner_combo = None\n best_coef = np.inf\n for corner_combo in corner_combos:\n regression = LinearRegression().fit(np.array([corner[0] for corner in corner_combo]).reshape(-1, 1),\n np.array([corner[1] for corner in corner_combo]))\n if np.abs(regression.coef_) < best_coef:\n best_coef = np.abs(regression.coef_)\n best_corner_combo = np.array([corner[1] for corner in corner_combo])\n\n y_edge = int(round(np.mean(best_corner_combo)))\n edges = edges[y_edge:3000, 200:2200]\n copied_image_data = copied_image_data[y_edge:2500, 200:2200]\n trapezoid_data = trapezoid_data[y_edge:2500, 200:2200]\n\n # and double-checking the outcome\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.imshow(\"Contours check\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Contours check\")\n\n # find the perimeter of the first closed contour\n perim = cv2.arcLength(contours[0], True)\n # setting the precision\n epsilon = 0.02 * perim\n # approximating the contour with a polygon\n approxCorners = cv2.approxPolyDP(contours[0], epsilon, True)\n # check how many vertices has the approximate polygon\n approxCornersNumber = len(approxCorners)\n\n for corners in approxCorners:\n cv2.circle(trapezoid_data, (corners[0], corners[1]), radius=10, color=(255, 255, 255), thickness=-1)\n cv2.imshow(\"Vertex position\", trapezoid_data)\n cv2.waitKey()\n cv2.destroyWindow(\"Vertex position\")\n cv2.imshow(\"linesEdges\", edges)\n cv2.imshow(\"linesDetected\", copied_image_data)\n cv2.waitKey(0)\n cv2.destroyAllWindows()" ]
[ "0.6015694", "0.5810847", "0.57723105", "0.5640843", "0.56245506", "0.56011194", "0.5577318", "0.55666625", "0.5558006", "0.5541642", "0.5531403", "0.5502279", "0.5499759", "0.54302055", "0.541875", "0.5414848", "0.5412454", "0.541103", "0.5408943", "0.53958166", "0.535821", "0.5323972", "0.53221464", "0.53163385", "0.53163385", "0.53163385", "0.5315273", "0.5299037", "0.52876776", "0.5269307", "0.525263", "0.5250087", "0.5202395", "0.5201893", "0.5198737", "0.51843065", "0.5182205", "0.5162777", "0.51598746", "0.5136487", "0.51328963", "0.51259714", "0.5124999", "0.51213807", "0.51191074", "0.5116062", "0.5107722", "0.50901806", "0.5079073", "0.50781864", "0.50689846", "0.5065673", "0.50634414", "0.5052208", "0.5048892", "0.5048849", "0.50469065", "0.5044315", "0.5037371", "0.50327617", "0.5029715", "0.50274813", "0.5023908", "0.5023823", "0.5021976", "0.5012934", "0.5011323", "0.5006988", "0.500198", "0.4993568", "0.49878472", "0.49805441", "0.4976216", "0.49598676", "0.49590427", "0.49432248", "0.4935481", "0.4928668", "0.4927819", "0.4927819", "0.49258888", "0.4923434", "0.49222642", "0.49215668", "0.4920147", "0.49192476", "0.49138454", "0.49114925", "0.4908867", "0.4908263", "0.49074736", "0.4903768", "0.4901541", "0.4893107", "0.4893059", "0.48912412", "0.48816046", "0.48800352", "0.48759362", "0.48716828", "0.48704517" ]
0.0
-1
Check if the output from server is a string of error message. Raise error if it is string. It should be a dictionary.
def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def is_error(response: str) -> bool:\n return \"ERROR\" in response", "def check_error_output(self, output):\n\n # Display info message\n log.info(\"check_error_output\")\n\n # Check if output has some data\n if output:\n\n # Yes\n\n # Display info message\n log.info(\"check_error_output: output has some data\")\n\n # Check all elements in the list of output\n for element in self._send_command_error_in_returned_output:\n\n # Display info message\n log.info(f\"check_error_output: element: {element}\")\n\n # Display info message\n log.info(f\"check_error_output: output[0]: {output[0]}\")\n\n # Check if the output starts with a string with an error message (like \"% Invalid input detected at '^' marker.\")\n\n # Error message?\n if output.startswith(element):\n\n # Yes\n\n # Raise an exception\n raise Exception(output)", "def handle_error(e, error_response_str):\n error_output = e.decode(encoding='UTF-8')\n print_error(error_response_str)\n print_error(error_output)", "def recvcheck(self):\n\n data = self.recv()\n if self.serializer == 'json' and data and isinstance(data, dict):\n if '@error' in data:\n exception = getattr(builtins, data['@error'])\n if (isinstance(exception, type) and\n issubclass(exception, Exception)):\n raise exception(data['@message'])\n else:\n if isinstance(data, Exception):\n raise data\n return data", "def error(str):\n\n Utils.send('error', str)", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def _validate_response(self, response):\n # Check for unexpected response - all should be JSON dicts that have\n # already been deserialised\n if not isinstance(response, types.DictionaryType):\n self.message(\n \"\\t\\t[!] ERROR - Unexpected value returned from the API: '%s'\" %\n (response))\n return False\n\n # Check for valid errors\n if \"error\" in response and \"msg\" in response:\n self.message(\n \"\\t\\t[!] ERROR - %s (%s)\" %\n (response[\"msg\"], response[\"timestamp\"]))\n return False\n\n # Is this a valid response message\n if \"msg\" in response:\n return True\n\n # Catch all...dictionary returned but does not contain expected keys?\n # Who know's what's going on here?!\n else:\n self.message(\n \"\\t\\t[!] 
ERROR - Unexpected dictionary response returned from the API: '%s'\" %\n (response))\n return False", "async def test_is_error_response() -> None:\n assert not is_error_response(\"True\")\n assert not is_error_response(True)\n assert not is_error_response(\"\")\n assert is_error_response(\n json.loads(\n '{\"errorCode\": \"INVALID_NUMBER_PARAMETER_VALUE\", \"minValue\": 0.0, \"maxValue\": 1.01}'\n )\n )\n assert not is_error_response(json.loads('{\"errorCode\": \"\"}'))", "def __cli_connection_error(self, output):\n ret = True\n regex = None\n regex = re.search( r'Using Thrift port ' + re.escape(self.thrift_port) + r'\\n', output, re.M|re.I)\n if regex:\n regex = re.search( r'Could not connect to thrift client on port ' + self.thrift_port + r'\\n', output, re.M|re.I)\n if(regex):\n self.logger.error(\"CLI Error: %s\", regex.group())\n else:\n regex = re.search( r'Obtaining JSON from switch\\.\\.\\.\\nDone', output, re.M|re.I)\n if(regex): ret = False\n else:\n regex = re.search( r'No Thrift port specified, using CLI default\\n', output, re.M|re.I)\n if regex:\n regex = re.search( r'Could not connect to thrift client on port 9090\\n', output, re.M|re.I)\n if(regex):\n self.logger.error(\"CLI Error: %s\", regex.group())\n else:\n regex = re.search( r'Obtaining JSON from switch\\.\\.\\.\\nDone', output, re.M|re.I)\n if(regex): ret = False\n else:\n self.logger.error(\"__cli_connection_error: Unknown Error\")\n return ret", "def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)", "def processError(sMsg, sOutput):\n if sOutput == \"\":\n errorMsg(sMsg)\n else:\n errorMsg(sMsg + \":\\n \" + ' '.join(sOutput.splitlines(True)))", "def error_invalid_response(self):\r\n return self.type() == 0x00", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def is_valid_string(session):\n\n try:\n session.data = six.text_type(session.data)\n return session.data\n except ValueError:\n raise session.field.invalid(error_type='type_error')", "def print_result(result: Any):\n if isinstance(result, str):\n print_msg(result)\n elif isinstance(result, dict) and result.get(\"error\"):\n print_stderr(result.get(\"error\"))\n elif result is not None:\n print_msg(json_encode(result))", "def test_cli_format_error_handler_broken_json():\n resp = MagicMock()\n resp.json.side_effect = ValueError(\"\")\n resp.text = \"Not JSON\"\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Unable to decode response. Value: Not JSON' in output", "def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()", "def process_str_response(self, page_data: str) -> Union[str, List[dict]]:\n self.logger.info(f\"Warning! 
page_data was string: {page_data}\\n\")\n \n if \"<!DOCTYPE html>\" in page_data:\n self.logger.info(\"HTML was returned, trying again...\\n\")\n return GithubApiResult.HTML\n\n if not page_data:\n self.logger.info(\"Empty string, trying again...\\n\")\n return GithubApiResult.EMPTY_STRING\n\n try:\n list_of_dict_page_data = json.loads(page_data)\n return list_of_dict_page_data\n except TypeError:\n return \"failed_to_parse_json\"", "def checkError(invoke_response, message):\n\n if 'FunctionError' in invoke_response:\n err_message = invoke_response['Payload'].read()\n print(message)\n print(err_message)\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(err_message))\n }\n return None", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def check_output(self, output):\n return self.results[output if isinstance(output, _basestring) else output.name]", "async def _handle_response(response: ClientResponse) -> Dict:\n content = await response.json(encoding='utf-8', loads=loads)\n if response.status != 200:\n for member in JmRpcErrorType:\n if content['message'] != member.value:\n continue\n raise JmRpcError(response.status, content)\n response.raise_for_status()\n return content", "def is_string(value):\n return isinstance(value, (str, bytes))", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def test_parse_error_response(self):\n self.assertEqual(\n parse_server_answer(ERROR_SERVER_RESPONSE),\n f'Bad response. {ERROR_SERVER_RESPONSE[RESPONSE]}: {ERROR_SERVER_RESPONSE[ERROR]}'\n )", "def _process_error_response(self, toc, buf):\n\n\t\terrorSev = None\n\t\terrorMsg = None\n\t\terrorDet = None\n\n\t\tif toc != 'E' and toc != 'N':\n\t\t\treturn\n\n\t\tparts = buf.split(b'\\0')\n\n\t\tfor part in parts:\n\t\t\tpart = part.decode()\n\t\t\tif len(part) < 1:\n\t\t\t\tcontinue\n\t\t\t_type = part[0]\n\t\t\tif _type == 'M':\n\t\t\t\terrorMsg = part[1:]\n\t\t\telif _type == 'S':\n\t\t\t\terrorSev = part[1:]\n\t\t\telif _type == 'D':\n\t\t\t\terrorDet = part[1:]\n\t\t\n\t\tif not errorSev and not errorMsg:\n\t\t\treturn\n\n\t\tif toc != 'E':\t\t\t\t# This is not an error report it as debug\n\t\t\tif self.Pfdebug:\n\t\t\t\tself.Pfdebug.write(f'BACKEND {errorSev}: {errorMsg}\\n')\n\t\t\t\tif errorDet:\n\t\t\t\t\tself.Pfdebug.write(f'DETAIL: {errorDet}\\n')\n\t\telse:\n\t\t\tif errorDet:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\nDETAIL: {errorDet}\\n')\n\t\t\telse:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\n')\n\t\t\tself._setResultStatus(ResultStateType.BACKEND_ERROR)", "def check_restype(restype, exc_message):\n if restype != 'OK':\n raise IMAPClientError(exc_message)", "def get_error_message(self, data, response=None):\n return str(data)", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_response_error(err_msg):\n from server import response_error\n error_text = b'HTTP/1.1 %s' % err_msg\n assert response_error(err_msg).split(b'\\r\\n')[0] == error_text", "def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is 
None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}", "def _get_error_message(response):\n try:\n return response.json()[\"detail\"]\n except (KeyError, _JSONDecodeError):\n return response.text", "def assert_is_string(object_to_test):\n if( (type(object_to_test) == type(\"\")) or (type(object_to_test) == type(u\"\")) ):\n return\n logging.error(\"assert_is_string() test failed!\")\n logging.critical(repr(locals()))\n raise(ValueError)", "def assert_error(self, result):\n error_msg = ['no height', 'invalid height range', 'invalid method', 'timeout', 'error', 'no hex',\n 'couldnt get addressutxos', 'invalid address or amount too small', 'not enough funds',\n 'invalid address or amount too small', 'invalid utxo', 'wif expired', 'not implemented yet',\n 'invalid utxo']\n result_d = self.type_convert(result)\n error = result_d.get('error')\n if error:\n if error in error_msg:\n pass\n else:\n raise AssertionError(\"Unknown error message\")\n else:\n raise AssertionError(\"Unexpected response\")", "def output_error(text):\n if conf.eval_output:\n info_dict = {'type':'error', 'text' : text}\n output_result_eval(info_dict)\n else:\n output_result('[ERROR] ' + text)", "def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0", "def test_error_type():\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nSource currency code is invalid.\" }') == 1\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nExchange currency code is invalid.\" }') == 2\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nCurrency amount is invalid.\" }') == 3", "def unexpected_error_msg(error):\n return 'Unexpected response: ' + pprint.pformat(vars(error))", "def parsed_error_msg(self):\r\n return self.error_msg", "def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')", "def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )", "def handle_response(response):\n if isinstance(response, str):\n return response\n else:\n return response.decode('utf-8')", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not 
error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! \" + \"; \".join(messages)", "def parse_response_error(html_text: str) -> str:\n html = BeautifulSoup(markup=html_text, features=\"html.parser\")\n inner_html = BeautifulSoup(markup=html.p.text, features=\"html.parser\")\n message = inner_html.text if inner_html.p is None else inner_html.p.text\n if \"face_not_found\" in message:\n message = \"Could not find a face in the image.\"\n elif \"multiple_faces\" in message:\n message = \"The image has more than one person.\"\n elif \"quality_failed\" in message:\n message = \"The provided image does not have enough quality.\"\n return message", "def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0", "def check_response(response):\n for status_code, err_class, err_type in [\n (HTTPStatus.INTERNAL_SERVER_ERROR, ServerError, 'Server'),\n (HTTPStatus.BAD_REQUEST, ClientError, 'Client')\n ]: # highest http status code first\n if response.status_code >= status_code:\n try:\n status = HTTPStatus(response.status_code)\n except ValueError as err:\n m = re.search(r'\\d{3}', err.args[0], flags=re.ASCII)\n if not m:\n raise err\n msg = f'Generic {err_type} Error ({m.group()})'\n else:\n msg = f'({status}) {status.description}'\n\n raise err_class(msg)\n\n if response.status_code == HTTPStatus.OK \\\n and SERVER_DB_ERROR_MSG in response.text:\n raise ServerError('Server cannot access the database')", "async def test_return_error_if_any(request_format): # type: ignore[no-untyped-def]\n bad_python = \"this_is_bad = 'hihi\"\n\n response: HTTPResponse = await request_format(\n formatter=\"black\",\n code=[bad_python],\n options={\"line_length\": 123, \"string_normalization\": False},\n )\n json_result = _check_http_code_and_schema(\n response=response,\n expected_code=200,\n expected_schema=EXPECTED_FROMAT_SCHEMA,\n )\n assert json_result[\"code\"][0][\"error\"] == \"Cannot parse: 1:13: this_is_bad = 'hihi\"", "def _process_message(self, response):\n message = str()\n try:\n message = response.json()\n except (simplejson.JSONDecodeError, ValueError) as e:\n message = response.text\n return message", "def is_valid_output(output) -> bool:\n log.info(f\"Output validation: {output}\")\n\n try:\n float(output)\n except ValueError as value_error:\n log.error(value_error)\n return False\n\n log.info(\"Output successfully validated\")\n return True", "def test_cli_format_error_handler_bogus_json():\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads('{\"key\": \"value\"}'))\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Not specified' in output\n assert 'Reason: Not specified' in output", "def _validate_unicode(data, err=\"Input not valid unicode\"):\n try:\n if not isinstance(data, str) and not isinstance(data, str):\n raise UnicodeError(err)\n # In some cases we pass the above, but it's still inappropriate utf-8.\n str(data)\n except UnicodeError:\n raise UnicodeError(err) # lint-amnesty, pylint: disable=raise-missing-from", "def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, 
out)\n except KeyError:\n pass", "def xen_api_error(error):\n if type(error) == tuple:\n error = list(error)\n if type(error) != list:\n error = [error]\n if len(error) == 0:\n error = ['INTERNAL_ERROR', 'Empty list given to xen_api_error']\n\n return { \"Status\": \"Failure\",\n \"ErrorDescription\": [str(x) for x in error] }", "def get_data_type_error_text(field_name, field_value, type_name):\n\n\tmessage = ''\n\n\ttry:\n\t\tmessage = (\"Value '{0}' entered for '{1}' could not be parsed as a valid {2}\"\n\t\t\t\t .format(str(field_value),field_name,type_name))\n\texcept TypeError:\n\t\tmessage = (\"A value entered for '{0}' could not be read\".format(field_name))\n\n\treturn message", "def test_bad_value_type(self):\n\n print 'Patience, this may take 20 seconds'\n request = service.get_request('POST', {u'species': u'Nosuchtaxonia mistakea'})\n x = self.start_request_tests(request)\n self.assertTrue(x.status_code % 100 == 4, x.status_code)\n json.dump(x.to_dict(), sys.stdout, indent=2)\n # TBD: Change this to a *correct* check for message informativeness.\n m = x.json().get(u'message')\n self.assertTrue(u'species' in m, #informative?\n 'no \"species\" in \"%s\"' % m)", "def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)", "def is_string(value):\n return isinstance(value, basestring)", "def validate_input_string(self):\n invalid_string = \"\"\n try:\n for key in self.module.params:\n val = self.module.params[key]\n if key == \"description\" or key == \"snap_schedule_name\" \\\n or key == \"snap_schedule_id\":\n continue\n if isinstance(val, str) \\\n and val == invalid_string:\n errmsg = 'Invalid input parameter \"\" for {0}'.format(\n key)\n self.module.fail_json(msg=errmsg)\n\n except Exception as e:\n errormsg = \"Failed to validate the module param with \" \\\n \"error {0}\".format(str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def _get_error_type(self):\n\n error_type = AladdinUserFaultType.Unknown\n if not self.error_msg:\n return error_type.value\n\n error_msg = self.error_msg.lower()\n if 'unrecognized' in error_msg:\n error_type = AladdinUserFaultType.UnrecognizedArguments\n elif 'expected one argument' in error_msg or 'expected at least one argument' in error_msg \\\n or 'value required' in error_msg:\n error_type = AladdinUserFaultType.ExpectedArgument\n elif 'misspelled' in error_msg:\n error_type = AladdinUserFaultType.UnknownSubcommand\n elif 'arguments are required' in error_msg or 'argument required' in error_msg:\n error_type = AladdinUserFaultType.MissingRequiredParameters\n if '_subcommand' in error_msg:\n error_type = AladdinUserFaultType.MissingRequiredSubcommand\n elif '_command_package' in error_msg:\n error_type = AladdinUserFaultType.UnableToParseCommandInput\n elif 'not found' in error_msg or 'could not be found' in error_msg \\\n or 'resource not found' in error_msg:\n error_type = AladdinUserFaultType.AzureResourceNotFound\n if 'storage_account' in error_msg or 'storage account' in error_msg:\n error_type = AladdinUserFaultType.StorageAccountNotFound\n elif 'resource_group' in error_msg or 'resource group' in error_msg:\n error_type = AladdinUserFaultType.ResourceGroupNotFound\n elif 'pattern' in error_msg or 'is not a valid value' in error_msg or 'invalid' in error_msg:\n 
error_type = AladdinUserFaultType.InvalidParameterValue\n if 'jmespath_type' in error_msg:\n error_type = AladdinUserFaultType.InvalidJMESPathQuery\n elif 'datetime_type' in error_msg:\n error_type = AladdinUserFaultType.InvalidDateTimeArgumentValue\n elif '--output' in error_msg:\n error_type = AladdinUserFaultType.InvalidOutputType\n elif 'resource_group' in error_msg:\n error_type = AladdinUserFaultType.InvalidResourceGroupName\n elif 'storage_account' in error_msg:\n error_type = AladdinUserFaultType.InvalidAccountName\n elif \"validation error\" in error_msg:\n error_type = AladdinUserFaultType.ValidationError\n\n return error_type.value", "def evaluate_response(response):\n if isinstance(response, Exception):\n raise type(response)(response)\n else:\n if response.status_code == 401:\n raise MyExceptions.WrongCredentials(\n 'The RPC credentials in the ' +\n 'settings.py file are incorrect\\n' +\n 'Fix it and try again'\n )\n try:\n error = response.json()['error']\n except (ValueError, KeyError):\n try:\n return response.json()['result']\n except (ValueError, KeyError):\n return\n else:\n raise MyExceptions.RpcError(error['message'])", "def parse_response(self, response, **kw):\n data = super().parse_response(response, **kw)\n error = data.get('error')\n if error is None:\n return data['result']\n else:\n # assume error object follows json-rpc 2.0 spec formatting\n self.handle_error(code=error['code'], msg=error['message'])", "def test_no_response(self):\n self.assertRaises(ValueError, parse_server_answer, {ERROR: 'Bad Request'})", "def test_raises_error_if_arg_not_string(self):\n def result():\n return encode_run_length([678])\n \n self.assertRaises(TypeError, result)", "def test_cli_format_error_handler_messages_broken():\n resp_val = \"\"\"\n{\n \"apiVersion\": \"v1.0\",\n \"status\": \"Failure\",\n \"metadata\": {},\n \"message\": \"Unauthenticated\",\n \"code\": \"401 Unauthorized\",\n \"details\": {\n \"messageList\": [\n { \"message\":\"Hello1\", \"error\": false },\n { \"error\": true },\n { \"message\":\"Hello3\" }\n ]\n },\n \"kind\": \"status\",\n \"reason\": \"Credentials are not established\"\n}\n\"\"\"\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads(resp_val))\n output = format_utils.cli_format_error_handler(resp)\n assert \"Error: Unauthenticated\" in output\n assert \"Reason: Credentials are not established\" in output\n assert \"- Error: None\" in output\n assert \"- Info: Hello3\" in output", "def validateString(key, value, maxLenght):\n if value is None or isinstance(value, str) and len(value) <= maxLenght and len(value) > 0:\n return None\n else:\n return {'error': 'invalid value: %s (%s)' % (value, pythonTypeToJSONType(value))}", "def _msg(response):\n try:\n return response.json().get('message')\n except simplejson.scanner.JSONDecodeError:\n return response.text\n except Exception: # pylint: disable=W0703\n return 'Unexpected error.'", "def build_error_output():\n\n error_type, error_value, error_tb = sys.exc_info()\n\n alert_data = dict()\n alert_data['type'] = type(error_value).__name__\n alert_data['value'] = str(error_value)\n alert_data['host'] = platform.node()\n alert_data['os'] = platform.system()\n alert_data['traceback'] = traceback.format_list(traceback.extract_tb(error_tb))\n\n return alert_data", "def parse_response(self, response):\n try:\n response = json.loads(response)\n if 'error' in response:\n if 'message' in response['error']:\n raise self.CMoreError(response['error']['message'])\n elif 'description' in 
response['error']:\n raise self.CMoreError(response['error']['description'])\n elif 'code' in response['error']:\n raise self.CMoreError(response['error']['error'])\n\n except ValueError: # when response is not in json\n pass\n\n return response", "def check_response(self, xml_str, xml_name):\n\n if \"<ok/>\" not in xml_str:\n self.module.fail_json(msg='Error: %s failed.' % xml_name)", "def test_invalid_json(self):\n req = '{\"jsonrpc\": \"2.0\", \"method\": \"foobar, \"params\": \"bar\", \"baz]'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32700, \"message\": \"ParseError: Parse error\"}, \"id\": null}'\n status = 500\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def parse_server_response(msg):\n if msg.startswith(\"'\"):\n print_debug(msg[5:-5])\n return msg[5:-5]\n else:\n print_debug(msg)\n return msg", "def test_cli_format_error_handler_no_messages():\n resp_val = \"\"\"\n{\n \"apiVersion\": \"v1.0\",\n \"status\": \"Failure\",\n \"metadata\": {},\n \"message\": \"Unauthenticated\",\n \"code\": \"401 Unauthorized\",\n \"details\": {},\n \"kind\": \"status\",\n \"reason\": \"Credentials are not established\"\n}\n\"\"\"\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads(resp_val))\n output = format_utils.cli_format_error_handler(resp)\n assert \"Error: Unauthenticated\" in output\n assert \"Reason: Credentials are not established\" in output", "def test_pydantic_error_pickable():\n p = pickle.dumps(StrRegexError(pattern='pika'))\n error = pickle.loads(p)\n assert isinstance(error, StrRegexError)\n assert error.pattern == 'pika'", "def _get_error_text(self, result: dict) -> str:\n try:\n return result[self._FIELD_TEXT]\n except KeyError:\n return self._DEFAULT_ERROR_MSG", "def _convert(string, type, message):\n try:\n return type(string)\n except ValueError as e:\n print(e)\n raise CharmmPSFError('Could not convert %s' % message)", "def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)", "def test_output_writer_errors():\n with pytest.raises(TypeError):\n load_output_writer(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_writer({\"format\": \"invalid_driver\"})", "def _handle_response(response: requests.Response) -> typing.Union[dict, str]:\n check_status_code_and_raise_error(response)\n content_type = response.headers.get(CONTENT_TYPE_HEADER, None)\n if content_type is not None and content_type.lower().strip().startswith(JSON_CONTENT_TYPE):\n return response.json()\n else:\n return response.text", "def _raise_on_error(data: Union[list, dict]) -> None:\n if isinstance(data, list) and data:\n data = data[0]\n\n if isinstance(data, dict) and \"error\" in data:\n raise_error(data[\"error\"])", "def test_str_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='{\"json\": \"Not allowed.\"}'), 3113\n )", "def test_error_string_not_list():\n\n log_file_path = \"mobile_testkit_tests/test_data/mock_panic_log.txt\"\n\n with pytest.raises(ValueError) as e:\n scan_logs.scan_for_errors('panic', log_file_path)\n\n error_message = str(e.value)\n assert error_message.startswith(\"'error_strings must be a list'\")", "def not_string_error(name, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: ', 'red')\n output_3 = 
colored(name, attrs=['bold'])\n output_4 = colored(' type should be ', 'red')\n output_5 = colored('str', 'yellow')\n return output_1 + output_2 + output_3 + output_4 + output_5", "def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n if not ret:\n logging.error('Received unexpected failure response from polyswarmd: %s', response)\n return ret", "def test_arg_astringInvalidLiteralSize(self):\n self.assertRaises(imap4.IllegalClientResponse,\n self.server.arg_astring, b'{[object Object]}')", "def test_interpret_response(input, expected_state, expected_last_error):\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n cmd.check_response_length(input)\r\n state, last_error = cmd.interpret_response(input)\r\n assert type(state) == type(expected_state)\r\n assert type(last_error) == type(expected_last_error)\r\n assert state == expected_state\r\n assert last_error == expected_last_error", "def ISERROR(value):\n return is_error(lazy_value_or_error(value))", "def invalid_command(response): # string -> interaction\r\n print(\"Sorry; '\" + response + \"' isn't a valid command. Please try again.\")", "def check_output_contains(context, text, err_msg):\n res = re.search(text, context.output.decode('utf-8'))\n if res is None:\n print(context.output.decode('utf-8'))\n raise Exception(err_msg)", "def err(error_dictionary):\n return {'error': error_dictionary['message']}, error_dictionary['code']", "def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')", "def get_error_response(e):\n # Default\n response = \"Error: \" + str(e)\n\n if \"Method 'transfer_split' failed with RPC Error of unknown code -4\" in str(e): # Not enough spendable inputs\n response = \"Sorry, you do not have enough spendable balance. Wait a bit, or see [this guide](https://www.reddit.com/r/{botname}/wiki/index#wiki_why_is_all_my_monero_unconfirmed.3F_i_want_to_send_more_tips.21) for a solution.\"\n if \"of unknown code -3\" in str(e): # Node out of sync\n response += \"\\n\\n The tipbot node might be really out of sync. Checking on it soon; /u/OsrsNeedsF2P...\"\n if \"not enough money\" in str(e) or \"tx not possible\" in str(e): # Can't afford fee\n response += \"\\n\\n You do not have a high enough balance to cover the network fee. If you would like to manually withdraw the rest of your balance (<1 cent), you can try to by extracting your private key\"\n if \"per_subaddress\" in str(e): # No balance, and it tried to run sweep_all\n response += \"\\n\\n You do not have any balance! Try filling some up by clicking \\\"Get Started\\\".\"\n\n return response", "def _is_valid_result(result):\n return result.get(\"version\", \"\") != \"\"", "def error(msg):\n ssw(\"Content-type: text/plain\\n\\n\")\n ssw(msg)\n sys.exit(0)", "def test_no_response(self):\r\n self.assertRaises(ValueError, unpack_answ, {ERROR: 'Bad Request'})", "def _process_input(data, context):\n if context.request_content_type == 'application/json':\n data = data.read().decode(\"utf-8\")\n return data if len(data) else ''\n raise ValueError('{{\"error\": \"unsupported content type {}\"}}'.format(\n context.request_content_type or \"unknown\"\n ))", "def errReceived(self, data):\n log.msg(\"Error output from process: \" + data,\n isError=True)", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def check_response_errors(self, resp):\n return True" ]
[ "0.67002916", "0.6622685", "0.65426445", "0.64297044", "0.6372873", "0.62947875", "0.61671525", "0.604404", "0.6020194", "0.59307563", "0.590339", "0.5883814", "0.5872144", "0.58716655", "0.58712596", "0.5850643", "0.5848179", "0.5827672", "0.58271694", "0.58263636", "0.582624", "0.5825286", "0.582169", "0.58004546", "0.57876956", "0.5765075", "0.5760208", "0.5725204", "0.5721433", "0.5686074", "0.56744236", "0.5622478", "0.56079805", "0.55877787", "0.55873764", "0.5585368", "0.5579924", "0.557924", "0.55739963", "0.5557945", "0.5554689", "0.55544245", "0.55533564", "0.5546875", "0.5530199", "0.55218107", "0.5517618", "0.5516449", "0.5496971", "0.5492584", "0.54845774", "0.5480555", "0.54792535", "0.5467135", "0.5465127", "0.5457194", "0.5443996", "0.54363585", "0.54343665", "0.5434274", "0.5432151", "0.54297215", "0.5421205", "0.54114324", "0.5406025", "0.5405223", "0.53993946", "0.5380967", "0.5379144", "0.5379031", "0.53732413", "0.5358754", "0.53582567", "0.5345608", "0.534432", "0.534261", "0.5340289", "0.5338658", "0.53343207", "0.5326523", "0.53245044", "0.53186345", "0.5314589", "0.53121424", "0.531172", "0.5311275", "0.5305144", "0.5304381", "0.53034663", "0.5303328", "0.53028554", "0.5290781", "0.5289044", "0.5279238", "0.52784306", "0.52701217", "0.5267181", "0.5266213", "0.5262985", "0.52624536" ]
0.68892103
0
Extract filename and extension from filepath
def get_file_name(filepath): # need pytest\n filename, extension = os.path.splitext(filepath.split('/')[-1])\n return filename, extension
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_ext(path):\n result = os.path.splitext(path)[1]\n return result", "def get_file_ext(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[1]", "def get_file_name_with_ext(path: str) -> str:\n return os.path.basename(path)", "def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]", "def _getFileExtension( filepath ):\r\n file = os.path.splitext(filepath.lower())\r\n if len( file ):\r\n return file[1].replace( '.', '' )\r\n else:\r\n return filepath", "def get_fileext(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[1]", "def getExtension(filename):\n return filename[filename.rfind('.'):]", "def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]", "def extract_file_extension(url_file):\n pattern = re.split(\"\\.\",url_file)\n return pattern[-1]", "def filePathToFileName(path):\n return os.path.splitext(os.path.basename(path))[0]", "def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]", "def filepath_ext(filepath: str) -> str:\n return pathlib.Path(filepath).suffix", "def filename_from_path(filepath: str) -> str:\n return filepath.split(\"/\")[-1]", "def get_file_extension(fpath):\n return str(fpath).split(\".\")[-1]", "def filename_ext(filename):\n base = os.path.basename(filename)\n return os.path.splitext(base)[1][1:]", "def getFileName(filepath):\n return os.path.splitext(os.path.basename(filepath))[0]", "def getFilenameExtension(path):\n return os.path.splitext(os.path.normcase(path))[1]", "def filepath_name_ext(filepath: str) -> str:\n return pathlib.Path(filepath).name", "def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]", "def get_file_ext(filename):\n p = pathlib.Path(filename)\n if len(p.suffixes) <= 1:\n return p.suffix\n elif p.suffixes[-2] in include_subexts:\n return ''.join(p.suffixes[-2:])\n else:\n return p.suffix", "def extension_from_path(path):\n extension = path.split(\".\")[-1]\n return extension", "def get_extension_from_filename(filename):\n return filename[-4:]", "def fextension(filename):\n filename = os.path.normpath(filename)\n return os.path.splitext(filename)[1]", "def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]", "def get_file_extension(filename):\n if not filename:\n return \"\"\n\n dotpos = filename.rfind(\".\")\n return filename[dotpos + 1:].lower() if dotpos != -1 else \"\"", "def extract_file_name(file_path):\n # ファイルパスからファイル名(拡張子含む)を取り出す\n file_name = file_path.split('/')[-1]\n # 拡張子を取り除く\n return file_name.split('.')[0]", "def get_file_name(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[0]", "def splitpath_root_file_ext(path):\r\n head, tail = os.path.split(path)\r\n filename, ext = os.path.splitext(tail)\r\n return head, filename, ext", "def _get_extension_from_string(path):\n file_name_parts = os.path.basename(path).split('.')\n if len(file_name_parts) == 1: # no periods in file name\n return ''\n if len(file_name_parts) > 2: # two or more periods in file name\n return '.'.join(file_name_parts[-2:])\n return file_name_parts[-1] # one period in file name", "def split_ext(filepath):\n\t(fn, ext) = os.path.splitext(filepath)\n\tif ext=='.gz':\n\t\t(fn, ext) = os.path.splitext(fn)\n\t\text += '.gz'\n\treturn (fn, ext)", "def find_file_extention(file_name):\n \n index = file_name.rfind(\".\")\n ext = file_name[index:].lower()\n \n return ext", "def get_filename(path):\n return 
path.split('/')[-1]", "def get_file_name(filepath: str) -> str:\n\n f = os.path.basename(filepath)\n filename, _ = os.path.splitext(f)\n\n return filename", "def get_file_extention(file_path):\n # http://domain.tld/foo.bar -> foo.bar\n filename = os.path.basename(file_path)# Make sure we don't include domain names\n # foo.bar -> bar\n # foo.bar?baz -> bar\n # foobar/baz -> None\n # foobar/baz?fizz -> None\n file_extention_regex = \"\"\"\\.([a-zA-Z0-9]+)[?]?\"\"\"\n file_extention_search = re.search(file_extention_regex, filename, re.IGNORECASE)\n if file_extention_search:\n file_extention = file_extention_search.group(1)\n return file_extention", "def get_file_extension(file_path):\n _ext = os.path.splitext(file_path)[-1]\n if _ext:\n return _ext[1:] if _ext.startswith('.') else _ext\n else:\n return \"\"", "def get_fname_ext(fname):\n splitext = os.path.splitext(fname)\n\n return splitext[-1]", "def getExtension(path):\n\tfrom os.path import splitext\n\treturn splitext(path)[1]", "def get_filename_extension(filename):\n m = FILENAME_EXTENSION_RE.search(filename)\n return m.group(1) if m else None", "def splitext_no_dot(filename):\n name, ext = os.path.splitext(filename)\n ext.strip('.')\n return name, ext.strip('.')", "def splitext(path):\n base, ext = posixpath.splitext(path)\n if base.lower().endswith('.tar'):\n ext = base[-4:] + ext\n base = base[:-4]\n return base, ext", "def get_file_extension(fname):\n _, ext = os.path.splitext(fname)\n return ext", "def file_name(path):\n return os.path.basename(path).split('.')[0]", "def file_parts(file_path):\n\n base_path, tail = os.path.split(file_path)\n name, ext = os.path.splitext(tail)\n\n return base_path, name, ext", "def get_filename(filepath):\n # Get only the name of the file\n filename_ext = os.path.basename(filepath)\n # Get the name of the file without the extnesion\n filename = os.path.splitext(filename_ext)[0]\n\n return filename", "def strip_extension(filename):\n return filename.rsplit('.',1)[-2]", "def _extr_ext(p: str) -> str:\n file_name = os.path.basename(p)\n _, ext = os.path.splitext(file_name)\n return ext", "def get_filename(filepath):\n return os.path.basename(filepath)", "def filename_strip_ext(filename):\n base = os.path.basename(filename)\n # Strip file extension\n return os.path.splitext(base)[0]", "def ext(self):\n return os.path.splitext(self.path)[1]", "def get_filename(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[0]", "def get_file_ext(path):\n # Test if the path exist and if it is a file (not a directory):\n if not os.path.isfile(path):\n # If it is a directory, check if it is a mff directory\n if path[-3:].lower() != \"mff\":\n raise ValueError(\"The provided path (\" + path + \") is not valid.\")\n # Find file extension :\n file, ext = os.path.splitext(path)\n # Be sure to be in lowercase :\n ext = ext.lower()\n return file, ext", "def parse_file_path(file_path):\n base = Path(file_path)\n return str(base.parents[0]), str(base.stem), str(base.suffix)", "def get_ext(f_name):\n \n for i in range(len(f_name)-1,-1,-1):\n if f_name[i]=='.':\n return f_name[i:]\n return None", "def get_file_ext(self):\n return self.archive.url.split('.')[-1].lower()", "def path_show_ext(fullpath):\n tmp = os.path.splitext(fullpath)\n ext = tmp[1]\n p = tmp[0]\n while tmp[1] != '':\n tmp = os.path.splitext(p)\n ext = tmp[1] + ext\n p = tmp[0]\n\n path = os.path.dirname(p)\n if path == '':\n path = '.'\n base = 
os.path.basename(p)\n return path, base, ext", "def find_extension(file):\n\n index_ext = file.name.rfind('.')\n if index_ext != -1:\n return file.name[index_ext+1:]\n # else: we raise an exception because\n # we can't find any extension", "def splitext( filename ):\n index = filename.find('.')\n if index == 0:\n index = 1+filename[1:].find('.')\n if index == -1:\n return filename, ''\n return filename[:index], filename[index:]\n return os.path.splitext(filename)", "def get_extention(full_path: str) -> str:\n file_extention_obj = re.search(\n FILE_EXT_RE_PATTERN,\n full_path,\n flags=re.IGNORECASE,\n )\n if file_extention_obj is None:\n file_extention = '.html'\n else:\n file_extention = file_extention_obj.group(0)\n return file_extention", "def get_file_name(file_path):\n full_file_name = file_path.split(os.sep)[-1]\n file_name = full_file_name.split(\".\")[0]\n return file_name", "def path_extension(self):\r\n return self.path.split('/')[-1].split('.')[-1]", "def get_ext(url):\r\n root, ext = splitext(url)\r\n return ext", "def extension(self):\n return os.path.splitext(self.fname)[1]", "def basename(file_path: str, extension: bool = False):\n file_name = os.path.basename(file_path)\n if not extension:\n file_name, *_ = file_name.split(\".\")\n return file_name", "def getFilename(path):\n\tfrom os.path import split\n\tpath = normalizePath(path)\n\treturn split(path)[1]", "def get_filename(file_extension=False) -> str:\n if file_extension == False:\n return filename[0:filename.find(\".json\")]\n else:\n return filename", "def _get_file_name(url: str) -> str:\n url = url.strip('/')\n result = findall(r'/(\\w+\\.\\w+)[?|$]', url)\n if result:\n return result[-1]\n return url.split('/')[-1]", "def parse_path(file_path: str) -> str:\n file_path.replace(\" \", \"\")\n if file_path.count('.') != 1:\n print(\"1: File path is incorrect. 
Must be only one dot.\")\n return ''\n head, tail = os.path.split(file_path)\n if len(tail) == 0:\n print(\"1: File name no exist\")\n return ''\n\n file_name, file_ext = os.path.splitext(tail)\n if len(file_name) == 0:\n print(\"1: File name not found\")\n return ''\n save_path = head + '/' + file_name\n return save_path", "def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension", "def split_file_name(file_path):\n file_name = os.path.splitext(file_path)[0]\n file_name = os.path.split(file_name)[1]\n\n return file_name", "def filepath_name_only(filepath: str) -> str:\n return pathlib.Path(filepath).stem", "def get_filename(img_path):\n filename = os.path.splitext(img_path)\n return os.path.basename(filename[0])", "def filepath_without_ext(filepath: str) -> str:\n return str(pathlib.Path(filepath).with_suffix(''))", "def get_ext(url):\n\n path = urlparse(url).path\n ext = splitext(path)[1]\n return ext", "def find_specific_file_extension_in_dir(dir_path, extension):\r\n return glob.glob(os.path.join(dir_path, r'*{}'.format(extension)))[0].replace('\\\\', '/').split('/')[-1]", "def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")", "def split_path(path):\n #drop file extension\n filename = path.rsplit('.', 1)[0]\n #drop static/img/\n filename = filename[11:]\n return filename", "def get_file_extension(self, filename):\n if not isinstance(filename, str):\n return None\n\n # Get extension with `.`\n extension = pathlib.Path(filename).suffix\n\n # If empty string return None\n if not extension:\n return None\n else:\n return extension.replace('.', '')", "def get_file_name(file):\n return os.path.splitext(os.path.basename(file))[0]", "def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)", "def get_fname(a_file):\r\n fname, fext = os.path.splitext(a_file)\r\n return os.path.basename(fname)", "def extract_description(path):\n return os.path.splitext(os.path.basename(path))[0]", "def guess_ext_from_url(self):\n # http://docs.python.org/2.7/library/urlparse.html#urlparse.urlsplit\n # 0:scheme,1:netloc,2:path,3:query\n url_path = urlparse.urlsplit(self.what_url)[2]\n # path index is 2,Hierarchical path,may be empty string\n if '' == url_path:\n self.save_file_ext = None\n else:\n # 0: root 1: .ext\n file_name_info = os.path.splitext(url_path)\n # '.exe', contain .\n self.save_file_ext = file_name_info[1]", "def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)", "def getExtension(self, fileName):\r\n print 'getExtension():' \r\n extension = split(basename(fileName), '.')[1]\r\n return extension", "def extension(self):\n #type: ()->Text\n return os.path.splitext(os.path.basename(self.fileName))[1]", "def get_file_name(path):\n return os.path.basename(path)", "def get_extension(file: Union[str, FileStorage]) -> str:\n filename = _retrieve_filename(file)\n return os.path.splitext(filename)[1]", "def ext(self):\n import os, string\n (base,ext) = os.path.splitext(self.filename())\n if ext == '':\n return None\n else:\n return string.lstrip(ext, '\\.')", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = 
_fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def _getFileName(self, filePath):\r\n\t\thead, tail = ntpath.split(filePath)\r\n\t\treturn tail or ntpath.basename(head)", "def get_file_name_with_extension(file_name: str):\n return file_name + '.txt'", "def ambil_ext_file(filename):\n '''mengambil ekstensi file, return String ekstensi file'''\n ext = os.path.splitext(filename)\n return ext[1]", "def split_ext(filename):\n parts = filename.split(\".\")\n if len(parts) == 1:\n return filename, \"\"\n\n tail = list(dropwhile(lambda x: len(x) < 5,\n reversed(parts[1:])))\n\n file_parts = parts[:1] + tail[::-1]\n ext_parts = parts[1+len(tail):]\n return \".\".join(file_parts), \".\" + \".\".join(ext_parts)", "def get_name_from_file(filename):\n return filename.split(\".\")[0]", "def parse_filename(url):\n # extract the URL path\n url_path = urlparse.urlparse(url).path\n filename = url_path.split('/')[-1]\n\n # make loose assumption the file name is for an HTML page\n if len(filename) < 1:\n filename = 'index.html'\n\n return filename", "def get_file_extension(filename):\n # Find the first match from the list of supported file extensions\n extension = next((ext for ext in EXT_LST if filename.lower().endswith(ext)), None)\n return extension", "def changeFilenameExtension(path, extension):\n return os.path.splitext(path)[0] + extension", "def _splitzipext(self, filename):\n\n if self._iszip(filename):\n return os.path.splitext(filename)\n else:\n return filename, None", "def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]" ]
[ "0.80850315", "0.7980222", "0.7961075", "0.79087883", "0.7875789", "0.7851262", "0.7824376", "0.7815243", "0.7754425", "0.7731678", "0.7668338", "0.7649159", "0.76470023", "0.7626894", "0.7625784", "0.76075673", "0.7575197", "0.7550891", "0.75276417", "0.7518576", "0.74935955", "0.7448077", "0.74469334", "0.7444954", "0.74412894", "0.74397844", "0.7434998", "0.742482", "0.7411595", "0.7399563", "0.737115", "0.7349677", "0.73170173", "0.7316183", "0.7295645", "0.7291051", "0.72878516", "0.72443163", "0.7232328", "0.71825707", "0.71747077", "0.71660155", "0.71459365", "0.7130371", "0.71212184", "0.7105588", "0.7100467", "0.71003675", "0.70923805", "0.7091393", "0.7075686", "0.7063273", "0.7044947", "0.70427585", "0.70394707", "0.7036835", "0.70022494", "0.6996575", "0.6984648", "0.69830143", "0.6975961", "0.6961779", "0.6941055", "0.6916498", "0.69040394", "0.68851614", "0.6850023", "0.6835943", "0.6813081", "0.6796724", "0.6788144", "0.6775965", "0.6770598", "0.67688286", "0.6746301", "0.67443234", "0.6740689", "0.67386514", "0.67204756", "0.67204756", "0.6720212", "0.6697405", "0.6692231", "0.6681647", "0.66788673", "0.66682804", "0.66680914", "0.6667423", "0.6664412", "0.6662342", "0.66594887", "0.66567737", "0.66435266", "0.6641409", "0.6624436", "0.66139704", "0.6611562", "0.6611134", "0.6596401", "0.65762484" ]
0.79995126
1
Check if multiple or single file is chosen.
def check_multi_single(filenames):
    num = len(filenames)
    if num == 1:
        single = bool(1)
    else:
        single = bool(0)
    return single
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_multi_file(self):\n return 'files' in self.torrent['info']", "def select_files():\n\n if not Settings.is_prompt(): return [File.get_random_file()]\n category = Settings.select_category()\n if not category: return File.select_file_upload_method()\n # if not Settings.confirm(category): return File.select_files()\n Settings.print(\"Select Files or a Folder\")\n files = []\n while True:\n file = File.select_file(category)\n if not file: break\n ##\n if \"performer\" in str(category):\n cat = Settings.select_category([cat for cat in Settings.get_categories() if \"performer\" not in cat])\n performerName = file.get_title()\n file = File.select_file(cat, performer=performerName)\n if not file: break\n setattr(file, \"performer\", performerName)\n files.append(file)\n if \"galler\" in str(cat) or \"video\" in str(cat): break\n ##\n files.append(file)\n if \"galler\" in str(category) or \"video\" in str(category): break\n if str(files[0]) == \"unset\": return files\n if not Settings.confirm([file.get_title() for file in files]): return File.select_files()\n return files", "def choose_file(self):\n pass", "def select_files(self):\n pass", "def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def SearchFileType(ext, message0 = \"\", message1 = \"\", message2 = \"\"):\n extList = glob.glob('*'+ext)\n ChooseNumOption(extList, \"file\", ext, message0, message1, message2, True)", "def _filename_multi(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilenames(**self._kwargs)", "def OpenAnyFiles():\n \n wildcard = create_wildcard(\"All files\", ['*', '*.*'])\n\n files = []\n dlg = wx.FileDialog(None, \n \"Select file(s)\", \n paths.samples, \n \"\", \n wildcard, \n wx.FD_OPEN | wx.MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files = dlg.GetPaths()\n\n dlg.Destroy()\n return files", "def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: {os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True", "def is_multiple(self):\n return self.browser.get_attribute(\"multiple\", self) is not None", "def 
get_file_type_options():\n curr_type = global_settings.settings[menu_option]\n msg = \"\"\n for i in range(len(file_types)):\n if curr_type == file_types[i]:\n msg += str(i) + \" \" + file_types[i] + \" SELECTED\\n\"\n else:\n msg += str(i) + \" \" + file_types[i] + \"\\n\"\n return msg", "def FileDialog( message, wildcard, style, defaultDir=os.getcwd(), defaultFile='' ):\n dlg = wx.FileDialog( wx.GetApp().GetTopWindow(), message, defaultDir, defaultFile, wildcard, style )\n if dlg.ShowModal() == wx.ID_OK:\n if style & wx.MULTIPLE:\n result = dlg.GetPaths()\n else:\n result = dlg.GetPath()\n else:\n result = False\n dlg.Destroy()\n \n return result", "def get_file(path, extension, override=False):\r\n\r\n ext_lst = []\r\n for f_name in os.listdir(path):\r\n if f_name.endswith(extension):\r\n ext_lst.append(f_name)\r\n\r\n if len(ext_lst) == 0:\r\n return ext_lst\r\n\r\n elif len(ext_lst) == 1:\r\n return ext_lst[0]\r\n\r\n else:\r\n\r\n if override:\r\n return True\r\n\r\n else:\r\n while True:\r\n files = [file for file in ext_lst]\r\n file_choice = input(\"\\nMore than one {} file exists in {}.\\n\"\r\n \"Files available: {}\\n\"\r\n \"Please input the file name that you want. \".format(extension, path, files))\r\n if file_choice in ext_lst:\r\n return file_choice\r\n else:\r\n print(\"File does not exist.\")", "def _selectInput(self):\n\n (my_file, my_path) = misc.get_file(FilterSpec='*.wav', \n DialogTitle='Select sound-input:', \n DefaultName='')\n if my_path == 0:\n print('No file selected')\n return 0\n else:\n full_in_file = os.path.join(my_path, my_file)\n print('Selection: ' + full_in_file)\n return full_in_file", "def supportedType(request, video_types):\n return request.FILES['file'].content_type in video_types.keys()", "def test_multiple_file_types(self):\n\n support.create_project(self, 'candy')\n support.add_step(self)\n support.add_step(self, name='.md', position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01'))\n self.assertTrue(steps[1].filename.startswith('S02'))", "def select_files(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self.parent,\n \"File Export\",\n os.path.expanduser('~/'),\n \"Ensemble Files (*.ens, *.bin);;Binary Files (*.bin);;All Files (*)\",\n options=options)\n if files:\n # Store the list of results\n self.selected_files = files\n\n # Analyze the files\n self.analyze_files()", "def validFiles(self, files):\n for myfile in files:\n if not ( ( myfile.get_uri_scheme() == 'file' ) or \\\n ( myfile.get_uri_scheme() == 'smb' ) ):\n return False\n elif ( not myfile.get_mime_type() in self.oootypes ) and \\\n ( not myfile.get_mime_type() in self.plaintypes ):\n return False\n return True", "def selectfile (f_name):\n global file_name\n file_name = \"\"\n for i in f_name:\n file_name = file_name + i\n try:\n file = open(file_name,\"r\")\n dictionnary = False\n print(\"Now using {0} as base file\".format(file_name))\n file.close()\n except:\n print(\"Are you kidding me? That file doesn't even exist, could you please try again?\")\n return", "def select(files, file_type):\n k = 0\n print('== ' + file_type + ' List ==')\n for file in files:\n print(\"[\" + str(k) + \"]. 
\" + file)\n k += 1\n print('Select a ' + file_type + ' to continue')\n idx = int(input())\n return files[idx]", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def check_input():\n\n #If the length of the input is different from 2 return False\n if len(sys.argv) != 2:\n return False\n\n else:\n\n #If the input does not contain the string \".mp3\" or\n #Its length is less than 4 return False \n if \".mp3\" not in sys.argv[1] or len(sys.argv[1]) <= 4:\n return False\n\n for mp3_filename in os.listdir(MP3_FOLDER):\n\n #Return True if the input is found in the MP3_FOLDER\n if mp3_filename == sys.argv[1]:\n return True\n\n\n #If it is not in the mp3 folder return False\n return False", "def has_preset(self, filename):\r\n\r\n return filename in self.preset_ids()", "def default_filter(files):\n\n if '1.mkv' in files and '2.mkv' in files and 'Labels.json' in files:\n return True\n\n return False", "def is_accept_type(file_name):\n bare_name, file_extension = os.path.splitext(file_name)\n for ext in ACCEPTED_FILES:\n if file_extension.lower() == ext:\n return True\n return False", "def test_multiple_file_types_many(self):\n\n support.create_project(self, 'candy')\n support.add_step(self)\n support.add_step(self)\n support.add_step(self)\n support.add_step(self, name='.md', position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01'))\n self.assertTrue(steps[1].filename.startswith('S02'))\n self.assertTrue(steps[2].filename.startswith('S03'))\n self.assertTrue(steps[3].filename.startswith('S04'))", "def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename", "def supported():\n return os.path.isfile(OPENCOR)", "def selectFiles(self):\n\n filenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # clears any case selections\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from source\")\n result = cur.fetchall()\n for row in result:\n filenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n self.fileIDs += \",\" + str(row[0])\n if len(self.fileIDs) > 0:\n self.fileIDs = self.fileIDs[1:]\n\n Dialog_selectfile = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(filenames)\n ui.setupUi(Dialog_selectfile, \"Select file(s) to view\", \"many\")\n ok = Dialog_selectfile.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedFiles = ui.getSelected() # list of dictionaries\n for row in selectedFiles:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.fileIDs = tmp_IDs[1:]", "def askFilename():\n# print(\"\\nDo you have the file already?\"+\n# \"\\nYes - proceed\\t\\t No - go back to main menu\")\n# choice = input(\"(Y/N) \")\n# if choice.upper() == \"N\":\n# filename = None\n# elif choice.upper() == \"Y\": \n print(\"\\nInsert file name (without the filetype)\")\n 
print(\"(PRESS CTRL+C IF THERE IS NO FILE YET!!)\")\n fileOpt = input(\"or press enter if saved on default name: \") \n if fileOpt != \"\":\n filename = fileOpt+\".txt\"\n else:\n print(\"\\n\\nFinding file...\")\n print(\"\\n\\nWhich party is it for?\")\n print(\"A. Labor\\t\\t B. Liberal\")\n partyOpt = input(\"Selected party is (A/B): \")\n list1 = [\"A\", \"B\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A/B): \")\n marginOpt = input(\"\\nWhat was the margin used? (enter as int) \")\n if partyOpt.upper() == \"A\":\n filename = \"LaborParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n elif partyOpt.upper() == \"B\":\n filename = \"LiberalParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n return filename", "def submit(self, event=None):\n\n if self.select_dirs == False:\n for item in self.dialog_selection:\n if isdir(item):\n messagebox.showwarning(\n \"Error - Invalid Selection\",\n \"Unable to select directory. Please select a file(s).\"\n )\n return\n\n if self.select_files == False:\n for item in self.dialog_selection:\n if isfile(item):\n messagebox.showwarning(\n \"Error - Invalid Selection\",\n \"Unable to select file. Please select a folder(s)\"\n )\n return\n \n self.dialog.destroy()", "def get_availability(self):\n return [self.to_fname(type).is_file() for type in (\"pdf\", \"si\")]", "def _open_files(self):\n file_names = filedialog.askopenfilenames(initialdir=self.current_directory, title = \"Select file\")\n if(file_names): self.current_directory = os.path.dirname(file_names[0])\n if(len(file_names) == 1):\n file_names = file_names[0]\n return file_names", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def is_file(field):\n return isinstance(field.field.widget, forms.FileInput)", "def askOpenMulti(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN|wx.MULTIPLE):\r\n return askOpen(parent,title,defaultDir,defaultFile,wildcard,style )", "def check_file(self):\n\n # File manipulation status\n status = {}\n\n # check if the post request has the file part\n if 'datasource' not in self.request.files:\n status['error'] = 'No file part'\n return False, status\n\n file = request.files['datasource']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n #flash('No selected file')\n #return redirect(request.url)\n status['error'] = 'No selected file'\n return False, status\n\n # Get filename\n # Save to local hardrive\n filename = secure_filename(file.filename)\n # file.save(os.path.join(self.kwargs['UPLOAD_FOLDER'], filename))\n is_saved, error = self.save_file(self.kwargs['UPLOAD_FOLDER'], filename, file)\n\n if is_saved:\n # Return filename\n status['filename'] = filename\n return True, status\n else:\n\n # Return error if something wrong\n status['error'] = error\n return False, status", "def open(self):\n file = askopenfilename(\n initialdir=self.initial_directory,\n filetypes=(\n (\"Audio Video Interleave\", \"*.avi\"),\n (\"Matroska\", \"*.mkv\"),(\"MPEG-4 AVC\",\"*.mp4\"),\n )\n )\n if isinstance(file, tuple):\n return\n if os.path.isfile(file):\n self.play_film(file)", "def check_input(self):\n try:\n if(self.datatype == \"eeg\"):\n self.model.set_datatype(self.datatype)\n self.model.set_dyad(self.dyad)\n self.model.set_channel(self.channel_or_video)#causes loading of data\n elif(self.datatype == \"motion\"):\n self.model.set_datatype(self.datatype)\n 
self.model.set_filepath(self.database.dictionary[str(self.dyad)][\"video\"][str(self.channel_or_video)][\"motion\"][\"in_roi\"][\"1\"][\"path\"])#TODO NOT ALWAYS 1\n self.model.set_channel(self.channel_or_video)\n else:\n QMessageBox.about(self, \"Incorrect selection\", \"Choose datatype\")\n self.accept()\n except KeyError as e:\n QMessageBox.about(self, \"Incorrect selection\", \"Please choose wisely\" + str(e))", "def is_file(self):\n return self.tipo == 'file' or self.tipo is None", "def select_files():\n root = Tk()\n root.withdraw()\n root.wm_attributes('-topmost', 1)\n files = askopenfilenames(parent=root,\n title=\"Select file\",\n filetypes=((\"Image files\", '*' + ';*'.join(supported_extensions)), (\"all files\", \"*.*\"))\n )\n return root.tk.splitlist(files)", "def _accept_for_flag (self, filename):\n\t\troot, ext = os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions", "def on_file_entry_changed(self, *args):\n name = self.fileEntry.get_text()\n if name == \"\":\n self.okButton.set_sensitive(False)\n else:\n self.okButton.set_sensitive(True)", "def file_allowed(self):\n if self._allowed_ext:\n if self.get_ext() not in self._allowed_ext:\n return False\n \n return True", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True", "def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')", "def select_file(cls):\n note = ''\n if module_exists('Tkinter'):\n from Tkinter import Tk\n import tkFileDialog\n root = Tk()\n root.filename = tkFileDialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if module_exists('tkinter'):\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if not root.filename:\n root.destroy()\n cancel_input = yes_no(\"Would you like to add a release note?\")\n if cancel_input is True:\n cls.select_input()\n else:\n return False\n else:\n with open(root.filename, 'r') as file_notes:\n lines = file_notes.readlines()\n for line in lines:\n note += line\n if note == '':\n note = False\n return note", "def test_invalid_prompt_files(self):\n invalid_files = [\n \"p01.txt\",\n \"abc.txt\",\n \"p000001s000001.mp3\",\n \"p000001s000001n001.txt\",\n \"u000001.txt\"\n ]\n filtered_files = list(filter(format.is_prompt_file, invalid_files))\n\n assert len(filtered_files) == 0", "def checkForFile(self, filename:str):\n\t\tfor item in os.listdir(self.getPath()):\n\t\t\tif filename in item:\n\t\t\t\treturn True\n\t\treturn False", "def test_general_subset_file_type():\n pass", "def select_file(cls):\n cls.init()\n note = ''\n if module_exists('Tkinter'):\n from Tkinter import Tk\n import tkFileDialog\n root = Tk()\n root.filename = tkFileDialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if module_exists('tkinter'):\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.filename = filedialog.askopenfilename(initialdir = 
\"/\",title = \"Select file\",filetypes = ((\"text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n if not root.filename:\n root.destroy()\n cancel_input = yes_no(\"Would you like to add a release note?\")\n if cancel_input is True:\n cls.select_input()\n else:\n return False\n else:\n with open(root.filename, 'r') as file_notes:\n lines = file_notes.readlines()\n for line in lines:\n note += line\n if note == '':\n note = False\n cls.logger.debug(\"Note selected from a file.\")\n return note", "def __check_for_video_file(self):\n formats = ('avi', 'mpg', 'mpeg', 'mp4')\n if os.path.splitext(self.args.input.name)[-1] in (\".%s\" % ext for ext in formats):\n # we got a valid (at least according to extension) file\n pass\n else:\n logging.critical(\"Input is not a video file. Only supports %s\" % \", \".join(formats))\n sys.exit(10)", "def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True", "def is_file(self):\n return self.type == \"file\"", "def _is_in_sample(self, file):\n if self._sampling is None:\n return True\n elif os.path.basename(file) in self._sampling:\n return True\n else:\n return False", "def test_pick_files(mock_zip_file):\n\n files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']\n\n options = {'input_files': files, 'zip_path': str(mock_zip_file)}\n\n b = process.Packager(options)\n\n result = b.options['input_files']\n expected = files\n\n assert result == expected, 'Returns only the files picked.'", "def OpenAnnotationFiles(multiple=True):\n\n wildcard = create_wildcard(\"All files\", sppas.src.anndata.aio.extensionsul)\n wildcard += '|' + create_wildcard(\"SPPAS\", sppas.src.anndata.aio.ext_sppas)\n wildcard += '|' + create_wildcard(\"Praat\", sppas.src.anndata.aio.ext_praat)\n wildcard += '|' + create_wildcard(\"ELAN\", sppas.src.anndata.aio.ext_elan)\n wildcard += '|' + create_wildcard(\"Transcriber\", sppas.src.anndata.aio.ext_transcriber)\n wildcard += '|' + create_wildcard(\"Phonedit\", sppas.src.anndata.aio.ext_phonedit)\n wildcard += '|' + create_wildcard(\"ASCII\", sppas.src.anndata.aio.ext_ascii)\n\n files = list()\n if multiple is True:\n dlg = wx.FileDialog(None, \n \"Select annotation file(s)\", \n os.getcwd(), \n \"\", \n wildcard, \n wx.FD_OPEN | wx.MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files = dlg.GetPaths()\n\n else:\n dlg = wx.FileDialog(None, \n \"Select annotation file\", \n paths.samples, \n \"\", \n wildcard, \n wx.FD_OPEN | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files.append(dlg.GetPath())\n\n dlg.Destroy()\n\n if multiple is False:\n return files[0]\n return files", "def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem", "def choose_gv_file_type():\n display_menu(menu_msg + get_file_type_options() + menu_msg_2)\n while True:\n inpt = input()\n try:\n # If we get a good number, select it!\n if int(inpt) in range(len(file_types)):\n selected = file_types[int(inpt)]\n show_notification(\"Selected:\\n\" + selected)\n global_settings.update(menu_option, selected)\n return int(inpt)\n else:\n show_error(\"Not a valid observer index\")\n except ValueError:\n # Check if the user wishes to quit\n inpt = inpt.lower()\n if inpt in [\"e\", \"exit\"]:\n return None\n else:\n show_error(\"Command not recognized\")", "def has_single_file(self, upload_id: str, token: str,\n file_type: str = 'PDF') -> bool:\n stat = 
self.get_upload_status(upload_id, token)\n try:\n next((f.name for f in stat.files if f.file_type == file_type))\n except StopIteration: # Empty iterator => no such file.\n return False\n return True", "def test_multiple_file_types_named(self):\n\n support.create_project(self, 'candera')\n support.add_step(self, name='A')\n support.add_step(self, name='B')\n support.add_step(self, name='C')\n support.add_step(self, name='D.md', position='0')\n\n project = cauldron.project.get_internal_project()\n steps = project.steps\n\n self.assertTrue(steps[0].filename.startswith('S01-D'))\n self.assertTrue(steps[1].filename.startswith('S02'))\n self.assertTrue(steps[2].filename.startswith('S03'))\n self.assertTrue(steps[3].filename.startswith('S04'))", "def selection_file_type(self):\n self.selection_directory()\n self.ui_FileList.clearSelection()\n if not self.show_save_action:\n self.ui_SelectedName.setText(None)\n if self.show_save_action:\n text = self.ui_SelectedName.text()\n new_text = text.split(\".\")[0]\n self.ui_SelectedName.setText(new_text)", "def validateSelection(self, exportItems):\n\n invalidItems = []\n # Look for selected items which arent of the correct type\n for item in exportItems:\n if not item.sequence() and not item.trackItem():\n invalidItems.append(item)\n\n return len(invalidItems) < len(exportItems)", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def checkFile(self,selected_file):\n path_holder = pathlib.Path(selected_file)\n if path_holder.exists():\n if path_holder.is_file():\n if path_holder.stat().st_size == 0 or path_holder.stat().st_size is None:\n raise CoreException.FileEmptyError(\"File should not be empty!\")\n return False\n\n if path_holder.is_symlink():\n raise CoreException.FileNotSupportedError(\"Symbolic link not supported\")\n return False\n \n # File Clean if they pass the required identity of file.\n return True", "def is_select_multiple(self) -> bool:\n select_multiple_starts = (\n 'select_multiple ',\n 'select_multiple_external ',\n )\n row_type = self.get_type()\n return any(row_type.startswith(item) for item in select_multiple_starts)", "def handle_path_file_conflict(self):\n# self.smm_output.clear_output()\n if self.savebtn.button_style == 'warning': # second press, we have path/file problem to sort out\n if self.whatif_file_exist.index == 0:\n self._delete_temp_fig_files()\n elif self.whatif_file_exist.index == 2:\n self.figdir += datetime.now().strftime('__%Y-%m-%d_%H-%M-%S')\n os.makedirs(self.figdir)\n elif self.savebtn.button_style == 'info': # second or third press, we are generating the movie, user call for abort\n self.__reset_save_button()\n return True\n elif not self.savebtn.button_style: # user press the button for the first time\n self.savebtn.description, self.savebtn.tooltip, self.savebtn.button_style = \\\n 'continue', 'continue with selected option', 'warning'\n with self.smm_output:\n print('directory ' + self.figdir + ' not empty')\n display(self.whatif_file_exist)\n return True\n else:\n return True", "def browse_target(self):\n return self.type in ('a', 's')", "def _check_input_files(self, truth_files, decl_files):\n\n # Ensure we have list group of declaration and truth file names.\n if not isinstance(decl_files, list):\n decl_files = [decl_files]\n\n if not 
isinstance(truth_files, list):\n truth_files = [truth_files]\n\n if len(decl_files) != len(truth_files):\n raise AtlasScorerError(\n \"Must have same number of truth and declaration files.\"\n )\n\n if not decl_files:\n raise AtlasScorerError(\"Must have at least one declaration file.\")", "def setNumberOfFiles(self):\n while True:\n try:\n tempNumberOfFiles = int(input(\n \"How many files make up the run? [%d]: \" % self.numberOfFiles) or self.numberOfFiles)\n if tempNumberOfFiles >= 1:\n self.numberOfFiles = tempNumberOfFiles\n break\n else:\n print (\"Integer >= 1 needed!\")\n except ValueError:\n print (\"Integer >= 1 needed!\")", "def check_file(file: Path):\n if Path(file).is_file() or file == \"\":\n return file\n else:\n files = glob.glob(\"./**/\" + file, recursive=True) # find file\n FILE_NOT_FOUND_MSG = f\"File Not Found: {file}\"\n MULTIPLE_FILE_MSG = f\"Multiple files match '{file}', specify exact path:{files}\"\n\n assert len(files), FILE_NOT_FOUND_MSG # assert file was found\n assert len(files) == 1, MULTIPLE_FILE_MSG # assert unique\n return files[0] # return file", "def _what_is_filename(self):\n found = False\n\n while not found:\n prompt = \"-?- What file to send -> \"\n fn = self._input(prompt)\n found = FileTools().does_file_exist(fn)\n fs = FileTools().what_is_filesize(fn)\n return fn, fs", "def check_file_name_extensions(self, file_name, input_output):\n file_type = FileTypes ()\n extension_types = file_type.get_extension_types ()\n for extension in extension_types:\n if file_name.endswith (extension):\n if input_output == 'input':\n self._input_file = file_type.get_file_type (extension)\n else:\n self._output_file = file_type.get_file_type (extension)\n return True\n print (\"File name must end with:\")\n for extension in extension_types:\n print (extension)\n return False", "def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)", "def on_File1_toolButton_clicked(self):\n my_file = QtWidgets.QFileDialog.getOpenFileName(self, u'打开文件', '/')\n if my_file[0]:\n self.File1_lineEdit.setText(my_file[0])\n else:\n QtWidgets.QMessageBox.warning(self, u'警告', u'请选择输入文件')", "def is_valid_file_or_directory(args):\n if is_valid_file(args) or is_valid_directory(args):\n return True\n return False", "def OpenSpecificFiles(name, extensions):\n \n wildcard = create_wildcard(name, extensions)\n\n afile = \"\"\n dlg = wx.FileDialog(None, \n \"Select a file\", \n os.getcwd(), \n \"\", \n wildcard, \n wx.FD_OPEN | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n afile = dlg.GetPath()\n\n dlg.Destroy()\n return afile", "def get_accepted_and_ommited_files(files):\n assert len(files) > 0, \"ERROR: get_accepted_and_ommited_files(). No files\"\n allowed = []\n not_allowed = []\n for file in files:\n if is_file_allowed(file):\n allowed.append(file)\n else:\n not_allowed.append(file)\n return (allowed, not_allowed)", "def test_valid_upload_modes(self):\n upload_helpers.verify_upload_mode(MODE_DEFAULT)\n upload_helpers.verify_upload_mode(MODE_FAST5)\n upload_helpers.verify_upload_mode(MODE_ASSEMBLIES)", "def handle_select(self):\n #self.selected = input('>> ')\n self.selected = '0'\n if self.selected in ['Q', 'q']:\n sys.exit(1)\n elif self.selected in ['B', 'b']:\n self.back_to_menu = True\n return True\n elif is_num(self.selected):\n if 0 <= int(self.selected) <= len(self.hrefs) - 1:\n self.back_to_menu = False\n return True\n else:\n print(Colors.FAIL +\n 'Wrong index. ' +\n 'Please select an appropiate one or other option.' 
+\n Colors.ENDC)\n return False\n else:\n print(Colors.FAIL +\n 'Invalid input. ' +\n 'Please select an appropiate one or other option.' +\n Colors.ENDC)\n return False", "def is_valid_file(ext, argument):\n formats = {\n 'input_dataset_path': ['csv', 'txt'],\n 'output_dataset_path': ['csv'],\n 'output_plot_path': ['png'],\n 'input_model_path': ['pkl']\n }\n return ext in formats[argument]", "def selectFile(title=\"Select image\", initialdir=None, multiple=False):\r\n file = filedialog.askopenfilename(\r\n initialdir=initialdir,\r\n multiple=multiple,\r\n title=title\r\n )\r\n return file", "def _detect_files(data):\n return any(attr[\"extra\"].get(\"files\")\n for attr in data[\"attributes\"] if attr[\"extra\"])", "def choose_file():\n chdir(getcwd()+'/data/US')\n f = []\n for (dirpath, dirnames, filenames) in walk(getcwd()):\n f.extend(filenames)\n print('Which file do you want to work on?')\n for i in f:\n print(str(f.index(i)) + ' - ' + i)\n while True:\n try:\n return f[int(input('Type its number: '))]\n except ValueError or IndexError:\n print('Invalid input.')", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def validate_input(arg):\n if not type(arg) == list:\n raise ValidArgException('Input \"%s\" must be a list. Got %s' %(arg, type(arg)))\n \n if len(arg) != len(set(arg)):\n raise ValidArgException('\\n\\nDuplicate files found in input list %s\\n' %(arg))\n \n bnames= [os.path.split(x)[1] for x in arg]\n bnames= [re.sub('\\.gz$', '', x) for x in bnames]\n if len(bnames) == 2 and len(set(bnames)) == 1:\n raise ValidArgException('\\n\\nPaired fastq files must have different, unzipped names even if they are in different directories.\\nGot %s\\n' %(arg))\n \n for x in arg:\n if not os.path.isfile(x):\n raise ValidArgException('\\n\\nFile \"%s\" not found\\n' %(x))\n \n if len(arg) == 2:\n return('raw')\n elif len(arg) == 1:\n ext= os.path.splitext(arg[0])[1]\n if ext in ['.sam', '.bam']:\n return(ext.strip('.'))\n else:\n return('raw')\n else:\n raise ValidArgException('\\n\\n1 or 2 item must be in input \"%s\". 
Got %s\\n' %(arg, len(arg)))", "def selection(self):\n self.file.seek(0)\n start = self.file.readline().rstrip().upper()\n if start[0] == \">\":\n self.file_type = \"fasta\"\n elif start[0:1] == \"ID\":\n self.file_type = \"embl_gcg\"\n elif start[0:5] == \"LOCUS\":\n self.file_type = \"genbank\"\n elif start[0] == \";\":\n self.file_type = \"ig\"\n elif not re.search('[^GATCN]', start):\n self.file_type = \"plain\"\n else:\n self.file_type = None\n return self.file_type", "def test_choose_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.title == 'test').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertEqual('aww/test - (testuser)', file, msg='Failed to convert basic Test post!')", "def has_file(self, name):\n return name in self.files", "def AskForFileName():\n file_doesnot_exsit = True\n file_name = None\n while file_doesnot_exsit:\n try:\n file_name = input(\"What is the name of the input file?\")\n file = open(file_name, 'r')\n file_doesnot_exsit = False\n except FileNotFoundError:\n print(\"File is not found\")\n return file_name", "def no_file_error(self: object) -> None:\n messagebox.showerror(\"No file selected\", \"Please select a file\")", "def is_primary_file(self):\n return self.file_type() == FileType.FILE_TYPE_PRIMARY", "def OnOpenFile( self, event ):\n dialog = wx.FileDialog( self, style=wx.OPEN|wx.FD_MULTIPLE )\n if dialog.ShowModal( ) == wx.ID_OK:\n paths = dialog.GetPaths()\n if self.loader:\n # we've already got a displayed data-set, open new window...\n frame = MainFrame()\n frame.Show( True )\n frame.load( *paths )\n else:\n self.load( *paths )", "def check_media_file_type(media_file_class):\n if media_file_class == 'AudioFile':\n media_file_type = 'Audio file'\n elif media_file_class == 'VideoFile':\n media_file_type = 'Video file'\n elif media_file_class == 'DocumentFile':\n media_file_type = 'Document file'\n elif media_file_class == 'ImageFile':\n media_file_type = 'Image file'\n\n return media_file_type", "def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")", "def _check_options(self, options):\r\n xmi_file = options.get(\"xmi_file\")\r\n if not xmi_file or not os.path.exists(xmi_file):\r\n self._error(\"Select XMI file\")\r\n return \r\n\r\n target_folder = options[\"target_folder\"]\r\n if not target_folder:\r\n self._error(\"Select target folder\")\r\n return\r\n \r\n if not os.path.exists(target_folder):\r\n self._error(\"Target folder not exists\")\r\n return \r\n \r\n return True" ]
[ "0.6862918", "0.6714143", "0.64036655", "0.6360947", "0.62420434", "0.6179852", "0.6168158", "0.606886", "0.6025578", "0.5963181", "0.59183747", "0.5914388", "0.59100074", "0.59086585", "0.5877628", "0.5876562", "0.5874005", "0.58681023", "0.5844574", "0.5818032", "0.5810063", "0.57893705", "0.57590026", "0.57482564", "0.5744882", "0.5712493", "0.56653386", "0.56629163", "0.5659043", "0.56516206", "0.5638307", "0.5620679", "0.56094974", "0.56072694", "0.5600479", "0.55919063", "0.55831045", "0.5575616", "0.55488414", "0.55269516", "0.55235183", "0.55206895", "0.5485558", "0.54725313", "0.5470901", "0.5470149", "0.5448993", "0.5448993", "0.54416627", "0.54391444", "0.5432115", "0.5429743", "0.5428236", "0.54221123", "0.5419235", "0.5408156", "0.54062074", "0.53693426", "0.5366522", "0.5357819", "0.53568673", "0.5351649", "0.53400546", "0.5338422", "0.5320828", "0.53144413", "0.53123736", "0.5308315", "0.5304896", "0.5293326", "0.5293183", "0.52903533", "0.52793616", "0.5279035", "0.52664435", "0.5260758", "0.5259803", "0.5259695", "0.5259505", "0.52541083", "0.5251799", "0.52471983", "0.5239317", "0.52390325", "0.52335393", "0.52311134", "0.5229801", "0.52259964", "0.5215372", "0.5207634", "0.52067417", "0.5202506", "0.5202078", "0.5180059", "0.517159", "0.5168248", "0.51635396", "0.5162698", "0.5161925", "0.5160261" ]
0.6930178
0
Call other function to run analysis and upload the results to the database
It can both accept one single .zip file or one/multiple image file(s)
def run_analysis(filepath, ID, method):
    filename, extension = get_file_name(filepath[0])
    if extension == '.zip':
        msg = run_zip_analysis(filepath, ID, method)
    else:
        msg = run_images_analysis(filepath, ID, method)
    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def run_zip_analysis(filepath, ID, method):\n with zipfile.ZipFile(filepath[0]) as zf:\n for entry in zf.namelist():\n if not entry.startswith(\"__\"): # Get rid hidden files in zip\n with zf.open(entry) as file:\n data = file.read()\n fh = io.BytesIO(data)\n Image.open(fh)\n\n filename, extension = get_file_name(file.name)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename,\n extension, fh.getvalue())\n err, msg = check_msg(msg)\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif queries_type == \"txt_option\":\n d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == \"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == \"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = 
ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None", "def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return", "def run(self):\n \n try:\n with zipfile.ZipFile(self.tmpArchiveFilePath, 'w', zipfile.ZIP_DEFLATED) as z:\n z.write(self.resultFilePath, self.fileType+'.tif')\n if os.path.isfile(self.resultPrjFilePath):\n z.write(self.resultPrjFilePath, self.fileType+'.prj')\n except:\n msg = traceback.format_exc()\n self.emit( SIGNAL('failed(QString)'), msg)\n return\n \n with open(self.tmpArchiveFilePath, \"rb\") as binFile:\n base64Data = base64.b64encode(binFile.read())\n fileData = ('file_data', self.tmpArchiveFilePath, base64Data)\n \n self.emit( SIGNAL('prep_completed(int, QString, QString, QString, QString)'), self.eventId, self.fileType, fileData[0], fileData[1], fileData[2])\n return", "def execute(args, **kwargs):\n p = set_options()\n a = p.parse_args(args)\n # logging.info(str(a))\n\n ifiles = ImageFiles(a)\n\n if a.info:\n ifiles.describe()\n else:\n ifiles.write()", "def take_action(self, parsed_args):\n if parsed_args.file:\n for file in parsed_args.file:\n if not os.path.exists(file):\n self.logger.error('Specified file does not exist: {}'.format(file))\n continue\n self.logger.info('File uploading is started: {}'.format(file))\n file_id = self.app.metagen.upload_files(file)\n if not file_id:\n return False\n self.logger.info('File {} has been sent to analysis.'.format(file))\n self.logger.info('Use File ID to get Analysis Result: {}'.format(file_id))\n self.logger.info('Task Done')", "def execute_event(self):\n try:\n with open(self._import_path_input.get(), 'r') as \\\n raw_data_file, \\\n open(self._export_path_input.get(),\n 'w', newline='') as processed_data_file:\n\n # all situation counter\n count_true = 0\n count_false = 0\n count_tag = 0\n count_not_found = 0\n\n # get the user input api(url)\n original_url = self._url_input.get()\n\n # true/false and tag/NotFound flags\n flag_true_false = False\n flag_tag_notfound = False\n\n # set the flag\n temp_line = raw_data_file.readline().strip().strip('\\r\\n')\n temp_clean_url = original_url[:-8] + temp_line\n temp_response = self.api_dealer(temp_clean_url)\n if temp_response == 't' or temp_response == 'f':\n flag_true_false = True\n else:\n flag_tag_notfound = True\n\n # process the file\n for line in raw_data_file:\n clean_line = line.strip().strip('\\r\\n')\n if clean_line == '':\n tk.messagebox.showinfo('info', 'end of file or '\n 'unexpected newline in '\n 'the end')\n break\n # get rid of the '<target>' ending of the original url\n # and combine with the real target\n clean_url = original_url[:-8] + clean_line\n response = self.api_dealer(clean_url)\n\n # deal with different kinds of output and update the counter\n if response == 't':\n count_true += 1\n elif response == 'f':\n count_false += 1\n elif response == 'Not Found':\n count_not_found += 1\n else:\n count_tag += 1\n\n # create the output row and write to file\n output_row = clean_line + ' ' + response + '\\r\\n'\n processed_data_file.write(output_row)\n\n # now output the ratio\n if flag_true_false:\n 
self._ratio_var.set(str(count_true) + '/' + str(\n count_true + count_false))\n elif flag_tag_notfound:\n self._ratio_var.set(str(count_tag) + '/' + str(count_tag\n + count_not_found))\n\n tk.messagebox.showinfo('message', 'job done! have a nice day!')\n\n except Exception as e:\n tk.messagebox.showerror('error', e)", "def upload_result():\n if len(request.files) == 0:\n return jsonify(success=False), 400\n\n file = next(request.files.values())\n filename = secure_filename(file.filename)\n file.save(op.join(RESULTS_FOLDER, filename))\n\n result = Result()\n result.file = op.join(RESULTS_FOLDER, filename)\n\n result.detector_start_time = datetime.fromtimestamp(float(request.form[\"detector_start_time\"]))\n result.detector_end_time = datetime.fromtimestamp(float(request.form[\"detector_end_time\"]))\n\n db.session.add(result)\n db.session.commit()\n\n return jsonify(success=True, result_id=result.id), 200", "def runDataExtraction():\r\n config = CONFIG['steps']['DataExtraction']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n columns = ci['columns']\r\n nrows = ci['nrows']\r\n input_bucket = ci['bucket']\r\n no_of_files = ci['no_of_files']\r\n\r\n output_bucket = co['bucket']\r\n csv_name_prefix = co['csv_name_prefix']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n zip_files = get_files(input_bucket, boto_client, file_type='zip')\r\n\r\n no_of_files_to_process = no_of_files if no_of_files is not None else len(\r\n zip_files)\r\n for zip_file in tqdm(zip_files[:no_of_files_to_process], total=no_of_files_to_process):\r\n process_file(zip_file, input_bucket, output_bucket, minioClient, columns,\r\n nrows=nrows, output_csv_name_prefix=csv_name_prefix)", "def upload_analysis(list_of_contents, list_of_names, list_of_dates, session_id, job_id, clean_input_dir):\n\n clean_input_dir = len(clean_input_dir) != 0\n\n print('UPLOAD')\n\n if session_id is not None and list_of_contents is None:\n print(f'Running in session {session_id}')\n\n # make a subdirectory for this session if one doesn't exist\n input_dir = join(BASE_DIR, 'input', f'input_{session_id}')\n try:\n os.mkdir(input_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(input_dir, 'analysis'))\n except FileExistsError:\n pass\n\n # Create an output directory for this session if it doesn't exist\n output_dir = join(BASE_DIR, 'output', f'output_{session_id}')\n try:\n os.mkdir(output_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis'))\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis', 'images'))\n except FileExistsError:\n pass\n\n def _clean_input_dir():\n \"\"\"\n Clean the input directory by removing every existing file.\n \"\"\"\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))\n\n try:\n\n # If the user isn't uplaoding anything and\n # hasn't uploaded anything, ask them to do so.\n # print(os.listdir(input_dir))\n if list_of_contents is None and len(os.listdir(join(input_dir, 'analysis'))) == 0:\n return 
'Please upload some files.'\n\n # if the user is uploading something, first clean the input directory,\n # then write the uploaded files to BASE_DIR/input/input_{session_id}\n if list_of_contents:\n\n if clean_input_dir:\n _clean_input_dir()\n\n # Save successfully uploaded filenames here\n written = list()\n\n # Write uploaded files to BASE_DIR/input/input_{session_id}\n # If any of the files do not end in .txt,\n # or cannot be decoded properly, or cannot be parsed\n # into Voigt models, then clean the input directory and print\n # the error message. Otherwise, show a bullet list of files\n # uploaded to the input directory.\n\n if not clean_input_dir:\n old_peaks = pd.read_csv(join(input_dir, 'peaks.csv'))\n old_models = pd.read_csv(join(input_dir, 'models.csv'))\n else:\n old_peaks = pd.DataFrame()\n old_models = pd.DataFrame()\n \n new_peaks = pd.DataFrame()\n\n for i, c in enumerate(list_of_contents):\n\n if not list_of_names[i].endswith('.txt'):\n raise Exception(f'File {list_of_names[i]} must be .txt')\n\n s = c.split(',')[1]\n\n try:\n s = base64.b64decode(s).decode()\n except UnicodeDecodeError:\n raise Exception(f'Error uploading file {list_of_names[i]}.\\\n Please check file format and try again.')\n\n with open(join(input_dir, 'analysis', list_of_names[i]), 'w') as f:\n f.write(s)\n\n try:\n parsed_file = parse_file(join(input_dir, 'analysis', list_of_names[i]))\n new_peaks = pd.concat([new_peaks, parsed_file], sort=True)\n except Exception as e:\n import traceback\n traceback.print_exc()\n raise Exception(f'Cannot parse file {list_of_names[i]}: {e}')\n\n written.append(list_of_names[i])\n\n res = [html.Li(x) for x in written]\n res.insert(0, html.P(f'Success! {len(written)} \\\n .txt files were uploaded.'))\n\n # peaks = read_input(session_id)\n id_vars = pd.Series(new_peaks.columns)\n mask = ~(id_vars.str.contains('(p|n)m', regex=True) &\n id_vars.str.contains('center'))\n id_vars = id_vars.loc[mask]\n new_peaks = new_peaks.melt(id_vars=id_vars)\n new_peaks = new_peaks.loc[new_peaks.value.notnull()]\n\n def compute_models(DATA):\n res = pd.DataFrame([], columns=['filename', 'peak_name', 'peak_position', 'amplitude'])\n for idx, (_, model) in enumerate(DATA.iterrows()):\n\n row = pd.Series()\n row['filename'] = model.filename\n row['peak_name'] = model.variable\n row['peak_position'] = model.value\n \n amp_col = model.variable[:model.variable.index('_')] + '_amplitude'\n row['amplitude'] = model[amp_col]\n\n res.loc[idx] = row\n\n return res\n\n new_models = compute_models(new_peaks)\n\n models = pd.concat([old_models, new_models])\n peaks = pd.concat([old_peaks, new_peaks])\n\n models.to_csv(join(input_dir, 'models.csv'))\n\n peaks.to_csv(join(input_dir, 'peaks.csv'))\n\n return res\n\n except Exception as e:\n # If any of the files raise an error (wrong extension,\n # decoding error, error parsing into models),\n # then print the error message.\n _clean_input_dir()\n import traceback; traceback.print_exc()\n return f'An error occurred while uploading files: {e}'", "def main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n print('song file processing is complete')\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n print('log file processing is complete')\n conn.close()", "def upload_file(self):\r\n try:\r\n assert self._db_connection, {\r\n STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR,\r\n MESSAGE_KEY: 
DB_ERROR}\r\n\r\n batch = BatchStatement()\r\n error = False\r\n error_message = None\r\n\r\n if not self.files:\r\n error = True\r\n error_message = \"No files to upload\"\r\n\r\n file_names_list = self.file_names\r\n select_query = SELECT_ALGORITHM_NAME_QUERY.format(NAME, TABLE_NAME, self.algo_name,\r\n \",\".join(map(lambda x: \"'\" + x + \"'\", file_names_list)))\r\n result_set = self._csql_session.execute(select_query)\r\n if result_set[0]['count'] == 0 or result_set[0]['count'] < len(file_names_list):\r\n error_message = \"Please give the existing algorithm or file name\"\r\n return JsonResponse({MESSAGE_KEY: error_message}, status=HTTP_500_INTERNAL_SERVER_ERROR)\r\n\r\n for file in self.files:\r\n\r\n if file.name not in self.file_names:\r\n error = True\r\n error_message = \"Uploaded file name(\" + file.name + \") not found in given file name list\"\r\n break\r\n\r\n description = None\r\n if file.name in self.description:\r\n description = self.description[file.name]\r\n LAST_MODIFIED_DATE = str(round(time.time() * 1000))\r\n\r\n extension = os.path.splitext(file.name)[1]\r\n json_data = \"\"\r\n if self.algo_name == 'last_10_tmt' and file.name == 'features.csv':\r\n file_data = pandas.read_csv(file, encoding='unicode escape')\r\n json_data = file_data.to_json()\r\n elif extension == \".csv\":\r\n file_data = pandas.read_csv(file, encoding='ISO-8859-1')\r\n json_data = file_data.to_json()\r\n elif extension == \".xml\":\r\n file_data = et.parse(file)\r\n xml_str = ElementTree.tostring(file_data.getroot(), encoding='unicode')\r\n json_data = json.dumps(xmltodict.parse(xml_str))\r\n elif extension == \".joblib\":\r\n\r\n json_datas = joblib.load(file)\r\n json_data = escape(str(json_datas))\r\n\r\n \"\"\" insert query into cassandra table \"\"\"\r\n insert_query = FILE_UPLOAD_QUERY.format(NAME, TABLE_NAME, self.algo_name,\r\n file.name,\r\n description,\r\n \"textAsBlob('\" + json_data + \"')\",\r\n LAST_MODIFIED_DATE,\r\n FLAG)\r\n\r\n batch.add(SimpleStatement(insert_query))\r\n\r\n if error is True:\r\n return JsonResponse({MESSAGE_KEY: error_message}, status=HTTP_500_INTERNAL_SERVER_ERROR)\r\n\r\n self._csql_session.execute(batch, timeout=200.0)\r\n return JsonResponse({MESSAGE_KEY: UPLOADED_SUCCESSFULLY}, safe=False)\r\n\r\n except AssertionError as e:\r\n log_error(\"Exception due to : %s\", e)\r\n return JsonResponse({MESSAGE_KEY: e.args[0][MESSAGE_KEY]},\r\n status=e.args[0][STATUS_KEY])\r\n except Exception as e:\r\n log_error(traceback.format_exc())\r\n return JsonResponse({MESSAGE_KEY: EXCEPTION_CAUSE.format(\r\n traceback.format_exc())},\r\n status=HTTP_500_INTERNAL_SERVER_ERROR)", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def perform_process(transformer: transformer_class.Transformer, check_md: dict) -> dict:\n # Process each CSV file into BETYdb\n start_timestamp = datetime.datetime.now()\n files_count = 0\n files_csv = 0\n lines_read = 0\n error_count = 0\n files_loaded = []\n for 
one_file in check_md['list_files']():\n files_count += 1\n if os.path.splitext(one_file)[1].lower() == '.csv':\n files_csv += 1\n\n # Make sure we can access the file\n if not os.path.exists(one_file):\n msg = \"Unable to access csv file '%s'\" % one_file\n logging.debug(msg)\n return {'code': -1000,\n 'error': msg}\n\n try:\n # Read in the lines from the file\n with open(one_file, 'r') as in_file:\n reader = csv.DictReader(in_file)\n files_loaded.append(one_file)\n for row in reader:\n centroid_lonlat = [row['lon'], row['lat']]\n time_fmt = row['dp_time']\n timestamp = row['timestamp']\n dp_metadata = {\n \"source\": row['source'],\n \"value\": row['value']\n }\n trait = row['trait']\n\n __internal__.create_datapoint_with_dependencies(transformer.args.clowder_url, transformer.args.clowder_key,\n trait, (centroid_lonlat[1], centroid_lonlat[0]), time_fmt,\n time_fmt, dp_metadata, timestamp)\n lines_read += 1\n\n except Exception:\n logging.exception(\"Error reading CSV file '%s'. Continuing processing\", os.path.basename(one_file))\n error_count += 1\n\n if files_csv <= 0:\n logging.info(\"No CSV files were found in the list of files to process\")\n if error_count > 0:\n logging.error(\"Errors were found during processing\")\n return {'code': -1001, 'error': \"Too many errors occurred during processing. Please correct and try again\"}\n\n return {\n 'code': 0,\n configuration.TRANSFORMER_NAME: {\n 'version': configuration.TRANSFORMER_VERSION,\n 'utc_timestamp': datetime.datetime.utcnow().isoformat(),\n 'processing_time': str(datetime.datetime.now() - start_timestamp),\n 'num_files_received': str(files_count),\n 'num_csv_files': str(files_csv),\n 'lines_loaded': str(lines_read),\n 'files_processed': str(files_loaded)\n }\n }", "def add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if image_proc_type == \"contrast stretching\":\n info = process_contrast_stretch(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with contrast stretching')\n\n if image_proc_type == \"adaptive equalization\":\n info = process_adapt_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with adaptive equalization')\n\n if image_proc_type == \"histogram equalization\":\n info = process_histogram_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with histogram equalization')\n\n if image_proc_type == \"reverse video\":\n info = process_reverse_image(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n 
y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with reverse image')\n\n if image_proc_type == \"log compression\":\n info = process_log_compression(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with log compression')\n\n return jsonify(\"it worked\")", "def main():\n # All the files in the uploads/siemens directory\n file_iterator = glob.iglob(os.path.join(VALUE_REPORT_DIRECTORY, \"*.csv\"))\n # Reseeding the database, so we need to look at all values including already imported ones\n include_archive = len(sys.argv) > 1 and sys.argv[1] == 'all'\n if include_archive:\n # Also iterate over all the archived CSVs\n file_iterator = itertools.chain(file_iterator,\n glob.iglob(os.path.join(ARCHIVE_DIRECTORY, \"*.csv\")))\n\n for filename in file_iterator:\n print(filename, file=sys.stderr)\n with open(filename, 'r') as csv_file:\n try:\n reader = csv.reader(csv_file)\n next(reader) # headers\n\n # Gets the given number associated with a point that the values are indexed on\n point_names = save_point_name_index(reader)\n\n next(reader) # Date Range\n next(reader) # Report Timings\n next(reader) # empty\n next(reader) # headers\n\n array_for_json = arrange_value_tuples(reader, point_names)\n\n # API call returns a boolean signifying if the import was successful\n success = post_values(array_for_json)\n # If is was successful and we are not reseeding, we want to move the files to the\n # archives folder so we aren't trying to reimport them every day.\n if success[0] and not include_archive:\n os.system('mv %s %s' % (filename, ARCHIVE_DIRECTORY))\n if success[1] and not include_archive:\n os.system('mv %s %s' % (filename, ARCHIVE_WEIRD_DIRECTORY))\n except Exception as e:\n print()\n print(\"Exception while reading file:\", filename)\n print(e)", "def process(image):\n pass", "def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def main():\r\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb2 user=postgres password=261998\")\r\n cur = conn.cursor()\r\n\r\n process_data(cur, conn, filepath='C:/Users/AG/Downloads/Data-engineering-nanodegree-master/Data-engineering-nanodegree-master/1_dend_data_modeling/P1_Postgres_Data_Modeling_and_ETL/data/song_data', func=process_song_file)\r\n process_data(cur, conn, filepath='C:/Users/AG/Downloads/Data-engineering-nanodegree-master/Data-engineering-nanodegree-master/1_dend_data_modeling/P1_Postgres_Data_Modeling_and_ETL/data/log_data', func=process_log_file)\r\n\r\n conn.close()", "def post(self):\n filekey = self.request.get(\"filekey\")\n blob_key = 
self.request.get(\"blobkey\")\n\n if self.request.get(\"daily_speed_sum\"):\n logging.info(\"Starting daily speed sum...\")\n pipeline = DailySpeedSumPipeline(filekey, blob_key)\n pipeline.start()\n self.redirect(pipeline.base_path + \"/status?root=\" + pipeline.pipeline_id)\n else:\n\t logging.info(\"Unrecognized operation.\")", "def start_operation(self):\n\n # Check file\n is_good_file, info = self.check_file()\n\n if is_good_file:\n # Check input params from client\n is_error, json_params = self.check_text()\n\n if bool(is_error):\n return is_error, json_params\n else:\n # Start machine learning here\n from slashmlapi.app.slashml.ml_manager import MLManager\n\n # Path to zip file\n path_textfile = info['filename']\n print(json_params)\n results = MLManager.get_results(path_textfile, json_params, self.config, self.start_time)\n return True, results\n else:\n return is_good_file, info", "def load_batch(self, request, *args, **kwargs):\n # initialize file to avoid the warning below\n file = ''\n try:\n # get a list of the files in the associated path\n base_path = self.request.user.profile.VRExperiment_path\n file_list = listdir(base_path)\n # include only csv files\n # file_list = [el[:-4] for el in file_list if ('.csv' in el) and ('sync' not in el)]\n file_list = [el[:-4] for el in file_list if ('.avi' in el)]\n # get a list of the existing file names\n existing_rows = [el[0] for el in VRExperiment.objects.values_list('slug')]\n # for all the files\n for file in file_list:\n # check if the entry already exists\n if file.lower() in existing_rows:\n # if so, skip making a new one\n continue\n # get the data for the entry\n data_dict = parse_path_experiment(file, self, 'VRExperiment_path')\n # get rid of the animal2 entry\n del data_dict['animal2']\n # check the paths in the filesystem, otherwise leave the entry empty\n for key, value in data_dict.items():\n # if the entry is already empty, don't check\n if data_dict[key] == '':\n continue\n if (isinstance(value, str)) and ('path' in key) and (not exists(value)):\n # print a warning\n print('Path not found for key %s and value %s' % (key, value))\n # clear the path\n data_dict[key] = ''\n # # if the tif file exists but the calcium_data file doesn't, log it in the notes\n # if (data_dict['fluo_path'] == '') and (data_dict['tif_path'] != ''):\n # data_dict['imaging'] = 'no'\n # data_dict['notes'] += 'norois'\n # select the experiment type based on the notes\n if data_dict['rig'] in ['VWheel', 'VWheelWF']:\n if 'fmm' in data_dict['notes']:\n experiment_name = 'Anesthetized'\n else:\n experiment_name = 'Head_fixed'\n else:\n experiment_name = 'Free_behavior'\n # create the model instance with the data\n model_instance = VRExperiment.objects.create(**data_dict)\n # get the model for the experiment type to use\n experiment_type = ExperimentType.objects.filter(experiment_name=experiment_name)\n # add the experiment type to the model instance (must use set() cause m2m)\n model_instance.experiment_type.set(experiment_type)\n # save the model instance\n model_instance.save()\n\n return HttpResponseRedirect('/loggers/vr_experiment/')\n except:\n print('Problem file:' + file)\n return HttpResponseBadRequest('loading file %s failed, check file names' % file)", "def call_file_submission(self):\n if not self.filesSubmitted:\n if CONFIG_BROKER[\"use_aws\"]:\n self.filenames = {\"appropriations\": \"test1.csv\",\n \"award_financial\": \"test2.csv\",\n \"program_activity\": \"test4.csv\",\n \"cgac_code\": \"SYS\", \"frec_code\": None,\n 
\"reporting_period_start_date\": \"01/2001\",\n \"reporting_period_end_date\": \"03/2001\", \"is_quarter\": True}\n else:\n # If local must use full destination path\n file_path = CONFIG_BROKER[\"broker_files\"]\n self.filenames = {\"appropriations\": os.path.join(file_path, \"test1.csv\"),\n \"award_financial\": os.path.join(file_path, \"test2.csv\"),\n \"program_activity\": os.path.join(file_path, \"test4.csv\"),\n \"cgac_code\": \"SYS\", \"frec_code\": None,\n \"reporting_period_start_date\": \"01/2001\",\n \"reporting_period_end_date\": \"03/2001\", \"is_quarter\": True}\n self.submitFilesResponse = self.app.post_json(\"/v1/submit_files/\", self.filenames,\n headers={\"x-session-id\": self.session_id})\n self.updateSubmissionId = self.submitFilesResponse.json[\"submission_id\"]\n return self.submitFilesResponse", "def process(self, image):", "def nipt_upload_all(context: click.Context, dry_run: bool):\n\n LOG.info(\"*** UPLOAD ALL AVAILABLE NIPT RESULTS ***\")\n\n nipt_upload_api = NiptUploadAPI(context.obj)\n analyses = nipt_upload_api.get_all_upload_analyses()\n if not analyses:\n LOG.info(\"No analyses found for upload\")\n return\n\n for analysis in analyses:\n case_id = analysis.family.internal_id\n context.invoke(nipt_upload_case, case_id=case_id, dry_run=dry_run)", "def run(self):\n # FILE INPUT\n if self.text_type == \"file\":\n self.process_files()\n\n # STRING INPUT\n else:\n self.process_strings()\n\n if self.json:\n self.save_json()\n\n if self.errors:\n print(\"\\nThe following file(s) could not be opened:\")\n for error in self.errors:\n print(f\"\\t{error}\")", "def main():\n conn = psycopg2.connect('host=127.0.0.1 dbname=sparkifydb user=student password=student')\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', function=process_song_file)\n process_data(cur, conn, filepath='data/log_data', function=process_log_file)\n\n conn.close()", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. 
Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def main():\n \n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n \n cur = conn.cursor()\n process_data(cur, conn, filepath='data/song_data',\n func=process_song_file) \n \n process_data(cur, conn, filepath='data/log_data',\n func=process_log_file)\n \n conn.close()", "def process_image(self):\n pass", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def uploadimg():\n print(str(pathlib.Path(__file__).resolve().parents[1])+\"im hereeeeeeeeeeeeeeeeeeeeeeeee\")\n path = str(pathlib.Path(__file__).resolve().parents[1])\n target = os.path.join(path,'Facial recognition/dataset')\n email = session['username']\n target = target+'/'+email\n # app_root, 'C:/Users\\meetp\\OneDrive\\Desktop\\IotAssigment2\\src\\Facial recognition\\dataset/')\n # print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in 
request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n print(destination)\n file.save(destination)\n\n # encode the image\n # en = encode()\n # en.run(target)\n\n return render_template(\"imguploaded.html\")", "def main():\n\n # Load the API credentials\n with open('./flickr_api.txt') as f:\n keys = yaml.safe_load(f)\n\n # Set the API credentials\n flickr = flickrapi.FlickrAPI(keys['key'], keys['secret'])\n\n # Load the data\n df = pd.read_csv('./beauty-icwsm15-dataset.tsv', sep=\"\\t\", index_col=False)\n total_images = df.shape[0] * 1.0\n df['downloaded'] = None\n\n query_counter = 0.0\n for i, photo_id in enumerate(df['#flickr_photo_id']):\n if query_counter % 100.0 == 0:\n print(str(i) + '/' + str(total_images) + ' images (i.e. ' +\n str(np.round(i / total_images, 3) * 100) + \"%) complete.\")\n time.sleep(15)\n path = OUTPUT_FOLDER + str(photo_id) + \".jpg\"\n if os.path.exists(path):\n df.ix[i, 'downloaded'] = True\n continue\n try:\n query_counter += 1.0\n photo_response = flickr.photos.getInfo(photo_id=photo_id)\n download_photo(photo_id, photo_response)\n df.ix[i, 'downloaded'] = True\n except flickrapi.exceptions.FlickrError:\n df.ix[i, 'downloaded'] = False\n continue\n\n df.to_csv('./download_summary.tsv', sep=\"\\t\", index=False)", "def rerun_analysis(request):\n workflowid = request.POST.get('workflowid')\n workflowid = workflowid.replace('\"', '')\n resultid = request.POST.get('resultid')\n gi = GalaxyInstance(url=request.session.get('server'), email=request.session.get('galaxyemail'), password=request.session.get(\"galaxypass\"))\n ftp = gi.config.get_config()[\"ftp_upload_site\"]\n galaxyemail = request.session.get(\"galaxyemail\")\n galaxypass = request.session.get(\"galaxypass\")\n uploaded_files = []\n urls = request.POST.get('urls')\n urls = urls.split(',')\n gi.histories.create_history(name=resultid)\n history_id = get_history_id(request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'))\n for u in urls:\n filename = u.replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"\").replace('\"', '')\n cont = subprocess.Popen(\n [\"curl -s -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" \" +\n request.session.get('storage') + \"/\" + filename], stdout=subprocess.PIPE, shell=True).communicate()[0]\n file = filename.split('/')\n with open(request.session.get('username') + \"/\" + file[len(file)-1], \"w\") as infile:\n infile.write(cont)\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + infile.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(infile.name.split(\"/\")[-1], history_id, file_type=\"auto\", dbkey=\"?\")\n uploaded_files.append(infile.name.split(\"/\")[-1])\n call([\"rm\", infile.name])\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n # Stop process after workflow is done\n while status['running'] or status['queued'] or status['new'] or status['upload']:\n time.sleep(20)\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n if not status['running'] and not status['queued'] and not status['new'] and not status['upload']:\n for uf in uploaded_files:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"rm -r \" + uf + \"; bye\\\"\"], shell=True)\n break\n oc_folders 
= subprocess.Popen(\n [\"curl -s -X PROPFIND -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" '\" + request.session.get('storage') +\n \"/\" + resultid + \"' | grep -oPm250 '(?<=<d:href>)[^<]+'\"], stdout=subprocess.PIPE, shell=True).communicate()[0].split(\"\\n\")\n for f in oc_folders:\n if \".ga\" in f:\n if \"/owncloud/\" in request.session.get('storage'):\n ga = f.replace('/owncloud/remote.php/webdav/', '')\n else:\n ga = f.replace('/remote.php/webdav/', '')\n gacont = subprocess.Popen([\"curl -s -u \" + request.session.get('username') + \":\" + request.session.get('password') + \" \" +\n request.session.get('storage') + \"/\" + ga], stdout=subprocess.PIPE, shell=True).communicate()[0]\n ga = ga.split('/')\n with open(request.session.get('username') + \"/\" + ga[len(ga)-1], \"w\") as gafile:\n gafile.write(gacont)\n time.sleep(30)\n if workflowid != \"0\":\n gi.workflows.import_workflow_from_local_path(gafile.name)\n workflows = gi.workflows.get_workflows(published=False)\n jwf = json.loads(gacont)\n in_count = 0\n datamap = dict()\n mydict = {}\n for workflow in workflows:\n if \"API\" in workflow[\"name\"]:\n newworkflowid = workflow[\"id\"]\n jsonwf = gi.workflows.export_workflow_json(newworkflowid)\n elif jwf[\"name\"] in workflow[\"name\"]:\n newworkflowid = workflow[\"id\"]\n jsonwf = gi.workflows.export_workflow_json(newworkflowid)\n for i in range(len(jsonwf[\"steps\"])):\n if jsonwf[\"steps\"][str(i)][\"name\"] == \"Input dataset\":\n try:\n label = jsonwf[\"steps\"][str(i)][\"inputs\"][0][\"name\"]\n except IndexError:\n label = jsonwf[\"steps\"][str(i)][\"label\"]\n mydict[\"in%s\" % (str(i+1))] = gi.workflows.get_workflow_inputs(newworkflowid, label=label)[0]\n for k, v in mydict.items():\n datamap[v] = {'src': \"hda\", 'id': get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count]}\n in_count += 1\n gi.workflows.invoke_workflow(newworkflowid, datamap, history_id=history_id)\n gi.workflows.delete_workflow(newworkflowid)\n call([\"rm\", gafile.name])\n return HttpResponseRedirect(reverse(\"index\"))", "def runAll(self):\n \n worker = worker()\n if self.FileFolder.text() == \"\":\n self.makeWarningPopup(\"Please Select a file or Files to run\") \n elif self.OutputFolder.text() == \"\":\n self.makeWarningPopup(\"Please select an output folder\")\n else:\n TheFiles = self.FileFolder.text()\n TheOutPutFolder = self.OutputFolder.text()\n \n runArt = worker.MakeUITeamConversion(self,TheFiles,TheOutPutFolder)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def main():\n\n conn = psycopg2.connect(\n \"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()", "def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", 
n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)", "def run(self, input_path, output_path):\n # read in data\n try:\n image = Image.open(input_path)\n except Exception:\n raise ValueError(\"invalid image file\")\n \n # data preprocessing\n img = self.preprocess(image)\n \n # perform inference\n output = self.model(img)\n \n # post process\n results = self.postprocess(output)\n \n # save output\n results = {'results': results}\n\n with open(output_path, 'w') as out:\n json.dump(results, out)", "def main():\n parser = optparse.OptionParser()\n\n parser.add_option(\n '-d',\n '--delete',\n action ='store_true',\n default = False,\n help = 'Delete each input file after successfully queing it' )\n\n parser.add_option(\n '-u',\n '--user',\n help = 'The username, email, or openid of the owner' )\n\n options, file_name_list = parser.parse_args()\n\n if not options.user:\n parser.error('User is required')\n\n if len(file_name_list) == 0:\n parser.error('No files specified')\n\n try:\n user = models.User.objects.get( email = options.user )\n except models.User.DoesNotExist:\n user = models.User.objects.get( username = options.user )\n\n processor = operations.initialize_processor(\n MODULE_NAME,\n DEFAULT_INPUTS,\n DEFAULT_OUTPUTS,\n DEFAULT_ACCEPTED_MIME_TYPES ) [0]\n\n # pylint: disable-msg=W0702\n # -> no exception type given\n for file_name in 
file_name_list:\n try:\n upload_file(processor, user, file_name)\n except:\n logging.exception('Failed to process %s' % file_name )\n else:\n if options.delete:\n os.remove(file_name)\n # pylint: enable-msg=W0702", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def analyze_data(inputFileList, **kwargs):\n OBSKEY = 'OBSTYPE'\n MTKEY = 'MTFLAG'\n SCNKEY = 'SCAN_TYP'\n FILKEY = 'FILTER'\n FILKEY1 = 'FILTER1'\n FILKEY2 = 'FILTER2'\n APKEY = 'APERTURE'\n TARKEY = 'TARGNAME'\n EXPKEY = 'EXPTIME'\n FGSKEY = 'FGSLOCK'\n CHINKEY = 'CHINJECT'\n\n acsFiltNameList = [FILKEY1, FILKEY2]\n\n catalog = None # Astrometric catalog used for alignment\n catalogSources = 0 # Number of astrometric catalog sources determined based upon coordinate overlap with image WCS\n foundSources = 0 # Number of sources detected in images\n matchSources = 0 # Number of sources cross matched between astrometric catalog and detected in image\n rms_x = -1.0\n rms_y = -1.0\n rms_ra = -1.0\n rms_dec = -1.0\n chisq_x = -1.0\n chisq_y = -1.0\n completed = False # If true, there was no exception and the processing completed all logic\n dateObs = None # Human readable date\n mjdutc = -1.0 # MJD UTC start of exposure\n fgslock = None\n processMsg = None\n status = 9999\n compromised = 0\n headerletFile = None\n\n fit_rms = -1.0\n total_rms = -1.0\n datasetKey = -1.0\n\n namesArray = ('imageName', 'instrument', 'detector', 'filter', 'aperture', 'obstype',\n 'subarray', 'dateObs', 'mjdutc', 'doProcess', 'processMsg', 'catalog', 'foundSources',\n 'catalogSources','matchSources', 'rms_x', 'rms_y', 'rms_ra', 'rms_dec', 'completed',\n 'fit_rms', 'total_rms', 'datasetKey', 'status', 'headerletFile')\n dataType = ('S20', 'S20', 'S20', 'S20', 'S20', 'S20', 'b', 'S20', 'f8', 'b', 'S30',\n 'S20', 'i4', 'i4', 'i4', 'f8', 'f8', 'f8', 'f8', 'b', 'f8', 'f8', 'i8', 'i4', 'S30')\n\n # Create an astropy table\n outputTable = Table(names=namesArray,dtype=dataType)\n\n # Loop over the list of images to determine viability for alignment processing\n #\n # Capture the data characteristics before any evaluation so the information is\n # available for the output table regardless of which keyword is used to \n # to determine the data is not viable for alignment.\n\n for inputFile in inputFileList:\n\n header_hdu = 0\n header_data = getheader(inputFile, header_hdu)\n\n # Keywords to use potentially for downstream analysis\n instrume = (header_data['INSTRUME']).upper()\n detector = (header_data['DETECTOR']).upper()\n subarray = header_data['SUBARRAY']\n dateObs = header_data['DATE-OBS']\n mjdutc = header_data['EXPSTART']\n\n # Obtain keyword values for analysis of viability\n obstype = (header_data[OBSKEY]).upper()\n mtflag = (header_data[MTKEY]).upper()\n \n scan_typ = ''\n if instrume == 'WFC3':\n scan_typ = (header_data[SCNKEY]).upper()\n\n sfilter = ''\n if instrume == 'WFC3':\n sfilter = (header_data[FILKEY]).upper()\n # Concatenate the two ACS filter names together with an underscore\n # If the filter name is blank, skip it\n if instrume == 'ACS':\n for filtname in acsFiltNameList:\n\n # The filter keyword value could be zero or more blank spaces \n # Strip off any leading or trailing blanks\n if len(header_data[filtname].upper().strip()) > 0:\n\n # If the current filter variable already has some content,\n # need to append an underscore before adding more text\n if len(sfilter) > 0:\n sfilter += '_'\n sfilter += header_data[filtname].upper().strip()\n\n aperture = (header_data[APKEY]).upper()\n targname = (header_data[TARKEY]).upper()\n exptime = 
header_data[EXPKEY]\n fgslock = (header_data[FGSKEY]).upper()\n\n chinject = 'NONE'\n if instrume == 'WFC3' and detector == 'UVIS':\n chinject = (header_data[CHINKEY]).upper()\n\n # Determine if the image has one of these conditions. The routine\n # will exit processing upon the first satisfied condition.\n\n noProcKey = None\n noProcValue = None\n doProcess = True\n # Imaging vs spectroscopic or coronagraphic\n if obstype != 'IMAGING':\n noProcKey = OBSKEY\n noProcValue = obstype \n\n # Moving target\n elif mtflag == 'T':\n noProcKey = MTKEY\n noProcValue = mtflag \n\n # Bostrophidon without or with dwell (WFC3 only)\n elif any ([scan_typ == 'C', scan_typ == 'D']):\n noProcKey = SCNKEY\n noProcValue = scan_typ\n\n # Filter which does not begin with: 'F'(F###), 'C'(CLEAR), 'N'(N/A), and is not blank\n # The sfilter variable may be the concatenation of two filters (F160_CLEAR)\n elif sfilter[0] != 'F' and sfilter[0] != '' and sfilter[0] != 'C' and sfilter[0] != 'N': \n noProcKey = FILKEY\n noProcValue = sfilter\n\n elif '_' in sfilter:\n pos = sfilter.index('_')\n pos += 1\n\n if sfilter[pos] != 'F' and sfilter[pos] != '' and sfilter[pos] != 'C' and sfilter[pos] != 'N': \n noProcKey = FILKEY\n noProcValue = sfilter\n\n # Ramp, polarizer, grism, or prism \n elif any (x in aperture for x in ['RAMP', 'POL', 'GRISM', '-REF', 'PRISM']):\n noProcKey = APKEY\n noProcValue = aperture \n\n # Calibration target\n elif any (x in targname for x in ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):\n noProcKey = TARKEY\n noProcValue = targname\n\n # Exposure time of effectively zero\n elif math.isclose(exptime, 0.0, abs_tol=1e-5):\n noProcKey = EXPKEY\n noProcValue = exptime \n\n # Commanded FGS lock\n elif any (x in fgslock for x in ['GY', 'COARSE']):\n noProcKey = FGSKEY\n noProcValue = fgslock\n\n # Charge injection mode\n elif chinject != 'NONE':\n noProcKey = CHINKEY\n noProcValue = chinject\n\n # If noProcKey is set to a keyword, then this image has been found to not be viable for\n # alignment purposes.\n if (noProcKey is not None):\n if (noProcKey != FGSKEY):\n doProcess = False\n msgType = Messages.NOPROC.value\n else:\n msgType = Messages.WARN.value\n\n processMsg = noProcKey + '=' + str(noProcValue)\n\n # Issue message to log file for this data indicating no processing to be done or \n # processing should be allowed, but there may be some issue with the result (e.g., \n # GYROS mode so some drift)\n generate_msg(inputFile, msgType, noProcKey, noProcValue)\n\n # Populate a row of the table\n outputTable.add_row([inputFile, instrume, detector, sfilter, aperture, obstype,\n subarray, dateObs, mjdutc, doProcess, processMsg, catalog, \n foundSources, catalogSources, matchSources, rms_x, rms_y, \n rms_ra, rms_dec, completed, fit_rms, total_rms, datasetKey,\n status, headerletFile])\n #outputTable.pprint(max_width=-1)\n\n return(outputTable)", "def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)", "def main():\n conn = 
psycopg2.connect(f\"host=127.0.0.1 dbname=sparkifydb user={username} password={password}\")\n cur = conn.cursor()\n conn.set_session(autocommit=True)\n\n artists_data, songs_data = process_song_file()\n songplays_help_df, time_data, users_data = process_log_file()\n songplays_data = process_songplays_data(artists_data, songs_data, songplays_help_df)\n\n data_list = [songplays_data, users_data, songs_data, artists_data, time_data]\n for idx, (data, query) in enumerate(zip(data_list, insert_table_queries), start=1):\n print(f\"inserting file {idx}/{len(data_list)}\")\n for row in data:\n try:\n cur.execute(query, row)\n except psycopg2.Error as error:\n print(f\"Psychog2 error @ file {idx} row {row}: {error} NOTE: this file will not be inserted.\")\n\n conn.close()", "def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")", "def submit(self, func, *args, **kwargs):\n self.task_counter += 1\n task_id = self.task_counter\n\n input_files = []\n output_files = []\n std_files = []\n\n # Add input files from the \"inputs\" keyword argument\n func_inputs = kwargs.get(\"inputs\", [])\n for inp in func_inputs:\n if isinstance(inp, File):\n input_files.append(self.create_name_tuple(inp, \"in\"))\n\n for kwarg, inp in kwargs.items():\n # Add appropriate input and output files from \"stdout\" and \"stderr\" keyword arguments\n if kwarg == \"stdout\" or kwarg == \"stderr\":\n if (isinstance(inp, tuple) and len(inp) > 1 and isinstance(inp[0], str) and isinstance(inp[1], str)) or isinstance(inp, str):\n if isinstance(inp, tuple):\n inp = inp[0]\n if not os.path.exists(os.path.join(\".\", os.path.split(inp)[0])):\n continue\n # Create \"std\" files instead of input or output files\n if inp in self.registered_files:\n input_files.append((inp, os.path.basename(inp) + \"-1\", False, \"std\"))\n output_files.append((inp, os.path.basename(inp), False, \"std\"))\n else:\n output_files.append((inp, os.path.basename(inp), False, \"std\"))\n self.registered_files.add(inp)\n # Add to input file if passed-in argument is a File object\n elif isinstance(inp, File):\n input_files.append(self.create_name_tuple(inp, \"in\"))\n\n # Add to input file if passed-in argument is a File object\n for inp in args:\n if isinstance(inp, File):\n input_files.append(self.create_name_tuple(inp, \"in\"))\n\n # Add output files from the \"outputs\" keyword argument\n func_outputs = kwargs.get(\"outputs\", [])\n for output in func_outputs:\n if isinstance(output, File):\n output_files.append(self.create_name_tuple(output, \"out\"))\n\n if not self.submit_process.is_alive():\n raise ExecutorError(self, \"Workqueue Submit Process is not alive\")\n\n # Create a Future object and have it be mapped from the task ID in the tasks dictionary\n fu = Future()\n self.tasks_lock.acquire()\n self.tasks[str(task_id)] = fu\n self.tasks_lock.release()\n\n logger.debug(\"Creating task {} for function {} with args {}\".format(task_id, func, args))\n\n # Pickle the result into object to pass into message buffer\n function_data_file = os.path.join(self.function_data_dir, \"task_\" + str(task_id) + \"_function_data\")\n function_result_file = os.path.join(self.function_data_dir, \"task_\" + str(task_id) + \"_function_result\")\n\n 
logger.debug(\"Creating Task {} with executable at: {}\".format(task_id, function_data_file))\n logger.debug(\"Creating Task {} with result to be found at: {}\".format(task_id, function_result_file))\n\n self._serialize_function(function_data_file, func, args, kwargs)\n\n # Create message to put into the message queue\n logger.debug(\"Placing task {} on message queue\".format(task_id))\n category = func.__qualname__ if self.autocategory else 'parsl-default'\n msg = {\"task_id\": task_id,\n \"data_loc\": function_data_file,\n \"category\": category,\n \"result_loc\": function_result_file,\n \"input_files\": input_files,\n \"output_files\": output_files,\n \"std_files\": std_files}\n self.task_queue.put_nowait(msg)\n\n return fu", "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max 
Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "def btn_upload_callback(self):\n # Create File Select Dialog\n dialog = QFileDialog(parent=self, caption='Images')\n dialog.setMimeTypeFilters(\n [\"image/jpeg\", \"image/png\", \"image/tiff\", 'application/zip'])\n dialog.setFileMode(QFileDialog.ExistingFile)\n\n if dialog.exec_() == QDialog.Accepted:\n\n filename = dialog.selectedFiles()[0]\n\n with open(filename, 'rb') as f:\n file_b64s = fio_to_b64s(f)\n\n if ext_from_path(filename) == '.zip':\n ret = api.upload_zip(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n else:\n ret = api.upload_image(\n file_b64s,\n nameext_from_path(filename),\n self.user_hash\n )\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n self.update_table()", "def run(self):\n st.title('Acne Classifier')\n st.markdown(STYLE, unsafe_allow_html=True)\n file = st.file_uploader(\"Upload file\", type=self.fileTypes)\n show_file = st.empty()\n if not file:\n show_file.info(\"Please upload a file of type: \" + \", \".join([\"png\", \"jpg\"]))\n return\n\n file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)\n opencv_image = cv2.imdecode(file_bytes, 1)\n cv2.imwrite('out.jpg', opencv_image)\n df = get_score()\n\n df2 = df.set_index('Issue')\n st.dataframe(df2)\n st.bar_chart(df2)\n\n if isinstance(file, BytesIO):\n show_file.image(file)\n else:\n data = pd.read_csv(file)\n st.dataframe(data.head(10))\n file.close()", "def analyze(self):\n # turn off all indicator lights\n self._stop_all()\n \n # run, but catch exceptions and abort if necessary\n try:\n # setup\n self.analysis_led[1].blink\n ims_left = self.num_images\n fluid_left = True\n \n data_session = Data(self.data_path)\n \n # run motor & imaging\n while self.power.update() and ims_left > 0:\n # run pump\n self.motor.run(self.pump_runtime)\n \n if not self.power.update():\n break\n \n # image\n time.sleep(self.rest_time)\n self.cam_led.on\n self.camera.capture()\n data_session.fetch_data()\n self.cam_led.off\n \n # subtract from remaining images every cycle\n # if the fluid sensor turns off, set remaining\n # images to the maximum possible remaining\n ims_left -= 1\n if fluid_left and \\\n not self.fluid.update() and \\\n ims_left > self.samps_after_sensor_off:\n fluid_left = False\n ims_left = self.samps_after_sensor_off\n \n # change indicator lights, given complete or power off\n if ims_left == 0:\n # set analysis to green\n self.analysis_led[1].off\n self.analysis_led[0].on\n else:\n # set analysis to solid red\n self.analysis_led[1].on\n \n # transmit data whether or not power switched off\n self.data_led.blink\n data = data_session.prepare_broadcast()\n broadcast_session = Broadcast(self.peer_ip)\n broadcast_session.broadcast_data(data)\n self.data_led.off\n \n except:\n # turn on error indicator and turn off all else\n # do not transmit data\n self._stop_all()\n self.error.on", "def main():\n conn = 
psycopg2.connect(\"host=127.0.0.1 dbname=hallo user=hallo password=hallo\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()", "def transfer(self):\n\n # Upload unverified matches to s3 bucket if unverified argument used (production only)\n if self.in_args.unverified:\n files = glob.glob(os.path.join(self.directories['unverified_matches_dir'].format(self.region_dir, self.proc_type), '*'))\n\n # Loop through files found in unverified_matches folder\n for filepath in files:\n filename = os.path.basename(filepath)\n # Upload each file to S3 bucket folder\n self.upload_file(filepath, self.bucket, 'UK_suppliers/Unverified_Matches/' + filename)\n self.unverified_file = filename\n\n # Zip file creation - note will only work for latest unverified file. Above loop is added just incase\n # any residual files get added manually to S3 bucket.\n\n # Get filepaths of stats file, filtered and excluded matches files\n stats_fp = self.directories['stats_file'].format(self.region_dir, self.proc_type)\n filtered_matches_fp = self.directories['filtered_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n excluded_matches_fp = self.directories['excluded_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n blacklisted_strings_fp = self.directories['blacklisted_string_matches'].format(self.region_dir)\n\n stats_file_fp = self.directories['script_performance_stats_file'].format(self.region_dir, self.proc_type)\n\n # Assign zip file which will contain above files\n files_zip = self.unverified_file[:10] + \"_files.zip\"\n\n with ZipFile(files_zip, 'w') as myzip:\n myzip.write(stats_fp, os.path.basename(stats_fp))\n myzip.write(filtered_matches_fp,os.path.basename(filtered_matches_fp))\n myzip.write(excluded_matches_fp, os.path.basename(excluded_matches_fp))\n myzip.write(blacklisted_strings_fp, os.path.basename(blacklisted_strings_fp))\n myzip.write(stats_file_fp, os.path.basename(stats_file_fp))\n\n self.upload_file(files_zip, self.bucket, 'UK_suppliers/Archive/' + files_zip)\n\n # Download verified matches from s3 bucket if verified argument (production only)\n if self.in_args.verified:\n self.process_verified_files()\n\n # Add confirmed matches/non-matches to training file\n if self.in_args.convert_training:\n self.runfile_mods.convert_training.ConvertToTraining.convert(self)", "def upload_all_completed_analyses(context: click.Context, pipeline: Pipeline = None):\n\n LOG.info(\"----------------- AUTO -----------------\")\n\n status_db: Store = context.obj.status_db\n\n exit_code = 0\n for analysis_obj in status_db.get_analyses_to_upload(pipeline=pipeline):\n if analysis_obj.family.analyses[0].uploaded_at is not None:\n LOG.warning(\n f\"Skipping upload for case {analysis_obj.family.internal_id}. 
\"\n f\"It has been already uploaded at {analysis_obj.family.analyses[0].uploaded_at}.\"\n )\n continue\n\n case_id = analysis_obj.family.internal_id\n LOG.info(\"Uploading analysis for case: %s\", case_id)\n try:\n context.invoke(upload, case_id=case_id)\n except Exception:\n LOG.error(f\"Case {case_id} upload failed\")\n LOG.error(traceback.format_exc())\n exit_code = 1\n\n sys.exit(exit_code)", "def main(input_filepath, import_url):\n # Logging set up\n start = time()\n logger = logging.getLogger(__name__)\n log_import = logger.getChild('import_files')\n logger.info('Importing from raw data')\n \n # Dataset variables\n db_engine = create_engine(import_url, client_encoding='utf8')\n csvs = get_twitter_files(input_filepath)\n \n # Upload data\n log_import.info('Starting to upload {} csvs...'.format(len(csvs)))\n with click.progressbar(csvs, label='CSV Imports: ') as csv_progress:\n for csv in csv_progress:\n import_file(csv, db_engine)\n\n log_import.info('{} files done in {:.2f} secs.'.format(len(csvs), time() - start))", "def perform_analysis_tagging(input_file, output_file, analysis_func, fields):\n stem_df = pd.read_csv(input_file, encoding='utf8')\n\n # Initialize Dataframe fields\n for field in fields:\n stem_df[field] = 0.0\n\n # Iterate over all tweets in dataset\n for index, row in stem_df.iterrows():\n # Clean + stem tweet\n stripped_text = row.text[2:-1]\n cleaned_text = clean_tweet(stripped_text)\n stemmed_tweet = stem_tweet(cleaned_text)\n\n # Analyze sentiment and record scores\n analysis_res = analysis_func(stemmed_tweet)\n for field in fields:\n stem_df[field][index] = analysis_res[field]\n\n if index % 100 == 0:\n print(\"Completed #\", index)\n\n # Save analyzed sentiment to CSV\n stem_df.to_csv(output_file, index=False)", "def load_batch(self, request, *args, **kwargs):\n try:\n # get a list of the files in the associated path\n base_path = self.request.user.profile.VideoExperiment_path\n file_list = listdir(base_path)\n # include only csv files\n file_list = [el[:-4] for el in file_list if ('.csv' in el) and ('sync' not in el)]\n # get a list of the existing file names (bonsai)\n existing_rows = [el[0] for el in VideoExperiment.objects.values_list('slug')]\n # for all the files\n for file in file_list:\n # check if the entry already exists\n if file.lower() in existing_rows:\n # if so, skip making a new one\n continue\n # get the data for the entry\n data_dict = parse_path_experiment(file, self, 'VideoExperiment_path')\n # get rid of the animal2 entry\n del data_dict['animal2']\n # of the screen one\n del data_dict['screen_path']\n # and of the motive one\n del data_dict['track_path']\n # check the paths in the filesystem, otherwise leave the entry empty\n for key, value in data_dict.items():\n # if the entry is already empty, don't check\n if data_dict[key] == '':\n continue\n if (isinstance(value, str)) and ('path' in key) and (not exists(value)):\n # print a warning\n print('Path not found for key %s and value %s' % (key, value))\n # clear the path\n data_dict[key] = ''\n\n # # if the tif file exists but the calcium_data file doesn't, log it in the notes\n # This is for when we didn't have calcium signal extraction as part of snakemake\n # if (data_dict['fluo_path'] == '') and (data_dict['tif_path'] != ''):\n # data_dict['imaging'] = 'no'\n # data_dict['notes'] += 'norois'\n # create the model instance with the data\n model_instance = VideoExperiment.objects.create(**data_dict)\n # get the model for the experiment type to use\n experiment_type = 
ExperimentType.objects.filter(experiment_name='Free_behavior')\n # add the experiment type to the model instance (must use set() cause m2m)\n model_instance.experiment_type.set(experiment_type)\n # save the model instance\n model_instance.save()\n\n return HttpResponseRedirect('/loggers/video_experiment/')\n except:\n print('Problem file:' + file)\n return HttpResponseBadRequest('loading file %s failed, check file names' % file)", "def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()", "def imaging_preingestion(rec_series):\n\n status_update = config.status_update_idx['NO_CHANGE']\n update_value_dict = copy.deepcopy(config.default_update_value_dict)\n\n print(\"rec_series['query_key']\")\n print(rec_series['query_key'])\n\n #Populate ImagingPipelineSession and call matlab script that handles TiffSplits\n imaging_pipeline.ImagingPipelineSession.populate(rec_series['query_key'])\n imaging_pipeline.AcquiredTiff.populate(rec_series['query_key'])\n\n #Retrieve all fovs records ingested in matlab Script\n fovs_ingested = (imaging_pipeline.TiffSplit & rec_series['query_key']).fetch(\"KEY\", as_dict=True)\n\n if len(fovs_ingested) == 0:\n status_update = config.status_update_idx['ERROR_STATUS']\n update_value_dict['error_info']['error_message'] = 'Imaging TiffSplit process failed'\n return (status_update, update_value_dict)\n\n #Ingest Scan for each fov from the TiffSplit process\n for this_fov in fovs_ingested:\n\n # Scan_id always zero because TIFF splitted (FOVs) already on imaging_pipeline schema\n scan_id = 0\n # Acquisition type will have Mesoscope or 2Photon\n scanner = rec_series['acquisition_type']\n # Hardcoded acquisition software\n acq_software = 'ScanImage'\n\n #Insert Scan and ScanInfo \n imaging_pipeline.scan_element.Scan.insert1(\n {**this_fov, 'scan_id': 0, 'scanner': scanner, 'acq_software': acq_software}, skip_duplicates=True)\n \n #Populate ScanInfo for all fovs\n imaging_pipeline.scan_element.ScanInfo.populate(rec_series['query_key'], display_progress=True)\n\n #ingested_recording = (imaging_pipeline.scan_element.Scan & rec_series['query_key']).fetch(\"KEY\", as_dict=True)\n\n # Get fov directories for each recording process:\n fov_files_df = pd.DataFrame((imaging_pipeline.scan_element.ScanInfo.ScanFile & rec_series['query_key']).fetch(as_dict=True))\n\n fov_files = fov_files_df.groupby('tiff_split').first().reset_index()\n\n\n #Insert recording processes records\n old_recording_process = (recording_process.Processing() & rec_series['query_key']).fetch(\"KEY\", as_dict=True)\n if len(old_recording_process) == 0:\n\n connection = recording.Recording.connection \n with connection.transaction:\n \n # Get fov directories for each recording process:\n fov_files_df = 
pd.DataFrame((imaging_pipeline.scan_element.ScanInfo.ScanFile & rec_series['query_key']).fetch(as_dict=True))\n fov_files = fov_files_df.groupby('tiff_split').first().reset_index().to_dict('records')\n\n fov_files = [dict(item, recording_process_pre_path=pathlib.Path(item['file_path']).parent.as_posix()) for item in fov_files]\n\n recording_process.Processing().insert_recording_process(fov_files, 'tiff_split')\n\n #Get parameters for recording processes\n recording_processes = (recording_process.Processing() & rec_series['query_key']).fetch('job_id', 'recording_id', 'fragment_number', 'recording_process_pre_path', as_dict=True)\n default_params_record_df = pd.DataFrame((recording.DefaultParams & rec_series['query_key']).fetch(as_dict=True))\n params_rec_process = recording.DefaultParams.get_default_params_rec_process(recording_processes, default_params_record_df)\n\n recording_process.Processing.ImagingParams.insert(params_rec_process, skip_duplicates=True)\n\n #Update recording_process_post_path\n recording_process.Processing().set_recording_process_post_path(recording_processes)\n\n status_update = config.status_update_idx['NEXT_STATUS']\n\n return (status_update, update_value_dict)", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-nanodegree-data-engineer/\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def query_csv_upload(request):\n if request.method == \"POST\":\n queries_dataset = request.FILES['queries_dataset']\n handle_uploaded_file(queries_dataset, 'temp/queries.csv')\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )", "def upload(request):\n gi = GalaxyInstance(url=request.session.get('server'), email=request.session.get('galaxyemail'), password=request.session.get(\"galaxypass\"))\n selected = request.POST.get('selected')\n selectedmeta = request.POST.get('meta')\n filetype = request.POST.get('filetype')\n dbkey = request.POST.get('dbkey')\n workflowid = request.POST.get('workflowid')\n pid = request.POST.get('data_id')\n onlydata = request.POST.get('onlydata')\n makecol = request.POST.get('col')\n data_ids = []\n control = request.POST.get('samples')\n test = request.POST.get('samplesb')\n new_hist = request.POST.get('historyname')\n group = request.POST.get('group')\n investigation = request.POST.get('investigation')\n date = strftime(\"%d_%b_%Y_%H:%M:%S\", gmtime())\n select = selected.split(',')\n mselect = selectedmeta.split(',')\n gselect = group.split(',')\n iselect = investigation.split(',')\n files = get_selection(iselect, gselect, select, mselect)[0]\n mfiles = get_selection(iselect, gselect, select, mselect)[1]\n groups = get_selection(iselect, gselect, select, mselect)[2]\n investigations = get_selection(iselect, gselect, select, mselect)[3]\n history_id = create_new_hist(gi, request.session.get('galaxyemail'), request.session.get(\"galaxypass\"),\n request.session.get('server'), workflowid, files, new_hist)\n inputs = {}\n if len(filter(None, files)) <= 0:\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n if onlydata == \"true\":\n make_data_files(gi, files, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n else:\n make_data_files(gi, files, 
request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id, filetype, dbkey)\n make_meta_files(gi, mfiles, request.session.get('username'), request.session.get('password'), request.session.get('galaxyemail'),\n request.session.get('galaxypass'), control, test, history_id)\n if workflowid != \"0\":\n in_count = 0\n resultid = uuid.uuid1()\n datamap = dict()\n mydict = {}\n jsonwf = gi.workflows.export_workflow_json(workflowid)\n for i in range(len(jsonwf[\"steps\"])):\n if jsonwf[\"steps\"][str(i)][\"name\"] == \"Input dataset\":\n try:\n label = jsonwf[\"steps\"][str(i)][\"inputs\"][0][\"name\"]\n except IndexError:\n label = jsonwf[\"steps\"][str(i)][\"label\"]\n mydict[\"in%s\" % (str(i + 1))] = gi.workflows.get_workflow_inputs(workflowid, label=label)[0]\n for k, v in mydict.items():\n datamap[v] = {'src': \"hda\", 'id': get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count]}\n data_ids.append(get_input_data(request.session.get('galaxyemail'), request.session.get('galaxypass'),\n request.session.get('server'))[0][in_count])\n in_count += 1\n if makecol == \"true\":\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n gi.workflows.invoke_workflow(workflowid, datamap, history_id=history_id)\n gi.workflows.export_workflow_to_local_path(workflowid, request.session.get('username'), True)\n datafiles = get_output(request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'))\n store_results(1, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, investigations, date)\n store_results(3, datafiles, request.session.get('server'), request.session.get('username'),\n request.session.get('password'), request.session.get('storage'),\n groups, resultid, investigations, date)\n ga_store_results(request.session.get('username'), request.session.get('password'), workflowid,\n request.session.get('storage'), resultid, groups, investigations)\n call([\"rm\", request.session.get('username') + \"/input_test\"])\n return render_to_response('results.html', context={'workflowid': workflowid, 'inputs': inputs, 'pid': pid,\n 'server': request.session.get('server')})\n else:\n if makecol == \"true\":\n history_data = gi.histories.show_history(history_id, contents=True)\n for c in range(0, len(history_data)):\n data_ids.append(history_data[c]['id'])\n gi.histories.create_dataset_collection(history_id, make_collection(data_ids))\n ug_store_results(\n request.session.get('galaxyemail'), request.session.get('galaxypass'), request.session.get('server'), workflowid,\n request.session.get('username'), request.session.get('password'), request.session.get('storage'), groups, investigations, date)\n return HttpResponseRedirect(reverse(\"index\"))", "def run(self, found_files):\n raise NotImplementedError", "def main():\n spark = create_spark_session()\n\n input_data = \"s3a://udacitydenanodegree2020/\"\n output_data = \"s3a://udacitydenanodegree2020/output/\"\n\n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def analyze_prior_results(self, analysis_source):\n #TODO: move to iota_run.py\n\n from iota.components.iota_analysis import Analyzer\n from libtbx import easy_pickle as ep\n\n if 
os.path.isdir(analysis_source):\n int_folder = os.path.abspath(analysis_source)\n else:\n try:\n int_folder = os.path.abspath(os.path.join(os.curdir,\n 'integration/{}/image_objects'.format(analysis_source)))\n except ValueError:\n int_folder = None\n print ('Run #{} not found'.format(analysis_source))\n\n if os.path.isdir(int_folder):\n with prog_message('Analyzing Results'):\n int_list = [os.path.join(int_folder, i) for i in os.listdir(int_folder)]\n img_objects = [ep.load(i) for i in int_list if i.endswith('.int')]\n\n self.logfile = os.path.abspath(os.path.join(int_folder, 'iota.log'))\n self.viz_base = os.path.join('/'.join(int_folder.split('/')),\n 'vizualization')\n\n self.params.analysis.cluster_write_files=False\n\n analysis = Analyzer(self, img_objects, self.iver)\n analysis.print_results()\n analysis.unit_cell_analysis()\n analysis.print_summary(write_files=False)\n else:\n print ('No results found in {}'.format(int_folder))", "def upload_submission(request, learner, trigger, no_thumbnail=True):\n base_dir_for_file_uploads = settings.MEDIA_ROOT\n thumbnail_file_name_django = ''\n entry_point = trigger.entry_point\n\n files = request.FILES.getlist('file_upload', None)\n if files is None:\n return None\n\n # Is the storage space reachable?\n deepest_dir = base_dir_for_file_uploads + 'uploads/{0}/tmp/'.format(\n entry_point.id)\n\n try:\n os.makedirs(deepest_dir)\n except OSError:\n if not os.path.isdir(deepest_dir):\n logger.error('Cannot create directory for upload: {0}'.format(\n deepest_dir))\n raise\n\n if len(files) == 1:\n filename = files[0].name\n extension = filename.split('.')[-1].lower()\n submitted_file_name_django = 'uploads/{0}/{1}'.format(entry_point.id,\n generate_random_token(token_length=16) + '.' + extension)\n full_path = base_dir_for_file_uploads + submitted_file_name_django\n with open(full_path, 'wb+') as dst:\n for chunk in files[0].chunks():\n dst.write(chunk)\n\n\n f_size = os.path.getsize(full_path)\n if f_size > trigger.max_file_upload_size_MB * 1024 * 1024:\n logger.warning('File too large {0}'.format(\n submitted_file_name_django))\n return None, ('File too large ({0} MB); it must be less than '\n '{1} MB.'.format(round(float(f_size/1024.0/1024.0), 1),\n trigger.max_file_upload_size_MB))\n\n\n else: #if trigger.allow_multiple_files: this is removed for now\n filename = ''\n extension = ''\n submitted_file_name_django = ''\n full_path = ''\n\n\n # Check that the file format is PDF, if that is required.\n strike1 = False\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'application/pdf' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PDF upload: {0} [{1}]'.format(mime,\n full_path))\n #return None, 'Invalid file uploaded. Uploaded file must be a PDF.'\n\n doc = PdfFileReader(full_path)\n if doc.isEncrypted:\n logger.debug('Encrypted PDF upload: {0}'.format(full_path))\n return None, ('An encrypted PDF cannot be uploaded. 
Please remove '\n 'the encryption and try again.')\n\n\n strike1 = False\n if (('jpeg' in trigger.accepted_file_types_comma_separated.lower()) or \\\n ('jpg' in trigger.accepted_file_types_comma_separated.lower())) and \\\n extension in ('jpg', 'jpeg'):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/jpeg' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid JPG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable JPEG file.')\n\n\n strike1 = False\n if ('png' in trigger.accepted_file_types_comma_separated.lower()) and \\\n extension in ('png',):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/png' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PNG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable PNG file.')\n\n\n strike2 = False\n if extension.lower() not in \\\n trigger.accepted_file_types_comma_separated.lower():\n logger.debug('Invalid file type upload: received \".{0}\"; [{1}]'.format(\\\n extension, full_path))\n return None, ('Invalid file uploaded. Uploaded file must be: {}'.format(\\\n trigger.accepted_file_types_comma_separated))\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n prior = Submission.objects.filter(status='S',\n submitted_by=learner,\n entry_point=entry_point,\n is_valid=True\n )\n else:\n prior_indiv = Q(status='S', submitted_by=learner, entry_point=entry_point,\n trigger=trigger, is_valid=True)\n\n # We need this here, but also for the code later in the next\n # if (trigger==entry_point) part\n\n # Default returned by this function is ``None`` if the user is not\n # enrolled in a group, or if this course simply does not use groups.\n group_submitted = is_group_submission(learner, entry_point)\n if is_group_submission(learner, entry_point):\n group_submitted = group_submitted.group\n\n prior_group = Q(status='S', group_submitted=group_submitted,\n entry_point=entry_point, trigger=trigger,\n is_valid=True)\n else:\n prior_group = Q()\n\n prior = Submission.objects.filter(prior_indiv | prior_group)\n\n\n for item in prior:\n logger.debug(('Setting prior submission to False: {0} and name '\n '\"{1}\"'.format(str(item), item.submitted_file_name)))\n item.is_valid = False\n item.save()\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n sub = Submission(submitted_by=learner,\n group_submitted=None,\n status='S',\n entry_point=entry_point,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n else:\n\n sub = Submission(submitted_by=learner,\n group_submitted=group_submitted,\n status='S',\n entry_point=entry_point,\n trigger=trigger,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n\n if 'pdf' in 
trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n clean_PDF(sub)\n\n return sub", "def run(self):\n # read (x,y) - Set SIFT parameters\n action = self.cfg['param']['action']\n x = float(self.cfg['param']['x']) \n # Expressed en PIL coordinates system y| x-\n y = float(self.cfg['param']['y'])\n \n # read image size and store in 'param' dict to control html rendering\n work_dir = self.work_dir\n [w1, h1] = Image.open(work_dir+'input_0.orig.png').size\n [w2, h2] = Image.open(work_dir+'input_1.orig.png').size\n wdth = max(w1, w2)\n hght = max(h1, h2)\n wpair = int(w1+w2+max(w1, w2)/10)\n self.cfg['param']['hght'] = hght\n self.cfg['param']['wdth'] = wdth\n self.cfg.save()\n # Convert x y provided by the form <input type=image ..; > \n # We assume that the width of the html body is assumed width is 920px\n x = x*wpair/920\n y = y*wpair/920\n self.cfg['param']['x'] = x\n self.cfg['param']['y'] = y\n self.cfg.save()\n if (action == 'std_sift_matching'):\n try:\n self.load_standard_parameters()\n self.run_std_sift()\n self.run_matching()\n self.illustrate_std_sift()\n self.illustrate_matching()\n except TimeoutError:\n return self.error(errcode='timeout',\n errmsg=\"Try again with simpler images.\")\n except RuntimeError:\n return self.error(errcode='runtime',\n errmsg=\"Runtime error in std_sift_matching.\")\n elif (action == \"cust_sift_matching\"):\n try:\n self.run_sift()\n print \"after run_sift()\"\n self.run_matching()\n print \"after run_matching()\"\n self.illustrate_sift()\n print \"after illustrate_sift()\"\n self.illustrate_matching()\n self.detail_matching()\n except TimeoutError:\n return self.error(errcode='timeout',\n errmsg=\"Try with simpler images.\")\n except RuntimeError:\n return self.error(errcode='runtime',\n errmsg=\"Runtime error in cust_sift_matching.\")\n elif (action == \"cust_matching\"): \n try:\n self.run_matching()\n self.illustrate_matching()\n except TimeoutError:\n return self.error(errcode='timeout',\n errmsg=\"Try with simpler images.\")\n except RuntimeError:\n return self.error(errcode='runtime',\n errmsg=\"Runtime error in cust_matching.\")\n else:\n try:\n self.detail_matching()\n except TimeoutError:\n return self.error(errcode='timeout',\n errmsg=\"Try with simpler images.\")\n except RuntimeError:\n return self.error(errcode='runtime',\n errmsg=\"Runtime error in else (you know).\")\n ## archive\n if self.cfg['meta']['original']:\n ar = self.make_archive()\n ar.add_file(\"input_0.png\", info=\"first input image\")\n ar.add_file(\"input_1.png\", info=\"second input image\")\n ar.add_file(\"input_0.orig.png\", info=\"first uploaded image\")\n ar.add_file(\"input_1.orig.png\", info=\"second uploaded image\")\n ar.add_file(\"OUTmatches.png\", info=\"matches\")\n ar.add_file(\"keys_im0.txt\", compress=True)\n ar.add_file(\"keys_im1.txt\", compress=True)\n ar.add_file(\"OUTmatches.txt\", compress=True)\n ar.save()\n\n self.cfg.save()\n http.redir_303(self.base_url + 'result?key=%s' % self.key)\n return self.tmpl_out(\"run.html\")", "def run(self):\n\t\tlogger.info(\"Uploading data... 
@ %f, PID: %d\" % (time.time(), os.getpid()))\n\n\t\tself.dump_db()", "def main(IMAGES_PATH, action='model2'):\n if action == 'yolo':\n # Run yolo model on images in the given folder\n run_yolo_model(IMAGES_PATH)\n \n elif action == 'mask':\n # Run mask model on images in the given folder\n run_mask_model(IMAGES_PATH)\n \n elif action == 'model1':\n # Run yolo model on images in the given folder\n run_yolo_model(IMAGES_PATH)\n \n # Run mask model on images in the given folder\n run_mask_model(IMAGES_PATH)\n \n # Aggregate output of yolo and output of mask\n #yolo_coco.py\n\n elif action == 'model2':\n # For sake of time saving, we will run model2 on a previously saved table instead of the output of model1. \n \n #data_df = load_coco_json_into_table('yolo_mask_to_coco/Predictions_group3.json')\n #data_df = add_date_time(data_df)\n \n data_df = pd.read_pickle('inputs/table_labels_new.pkl')\n \n data_df.reset_index(drop=True, inplace=True)\n \n complete_concreting_df = get_concreting_periods(data_df)\n \n final_concreting_df = get_concreting_zone_and_workers_infos(complete_concreting_df)\n \n df_efficiency = efficiency_of_site(final_concreting_df)\n \n return df_efficiency", "def upload_file(self, file_path, file_name, output_path):", "def upload_function(request):\n application_key = request.args.get('applicationKey', None)\n api_key = request.args.get('apiKey', None)\n\n sensor_mapping = json.loads(request.environ.get(\"sensor_mapping\", \"{}\"))\n pprint.pprint('Using this sensor mapping: \\n' + pprint.pformat(sensor_mapping))\n\n host = request.environ.get(\"influxdb_host\")\n port = request.environ.get(\"influxdb_port\")\n user = request.environ.get(\"influxdb_user\")\n password = request.environ.get(\"influxdb_password\")\n dbname = request.environ.get(\"influxdb_database\")\n\n return process_data(sensor_mapping, application_key, api_key, host, port, user, password, dbname)", "def make_data_files(gi, files, username, password, galaxyemail, galaxypass, control, test, history_id, filetype, dbkey):\n uploaded_files = []\n ftp = gi.config.get_config()[\"ftp_upload_site\"]\n for file in files:\n nfile = str(file).split('/')\n filename = nfile[len(nfile)-1]\n with open(username + \"/input_\" + filename, \"w\") as dfile:\n cont = subprocess.Popen([\"curl -u \" + username + \":\" + password + \" -k -s \" + file], stdout=subprocess.PIPE, shell=True).communicate()[0]\n dfile.write(cont)\n dfile.close()\n with open(username + \"/input_\" + filename, \"r\") as tfile:\n # Trim file based on selected samples.\n matrix = False\n noheader = False\n samples_a = []\n samples_b = []\n linenr = 0\n if control != \"[]\" or test != \"[]\":\n with open(username + \"/input_A_\" + filename, \"w\") as ndfilea:\n with open(username + \"/input_B_\" + filename, \"w\") as ndfileb:\n for line in tfile:\n if linenr == 0:\n samples_a.append(0)\n samples_b.append(0)\n if \"!\" not in line:\n noheader = True\n if not noheader:\n if \"!Sample_geo_accession\" in line:\n line = line.split('\\t')\n for x in range(0, len(line)):\n if line[x].replace('\\n', '') in control:\n samples_a.append(x)\n if line[x].replace('\\n', '') in test:\n samples_b.append(x)\n else:\n if \"!series_matrix_table_begin\" in line:\n matrix = True\n samples_a.append(0)\n if matrix:\n line = line.split('\\t')\n for p in (p for p,x in enumerate(line) if p in samples_a):\n if \"!series_matrix_table_begin\" not in line[p] and \"!series_matrix_table_end\" not in line[p]:\n ndfilea.write(line[p].replace('\\\"', '').replace('\\n', '') + '\\t')\n for pb in 
(pb for pb,x in enumerate(line) if pb in samples_b):\n if \"!series_matrix_table_begin\" not in line[pb] and \"!series_matrix_table_end\" not in line[pb]:\n ndfilea.write(line[pb].replace('\\\"', '').replace('\\n', '') + '\\t')\n ndfilea.write('\\n')\n else:\n line.strip()\n else:\n line = line.split('\\t')\n if linenr == 0:\n column = 0\n control = control.split(',')\n test = test.split(',')\n for l in line:\n for c in control:\n if str(c.replace('[', '').replace(']', '').replace('\"', '')) == l.replace('\\n', ''):\n samples_a.append(column)\n for t in test:\n if str(t.replace('[', '').replace(']', '').replace('\"', '')) == l.replace('\\n', ''):\n samples_b.append(column)\n column += 1\n column = 0\n for l in line:\n if column in samples_a:\n ndfilea.write(line[column].replace('\\\"', '').replace('\\n', '') + '\\t')\n if column in samples_b:\n ndfileb.write(line[column].replace('\\\"', '').replace('\\n', '') + '\\t')\n column += 1\n ndfilea.write('\\n')\n ndfileb.write('\\n')\n linenr += 1\n if len(samples_a) > 1:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + ndfilea.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(ndfilea.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(ndfilea.name.split(\"/\")[-1])\n if len(samples_b) > 1:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + ndfileb.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(ndfileb.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(ndfileb.name.split(\"/\")[-1])\n ndfilea.close()\n ndfileb.close()\n call([\"rm\", ndfilea.name])\n call([\"rm\", ndfileb.name])\n else:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"put \" + tfile.name + \"; bye\\\"\"], shell=True)\n gi.tools.upload_from_ftp(tfile.name.split(\"/\")[-1], history_id, file_type=filetype, dbkey=dbkey)\n uploaded_files.append(tfile.name.split(\"/\")[-1])\n call([\"rm\", dfile.name])\n call([\"rm\", tfile.name])\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n # Stop process after workflow is done\n while status['running'] or status['queued'] or status['new'] or status['upload']:\n time.sleep(20)\n hist = gi.histories.show_history(history_id)\n state = hist['state_ids']\n dump = json.dumps(state)\n status = json.loads(dump)\n if not status['running'] and not status['queued'] and not status['new'] and not status['upload']:\n for uf in uploaded_files:\n check_call([\"lftp -u \" + galaxyemail + \":\" + galaxypass + \" \" + ftp + \" -e \\\"rm -r \" + uf + \"; bye\\\"\"], shell=True)\n break", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def execute(self, images_and_density_maps):\n raise NotImplementedError(\"execute method not implemented in the child class\")", "def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in 
file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('drive', 'v3', http=http)\n\n i = 0\n total = 0\n nextPageToken=None\n while True:\n results = service.files().list(\n pageSize=30,\n fields=\"nextPageToken, files(id, name, mimeType, modifiedTime)\",\n spaces='photos',\n pageToken=nextPageToken\n ).execute()\n\n items = results.get('files', [])\n nextPageToken = results.get(\"nextPageToken\")\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'].split('/')[0] != 'image':\n continue\n if vcoll.findBySrcId(item['id']) is not None:\n continue\n destination = 'image_tags/validation/' + item['name']\n file_content = get_file_stream(service, item['id'])\n if file_content and image_handler.is_valid_image(file_content):\n file_handler.upload_file_stream(destination, file_content)\n vcoll.insertValidationImage(destination, item['id'], item['modifiedTime'])\n total += 1\n print(\"Downloaded {0} photos\".format(total))\n i += 1", "def handle(self, *args, **options):\n today = datetime.datetime.today()\n print '****** Processing data analysis at %s.*******\\n' % today\n\n #print analysis.calculate_summary_stats()\n #print analysis.calculate_action_stats()\n #print analysis.calculate_user_stats()\n\n outfile = open('user_timestamps.csv', 'w')\n analysis.user_timestamps(None, \"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n outfile = open('user_point_timestamps.csv', 'w')\n analysis.user_point_timestamps(\"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n outfile = open('energy_goal_timestamps.csv', 'w')\n analysis.energy_goal_timestamps(\"2012-09-04\", \"2012-10-01\", outfile)\n outfile.close()\n\n today = datetime.datetime.today()\n print '****** End processing data analysis at %s.*******\\n' % today", "def ingest_results(self, results, isCsv=False):\n \n for r in results:\n image = Image()\n if isCsv:\n image.parse_row(r)\n else:\n image.parse_record(r)\n # print(\"metadata: %s\" % image.metadata)\n # answer = input(\"Press enter...\")\n self.img_lst.append(image)", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main():\n\targuments_sent = sys.argv\n\tif 
len(arguments_sent) > 1:\n\t\tfile_path = arguments_sent[1]\n\t\tprocess_based_on_type(file_path)", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n createlookuptable(args.imagefolder)\n _logger.info(\"Script ends here\")", "def run_single_simple_experiment(self, path_to_csv_files, algorithm, thresholds):\n\n splitted_path = os.path.abspath(__file__).split(\"/\")\n if not os.path.isdir(\"/\".join(splitted_path[:-1]) + \"/results\"):\n os.mkdir(\"/\".join(splitted_path[:-1]) + \"/results\")\n\n experiment_dict, list_dict = self.create_experiment_dict(algorithm.get_algorithm_name() + \"_actual\", thresholds)\n\n csv_paths_list = self.reader.read_all_csv_paths_from_path(path_to_csv_files)\n file_number = 1\n for csv_path in csv_paths_list:\n if 'total' in csv_path:\n continue\n df = pd.read_csv(csv_path)\n start_file_time = time.time()\n print(\"started file number:\", file_number)\n for index, row in df.iterrows():\n algorithm_column_name = algorithm.get_algorithm_name() + \"_actual\"\n actual_score = row['actual_score']\n for threshold in thresholds:\n algorithm_score = row[algorithm_column_name]\n if actual_score == 1:\n if algorithm_score >= threshold:\n experiment_dict[str(threshold)][algorithm_column_name][list_dict['TP']] += 1\n else:\n experiment_dict[str(threshold)][algorithm_column_name][list_dict['FN']] += 1\n else:\n if algorithm_score < threshold:\n experiment_dict[str(threshold)][algorithm_column_name][list_dict['TN']] += 1\n else:\n experiment_dict[str(threshold)][algorithm_column_name][list_dict['FP']] += 1\n finish_file_time = time.time()\n print(\"finished file number:\", file_number)\n print(\"file number:\", file_number, \"took:\", (finish_file_time - start_file_time) / 60, \"minutes\")\n file_number += 1\n\n # calculate the precision and recall for each threshold and algorithm\n experiment_dict = self.update_precision_and_recall(experiment_dict, list_dict,\n algorithm.get_algorithm_name() + \"_actual\", thresholds)\n\n # create a dictionary that can be ingested into a dataframe\n experiment_dict_for_csv = self.create_simple_experiment_dict_for_csv(experiment_dict, list_dict, algorithm,\n thresholds)\n\n # create a dataframe from the results\n experiment_dict_df = pd.DataFrame(experiment_dict_for_csv)\n\n # write the results into a csv file\n experiment_csv_file_name = os.path.dirname(\n \"/\".join(splitted_path)) + \"/results/precision_recall_results_\" + algorithm.get_algorithm_name() + \".csv\"\n experiment_dict_df.to_csv(experiment_csv_file_name, index=False, header=True)", "def main_one(string_path_to_folder, destination_folder):\n # .jpg and .JPG are the same\n # photos = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.JPG\") # Examples of location format\n # pho = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.jpg\")\n photos = glob.glob(string_path_to_folder+\"/*.JPG\")\n print(\"Number of files: \", len(photos))\n for k in photos:\n print(get_photo_date(k))\n process_all(k, destination_folder)", "def process_image(self, **kwargs):\n 
try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n # use the func to insert data from these files to database's fact and dim tables\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='operation')\n\n pparser = subparsers.add_parser('process',\n description='Processes tweets')\n inputgroup = pparser.add_mutually_exclusive_group()\n inputgroup.add_argument('-f', '--files', nargs='+')\n inputgroup.add_argument('-q', '--queries', nargs='+')\n pparser.add_argument('-wc', '--wordcloud', action='store_true')\n pparser.add_argument('-n', type=int, default=300, dest='count')\n\n gparser = subparsers.add_parser('get',\n description='Gets tweets and writes them into files')\n gparser.add_argument('queries', nargs='+')\n gparser.add_argument('-n', type=int, default=300, dest='count')\n iparser = subparsers.add_parser('interactive',\n description='Manually input messages for evaluation')\n args = parser.parse_args()\n \n \n if args.operation == 'get':\n query_tweets_to_files(args.queries, args.count)\n print(\"Tweets successfully saved to disk\")\n elif args.operation == 'process':\n model, w2v_model = load_models()\n if args.queries:\n tweets = [(query,\n tc.query_tweets(query, args.count)) for query in args.queries]\n else:\n tweets = [(os.path.splitext(os.path.basename(f))[0],\n tc.read_tweets_from_file(f)) for f in args.files]\n analyze_tweets(tweets, model, w2v_model)\n print(\"Images successfully saved to disk.\")\n elif args.operation == 'interactive':\n model, w2v_model = load_models()\n interactive(model, w2v_model)", "def run(self):\n\t\tlog = logging.getLogger()\n\t\tsuccess = True\n\t\tself.task[\"custom\"] = str(self.task[\"custom\"])\n\t\tself.db = CuckooDatabase()\n\n\t\t# Generate analysis results storage folder path with current task id.\n\t\tresults_path = CuckooConfig().get_analysis_results_path()\n\t\tsave_path = os.path.join(results_path, str(self.task[\"id\"]))\n\n\t\tif (self.task[\"custom\"] == \"sleep\"):\n\t\t\timport time\n\t\t\t# sleep longer than default timeout of hsn2-cuckoo\n\t\t\ttime.sleep(905)\n\t\t# Additional check to verify that the are not saved results with the\n\t\t# same task ID.\n\t\tif os.path.exists(save_path):\n\t\t\tlog.error(\"There are already stored results for current task \" \\\n\t\t\t\t\t \"with ID %d at path \\\"%s\\\". 
Abort.\"\n\t\t\t\t\t % (self.task[\"id\"], save_path))\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target file exists.\n\t\tlog.debug(os.path.exists(self.task[\"custom\"]))\n\t\tif not os.path.exists(self.task[\"custom\"]):\n\t\t\tlog.error(\"Cannot find custom file \\\"%s\\\". Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target is a directory.\n\t\tif os.path.isdir(self.task[\"custom\"]):\n\t\t\tlog.error(\"Specified target \\\"%s\\\" is a directory. Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# 4. Extract appropriate log archive as mock logs analysis results\n\t\t# Modified _save_results so that it extracts the tar file passed in target\n\t\tself._save_results(self.task[\"custom\"], save_path)\n\n\t\t# 5. Update task in database with proper status code.\n\t\tif success:\n\t\t\tself.db.complete(self.task[\"id\"], True)\n\t\telse:\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\tlog.info(\"Analyis completed.\")\n\n\t\treturn True", "def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)", "def upload_sample_and_get_results(a1000):\n file_entry = demisto.getFilePath(demisto.getArg('entryId'))\n\n try:\n with open(file_entry['path'], 'rb') as f:\n response_json = a1000.upload_sample_and_get_summary_report_v2(file_source=f,\n custom_filename=file_entry.get('name'),\n tags=demisto.getArg('tags'),\n comment=demisto.getArg('comment')).json()\n except Exception as e:\n return_error(str(e))\n\n command_result = a1000_report_output(response_json)\n\n file_result = fileResult('A1000 report file', json.dumps(response_json, indent=4),\n file_type=EntryType.ENTRY_INFO_FILE)\n\n return [command_result, file_result]", "def run_analysis(self, analysis_function, results_shapes, results_names, files_dir, zemax_file, results_path,\n wavelength_idx=None, configuration_idx=None, surface=None, *args, **kwargs):\n\n # check that the file name is correct and the zemax file exists\n if os.path.exists(os.path.join(files_dir, zemax_file)) is False:\n raise FileExistsError(\"%s does NOT exist\" % zemax_file)\n\n print(\"\\nOpening Zemax File: \", zemax_file)\n self.zosapi.OpenFile(os.path.join(files_dir, zemax_file), False)\n file_name = zemax_file.split(\".\")[0] # Remove the \".zmx\" suffix\n\n # Check if the results directory already exists\n results_dir = os.path.join(results_path, file_name)\n print(\"Results will be saved in: \", results_dir)\n if not os.path.exists(results_dir):\n os.mkdir(results_dir) # If not, create the directory to store results\n\n # Get some info on the system\n system = self.zosapi.TheSystem # The Optical System\n MCE = system.MCE # Multi Configuration Editor\n LDE = system.LDE # Lens Data Editor\n N_surfaces = LDE.NumberOfSurfaces\n N_configs = MCE.NumberOfConfigurations\n\n # print(\"\\nSystem Data:\")\n print(\"Number of Surfaces: \", N_surfaces)\n # print(\"Number of Configurations / Slices: \", N_configs)\n _wavelengths = get_wavelengths(system, info=False)\n N_waves = _wavelengths.shape[0]\n # field_points = get_fields(system, info=False)\n\n # Let's deal with the data format\n if wavelength_idx is None: # We use all the available wavelengths\n wavelengths = _wavelengths\n wavelength_idx = 
np.arange(1, N_waves + 1)\n else: # We have received a list of wavelength indices\n wavelengths = [_wavelengths[idx - 1] for idx in wavelength_idx]\n N_waves = len(wavelength_idx)\n\n if configuration_idx is None:\n configurations = np.arange(1, N_configs + 1)\n else: # We have received a list of configurations\n configurations = configuration_idx\n N_configs = len(configuration_idx)\n N_slices = N_configs\n\n if surface is None: # we go to the final image plane\n surface = system.LDE.NumberOfSurfaces - 1\n\n N_surfaces = system.LDE.NumberOfSurfaces\n if surface != (N_surfaces - 1):\n # The Surface is not the final Image Plane. We must force Zemax to IGNORE\n # all surfaces after our surface of interest so that the RWCE operands works\n surfaces_to_ignore = np.arange(surface + 1, N_surfaces)\n for surf_number in surfaces_to_ignore:\n _surf = system.LDE.GetSurfaceAt(surf_number)\n _surf.TypeData.IgnoreSurface = True\n\n last_surface = system.LDE.GetSurfaceAt(surface)\n thickness = last_surface.Thickness\n print(\"Thickness: \", last_surface.Thickness)\n # image = system.LDE.GetSurfaceAt(N_surfaces - 1)\n # print(\"\\nImage Name: \", image.Comment)\n\n # Temporarily set the Thickness to 0\n last_surface.Thickness = 0.0\n print(\"Thickness: \", last_surface.Thickness)\n\n # Double check we are using the right surface\n print(\"\\nSurface Name: \", system.LDE.GetSurfaceAt(surface).Comment)\n\n # Customize the initilization of the results array so that we can vary the shapes\n # some analysis may be [spaxels_per_slice, N_slices, N_waves] such as an RMS WFE map\n # others might be [N_fields, N_rays, N_slices, N_waves] such as a Spot Diagram\n\n # print(\"\\nDynamically creating Global Variables to store results\")\n for _name, _shape in zip(results_names, results_shapes):\n # print(\"Variable: %s with shape (N_waves, N_configs) + \" % _name, _shape)\n globals()[_name] = np.empty((N_waves, N_slices) + _shape)\n # print(globals()[_name].shape)\n\n # print(\"\\nApplying 'analysis_function': \", analysis_function.__name__)\n print(\"At Surface #%d | Image Plane is #%d\" % (surface, N_surfaces - 1))\n print(\"For Wavelength Numbers: \", wavelength_idx)\n print(\"For Configurations %d -> %d\" % (configurations[0], configurations[-1]))\n\n # Main part of the Analysis. 
Here we loop over the Wavelengths and Configurations\n start = time()\n print(\"\\nAnalysis Started\")\n for k, (wave_idx, wavelength) in enumerate(zip(wavelength_idx, wavelengths)):\n print(\"Wavelength #%d: %.3f\" % (wave_idx, wavelength))\n\n for j, config in enumerate(configurations):\n\n # print(\"Wavelength #%d: %.3f | config: %d\" %(wave_idx, wavelength, config))\n\n # Here is where the arbitrary \"analysis_function\" will be called for each Wavelength and Configuration\n results = analysis_function(system=system, wave_idx=wave_idx, config=config, surface=surface,\n *args, **kwargs)\n\n # Store the result values in the arrays:\n for _name, res in zip(results_names, results):\n globals()[_name][k, j] = res\n\n # Unignore the surfaces\n if surface != (N_surfaces - 1):\n last_surface = system.LDE.GetSurfaceAt(surface)\n last_surface.Thickness = thickness\n for surf_number in surfaces_to_ignore:\n _surf = system.LDE.GetSurfaceAt(surf_number)\n _surf.TypeData.IgnoreSurface = False\n\n self.zosapi.CloseFile(save=False)\n analysis_time = time() - start\n print(\"\\nAnalysis done in %.1f seconds\" % analysis_time)\n time_per_wave = analysis_time / len(wavelengths)\n print(\"%.2f seconds per Wavelength\" % time_per_wave)\n\n # local_keys = [key for key in globals().keys()]\n # print(local_keys)\n\n # We return a list containing the results, and the value of the wavelengths\n list_results = [globals()[_name] for _name in results_names]\n list_results.append(wavelengths)\n\n return list_results", "def process_and_save(db: Broker, uid, tiff_path: str, data_key: str) -> None:\n run = db[uid]\n dk_uid = run.start.get(\"sc_dk_field_uid\", \"\")\n dk_run = db[dk_uid] if dk_uid else None\n dk_image = _mean(dk_run.data(data_key)) if dk_run else None\n image = _mean(run.data(data_key))\n image -= dk_image\n tw = TiffWriter(tiff_path)\n tw.write(image)\n return", "def process_files(compress, files):\n [compress.add_file(file) for file in files]\n\n compress.execute() # upload files to iLovePDF\n compress.download() # download resultant file\n print(\"Compression saved {}% of disk space.\".format(\n PDFWorkshop.__percentage_storage_saved(compress))\n )\n compress.delete_current_task()", "def main():\n try:\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n lookoutvision_client = boto3.client(\"lookoutvision\")\n s3_resource = boto3.resource('s3')\n\n parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)\n parser.add_argument(\n \"project\", help=\"The project containing the model that you want to use.\")\n parser.add_argument(\n \"version\", help=\"The version of the model that you want to use.\")\n parser.add_argument(\n \"image\",\n help=\"The file that you want to analyze. 
Supply a local file path or a \"\n \"path to an S3 object.\")\n args = parser.parse_args()\n\n if args.image.startswith(\"s3://\"):\n photo = Inference.download_from_s3(s3_resource, args.image)\n else:\n photo = args.image\n\n print(f\"Analyzing {photo}.\")\n anomalous, confidence = Inference.detect_anomalies(\n lookoutvision_client, args.project, args.version, photo)\n\n if args.image.startswith(\"s3://\"):\n os.remove(photo)\n\n state = \"anomalous\" if anomalous else \"normal\"\n print(\n f\"Your model is {confidence:.0%} confident that the image is {state}.\")\n except ClientError as err:\n print(f\"Service error: {err.response['Error']['Message']}\")\n except FileNotFoundError as err:\n print(f\"The supplied file couldn't be found: {err.filename}.\")\n except ValueError as err:\n print(f\"A value error occurred: {err}.\")\n else:\n print(\"Successfully completed analysis.\")", "def run(self,image, label, featureClasses, settings, enabledImageTypes,csvFile):\n print('Processing started')\n import time\n startTime = time.time()\n # grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(imageNode.GetName()))\n grayscaleImage = sitk.ReadImage(image)\n #sitkUtils.PushToSlicer(label, labelNode.GetName(), overwrite=True, compositeView=2)\n labelsDict = {}\n if label:\n print(\"label={}\".format(label))\n labelsDict = self.prepareLabelsFromLabelmap(label, grayscaleImage, labelsDict)\n # if segmentationNode:\n # labelsDict = self.prepareLabelsFromSegmentation(segmentationNode, grayscaleImage, labelsDict)\n\n #self.featureValues = extractor.execute(grayscaleImage, labelImage, images, **kwargs)\n featuresDict = {}\n for l in labelsDict.keys():\n print(\"Calculating features for \"+l)\n try:\n featuresDict[l] = self.calculateFeatures(grayscaleImage,\n labelsDict[l],\n featureClasses,\n settings,\n enabledImageTypes)\n except:\n print('calculateFeatures() failed')\n traceback.print_exc()\n self.saveFeatures2CSVFile(featuresDict,csvFile)\n print(\"Completed\")\n endtime = time.time()\n print(\"totalTime={}\".format(endtime-startTime))\n # return featuresDict", "def _process_images(\n raw_image_paths: pd.Series,\n raw_images_dir: str,\n ROI_definitions: Dict[str, Tuple],\n flat_field_filepath_or_none: Union[str, None],\n save_ROIs: bool,\n save_dark_frame_corrected_images: bool,\n save_flat_field_corrected_images: bool,\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n def _process_image_local(raw_image_path):\n \"\"\" Version of process_image with all of the local configuration variables packed in.\n Also encapsulates the opening of the image.\n \"\"\"\n return process_image(\n original_rgb_image=raw.open.as_rgb(raw_image_path),\n original_image_filepath=raw_image_path,\n raw_images_dir=raw_images_dir,\n ROI_definitions=ROI_definitions,\n flat_field_filepath_or_none=flat_field_filepath_or_none,\n save_ROIs=save_ROIs,\n save_dark_frame_corrected_image=save_dark_frame_corrected_images,\n save_flat_field_corrected_image=save_flat_field_corrected_images,\n )\n\n with ThreadPoolExecutor() as executor:\n # We want identical warnings to be shown only for the first image they occur on (the default),\n # but we also want subsequent calls to process_experiment to start with a fresh warning store\n # so that warnings don't stop showing after the first run.\n # catch_warnings gives us this fresh warning store.\n with warnings.catch_warnings():\n # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples\n roi_summary_data_and_image_diagnostics_dfs_for_files 
= list(\n tqdm(\n executor.map(_process_image_local, raw_image_paths),\n total=len(raw_image_paths),\n )\n )\n roi_summary_data_for_files, image_diagnostics_for_files = zip(\n *roi_summary_data_and_image_diagnostics_dfs_for_files\n )\n\n roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files)\n image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files)\n\n return roi_summary_data_for_all_files, image_diagnostics_for_all_files", "def _submitInstance( self, imageName, workDir ):\n return S_OK()" ]
[ "0.7374393", "0.6846931", "0.6268699", "0.62572134", "0.6137036", "0.6073903", "0.6000757", "0.5980023", "0.59539145", "0.5949638", "0.5929469", "0.59196985", "0.59115666", "0.59078693", "0.5898189", "0.5814665", "0.57921034", "0.5774928", "0.57512265", "0.5742385", "0.5740885", "0.57282376", "0.5711028", "0.56975174", "0.5693386", "0.568474", "0.56810266", "0.5676646", "0.56763774", "0.56760883", "0.56744325", "0.5656469", "0.5656197", "0.5636814", "0.5636012", "0.5633209", "0.562034", "0.56198823", "0.56131977", "0.5608583", "0.55921566", "0.5571366", "0.5561578", "0.55554944", "0.5553126", "0.5552547", "0.5547863", "0.5546579", "0.55425864", "0.55376506", "0.5535497", "0.5534073", "0.55332017", "0.5532027", "0.5519534", "0.55141777", "0.5512195", "0.5499605", "0.5494144", "0.54939765", "0.54866433", "0.5459331", "0.5455769", "0.5443764", "0.5440872", "0.54275745", "0.54126775", "0.5409684", "0.54093325", "0.5403984", "0.5401993", "0.5400502", "0.5398597", "0.53947335", "0.5390983", "0.53898275", "0.5385886", "0.53853375", "0.5384451", "0.5381522", "0.5372328", "0.536306", "0.5358293", "0.53569424", "0.5356451", "0.5354702", "0.53538", "0.5352421", "0.5351808", "0.5340537", "0.5340273", "0.5337105", "0.5335979", "0.5334658", "0.53332484", "0.53313005", "0.5328114", "0.5327914", "0.53256685", "0.53247994" ]
0.68059576
2
Run image analysis for zip archive
def run_zip_analysis(filepath, ID, method):
    with zipfile.ZipFile(filepath[0]) as zf:
        for entry in zf.namelist():
            if not entry.startswith("__"):  # Get rid hidden files in zip
                with zf.open(entry) as file:
                    data = file.read()
                    fh = io.BytesIO(data)
                    Image.open(fh)
                    filename, extension = get_file_name(file.name)
                    # Save raw image to database
                    msg = client.upload_file(ID, filename, extension, fh.getvalue())
                    err, msg = check_msg(msg)
                    if err is False:  # if no error in uploading image
                        # Request to process image
                        client.process_image(ID, filename, method)
    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. 
Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return util.TransformContainerAnalysisData(img_name,\n args.occurrence_filter)", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def runDataExtraction():\r\n config = CONFIG['steps']['DataExtraction']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n columns = ci['columns']\r\n nrows = ci['nrows']\r\n input_bucket = ci['bucket']\r\n no_of_files = ci['no_of_files']\r\n\r\n output_bucket = co['bucket']\r\n csv_name_prefix = co['csv_name_prefix']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n zip_files = get_files(input_bucket, boto_client, file_type='zip')\r\n\r\n no_of_files_to_process = no_of_files if no_of_files is not None else len(\r\n zip_files)\r\n for zip_file in tqdm(zip_files[:no_of_files_to_process], total=no_of_files_to_process):\r\n process_file(zip_file, input_bucket, output_bucket, minioClient, columns,\r\n nrows=nrows, output_csv_name_prefix=csv_name_prefix)", "def run_analysis(filepath, ID, method):\n filename, extension = get_file_name(filepath[0])\n if extension == '.zip':\n msg = run_zip_analysis(filepath, ID, method)\n else:\n msg = run_images_analysis(filepath, ID, method)\n return msg", "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def execute(args, **kwargs):\n p = set_options()\n a = p.parse_args(args)\n # logging.info(str(a))\n\n ifiles = ImageFiles(a)\n\n if a.info:\n ifiles.describe()\n else:\n ifiles.write()", "def main(args):\n args = parse_args(args)\n 
setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n createlookuptable(args.imagefolder)\n _logger.info(\"Script ends here\")", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def main():\n time_start = perf_counter()\n\n args = parse_args(sys.argv[1:]).ordered()\n _, opts = next(args)\n log_level = 0\n try:\n log_level = (0, 20, 10)[opts.verbosity]\n mpl_log_level = log_level + 10 if log_level > 0 else log_level\n except IndexError:\n log_level = 10\n mpl_log_level = log_level\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n # set level for all loggers\n # separate log level for matplotlib because it's so verbose\n for logger in loggers:\n if logger.name.startswith(\"matplotlib\"):\n logger.setLevel(mpl_log_level)\n else:\n logger.setLevel(log_level)\n\n LOG.debug(\"Program opts:\\n%s\", pformat(vars(opts)))\n\n # main vars\n inputs = []\n processed = []\n # im: Optional[Image.Image] = None\n im: Image.Image | np.ndarray | None = None\n in_file_path: Optional[str]\n in_image_size = Size(0, 0)\n in_file_size = 0\n in_dpi = 0\n in_exif: Optional[dict] = None\n out_exif: bytes = b\"\"\n out_exif_size = 0\n out_file_path = None\n out_image_size = Size(0, 0)\n out_file_size = 0\n no_op = False\n\n for cmd, arg in args:\n LOG.debug(\"Processing command %s with args:\\n%s\", cmd, pformat(vars(arg)))\n\n if cmd == \"open\":\n in_file_path = arg.input.name\n in_file_size = os.path.getsize(in_file_path) # type: ignore\n im = Image.open(arg.input)\n in_image_size = Size(*im.size)\n LOG.info(\"Input dims: %s\", in_image_size)\n try:\n in_exif = piexif.load(in_file_path)\n del in_exif[\"thumbnail\"]\n # LOG.debug(\"Exif: %s\", in_exif)\n in_dpi = im.info[\"dpi\"]\n except KeyError:\n pass\n LOG.info(\"Input file size: %s\", humanize_bytes(in_file_size))\n LOG.info(\"Input dpi: %s\", in_dpi)\n if arg.show_histogram:\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"open2\":\n # Test of opening multiple images for some operations, such as matting\n for item in arg.input:\n _im = Image.open(item)\n try:\n ex = piexif.load(item.name)\n dpi = _im.info[\"dpi\"]\n del 
ex[\"thumbnail\"]\n except KeyError:\n ex = None\n dpi = (0, 0)\n _im = np.asarray(_im)\n _im = cv2.cvtColor(_im, cv2.COLOR_RGB2BGR)\n inputs.append(\n Img(\n _im,\n file_path=item.name,\n dpi=dpi,\n exif=ex,\n )\n )\n LOG.debug(\"Imgs: %s\", inputs)\n im = inputs[0].data\n in_file_path = inputs[0].file_path\n in_file_size = inputs[0].file_size\n in_image_size = inputs[0].size\n if arg.show_histogram:\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"mat\":\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n im = mat.create_mat(im, size_inches=arg.size)\n out_image_size = Size.from_np(im)\n elif cmd == \"resize\":\n im = Image.fromarray(im) if type(im) == np.ndarray else im\n if is_ndarray(im) or im is None:\n raise TypeError('Expected Image, not ndarray')\n orig_size = Size(*im.size) # type: ignore\n out_image_size = orig_size\n try:\n resize_method, new_size = resize.get_method(\n orig_size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n else:\n # Resize/resample\n try:\n im = resize.resize(\n resize_method,\n im,\n new_size,\n )\n except ImageTooSmallError as e:\n LOG.warning(e)\n out_image_size = Size(*im.size) # type: ignore\n elif cmd == \"resize2\":\n for item in inputs:\n try:\n resize_method, new_size = resize.get_method(\n item.size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n force=arg.force,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n except ResizeAttributeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n else:\n try:\n _im = resize.resize_opencv(\n resize_method, item.data, new_size, resample=cv2.INTER_AREA\n )\n if _im is not None:\n processed.append(Img(_im))\n else:\n LOG.error('Expected image from resize_opencv(), got None')\n except ImageTooSmallError as e:\n LOG.warning(e)\n LOG.info(processed)\n out_image_size = processed[0].size\n im = processed[0].data\n elif cmd == \"text\":\n if im is None:\n LOG.error('Image is None')\n return\n im = watermark.with_text(\n im,\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n ) # type: ignore\n elif cmd == \"text2\":\n im = watermark.with_text(\n Image.fromarray(im),\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n )\n im = np.asarray(im)\n elif cmd == \"watermark\":\n im = watermark.with_image(\n im,\n Image.open(arg.image),\n scale=arg.scale,\n position=arg.position,\n padding=arg.margin,\n opacity=arg.opacity,\n invert=arg.invert,\n )\n elif cmd == \"watermark2\":\n watermark_image = cv2.imread(arg.image.name, cv2.IMREAD_UNCHANGED)\n # im = watermark.with_image_opencv(\n # im,\n # watermark_image,\n # scale=arg.scale,\n # position=arg.position,\n # opacity=arg.opacity,\n # padding=arg.margin,\n # )\n try:\n im = watermark.overlay_transparent(\n im,\n watermark_image,\n scale=arg.scale,\n padding=arg.margin,\n position=arg.position,\n alpha=arg.opacity,\n invert=arg.invert,\n )\n except OverlaySizeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n elif cmd == 
\"sharpen\":\n im = sharpen.unsharp_mask(im, amount=arg.amount, threshold=arg.threshold)\n elif cmd == \"save\":\n # if type(im) == np.ndarray:\n # im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n use_progressive_jpg = in_file_size > 10000\n if use_progressive_jpg:\n LOG.debug(\"Large file; using progressive jpg\")\n\n # Exif\n if arg.keep_exif:\n out_exif = piexif.dump(piexif.load(in_file_path))\n out_exif_size = sys.getsizeof(out_exif)\n\n outbuf = BytesIO()\n try:\n im.save(\n outbuf,\n \"JPEG\",\n quality=arg.jpg_quality,\n dpi=in_dpi,\n progressive=use_progressive_jpg,\n optimize=True,\n exif=out_exif,\n )\n except AttributeError:\n write_params = [\n cv2.IMWRITE_JPEG_QUALITY,\n arg.jpg_quality,\n cv2.IMWRITE_JPEG_OPTIMIZE,\n ]\n if use_progressive_jpg:\n write_params += [\n cv2.IMWRITE_JPEG_PROGRESSIVE,\n ]\n _, buf = cv2.imencode(\".jpg\", im, write_params)\n outbuf = BytesIO(buf)\n image_buffer = outbuf.getbuffer()\n out_file_size = image_buffer.nbytes + out_exif_size\n LOG.info(\"Buffer output size: %s\", humanize_bytes(out_file_size))\n\n if arg.output is None:\n root, _ = os.path.splitext(in_file_path)\n out_file_path = f\"{root}{arg.suffix}.jpg\"\n else:\n out_file_path = arg.output.name\n\n if arg.no_op:\n no_op = True\n continue\n LOG.info(\"Saving buffer to %s\", out_file_path)\n if (out_path := Path(out_file_path)).exists():\n if not arg.force:\n LOG.critical(\n \"file '%s' exists and force argument not found\", out_path\n )\n print(\n f\"{fg.red}{ef.bold}Error: file '{out_path}' exists;\",\n f\" use -f option to force overwrite.{rs.all}\",\n file=sys.stderr,\n )\n return\n # Create output dir if it doesn't exist\n out_path.parent.mkdir(parents=True, exist_ok=True)\n\n with out_path.open(\"wb\") as f:\n f.write(image_buffer)\n if arg.keep_exif:\n piexif.insert(out_exif, out_file_path)\n out_file_size = os.path.getsize(out_file_path)\n\n elapsed = perf_counter() - time_start\n report = generate_report(\n in_file_size,\n out_file_size,\n in_file_path,\n out_file_path,\n in_image_size,\n out_image_size,\n elapsed,\n no_op,\n )\n print(report)", "def run(self):\n \n try:\n with zipfile.ZipFile(self.tmpArchiveFilePath, 'w', zipfile.ZIP_DEFLATED) as z:\n z.write(self.resultFilePath, self.fileType+'.tif')\n if os.path.isfile(self.resultPrjFilePath):\n z.write(self.resultPrjFilePath, self.fileType+'.prj')\n except:\n msg = traceback.format_exc()\n self.emit( SIGNAL('failed(QString)'), msg)\n return\n \n with open(self.tmpArchiveFilePath, \"rb\") as binFile:\n base64Data = base64.b64encode(binFile.read())\n fileData = ('file_data', self.tmpArchiveFilePath, base64Data)\n \n self.emit( SIGNAL('prep_completed(int, QString, QString, QString, QString)'), self.eventId, self.fileType, fileData[0], fileData[1], fileData[2])\n return", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs 
we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)", "def analyze_prior_results(self, analysis_source):\n #TODO: move to iota_run.py\n\n from iota.components.iota_analysis import Analyzer\n from libtbx import easy_pickle as ep\n\n if os.path.isdir(analysis_source):\n int_folder = os.path.abspath(analysis_source)\n else:\n try:\n int_folder = os.path.abspath(os.path.join(os.curdir,\n 'integration/{}/image_objects'.format(analysis_source)))\n except ValueError:\n int_folder = None\n print ('Run #{} not found'.format(analysis_source))\n\n if os.path.isdir(int_folder):\n with prog_message('Analyzing Results'):\n int_list = [os.path.join(int_folder, i) for i in os.listdir(int_folder)]\n img_objects = [ep.load(i) for i in int_list if i.endswith('.int')]\n\n self.logfile = os.path.abspath(os.path.join(int_folder, 'iota.log'))\n self.viz_base = os.path.join('/'.join(int_folder.split('/')),\n 'vizualization')\n\n self.params.analysis.cluster_write_files=False\n\n analysis = Analyzer(self, img_objects, self.iver)\n analysis.print_results()\n analysis.unit_cell_analysis()\n analysis.print_summary(write_files=False)\n else:\n print ('No results found in {}'.format(int_folder))", "def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = 
cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)", "def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)", "def main():\n\n parser = argparse.ArgumentParser(description='codec_compare')\n parser.add_argument('path', metavar='DIR',\n help='path to images folder')\n args = parser.parse_args()\n classpath = args.path\n classname = classpath.split('/')[1]\n\n images = set(listdir_full_path(classpath))\n if len(images) <= 0:\n print \"\\033[91m[ERROR]\\033[0m\" + \" no source files in ./images.\"\n sys.exit(1)\n\n codeclist_full = set(['aom', 'deepcoder', 'deepcoder-lite', 'fuif', 'fvdo', 'hevc', 'kakadu', 'jpeg',\n 'pik', 'tat', 'xavs', 'xavs-fast', 'xavs-median', 'webp'])\n\n bpp_targets = set([0.06, 0.12, 0.25, 0.50, 0.75, 1.00, 1.50, 2.00])\n for image in images:\n width, height, depth = get_dimensions(image, classname)\n name, imgfmt = os.path.splitext(image)\n imgfmt = os.path.basename(image).split(\".\")[-1]\n derivative_images = []\n if classname[:6] == 'classB':\n derivative_images = create_derivatives(image, classname)\n else:\n derivative_images.append((image, imgfmt))\n\n for derivative_image, pix_fmt in derivative_images:\n json_dir = 'metrics'\n mkdir_p(json_dir)\n json_file = os.path.join(json_dir,\n os.path.splitext(os.path.basename(derivative_image))[0] + \".\" + pix_fmt + \".json\")\n # if os.path.isfile(json_file):\n # print \"\\033[92m[JSON OK]\\033[0m \" + json_file\n # continue\n main_dict = dict()\n derivative_image_metrics = dict()\n for codecname in codeclist_full:\n convertflag = 1\n caseflag = pix_fmt\n if (codecname == 'webp' or codecname == 'tat' or 'deepcoder' in codecname) and depth != '8':\n continue\n if 'xavs' in codecname and depth != '8' and depth != '10':\n continue\n if 'classE' in classname and ('tat' in codecname or 'xavs' in codecname or 'deepcoder' in codecname):\n continue\n if codecname == 'kakadu' and classname[:6] == 'classB':\n convertflag = 0\n caseflag = imgfmt\n bpp_target_metrics = dict()\n for bpp_target in bpp_targets:\n print(codecname)\n if codecname == 'aom' and classname[:6] == 'classB':\n # ('AERIAL2' in image or 'CATS' in image or 'XRAY' in image or 'GOLD' in image or 'TEXTURE1' in image):\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + 
str(bpp_target) + '_' + imgfmt + '.' + 'av1'\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'kakadu' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif 'xavs' in codecname and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'fvdo' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_pgm' + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.pgm')\n original_image = image\n else:\n if codecname == 'fuif' and 'tif' in imgfmt:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '.tif_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n elif codecname == 'webp' or codecname == 'tat':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_yuv420p.' + codecname\n else:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + pix_fmt + '.' 
+ codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image_path = os.path.join('outputs', codecname, 'decoded')\n decoded_image = ''\n for decodedfile in os.listdir(decoded_image_path):\n encoderoot = '_'.join(os.path.splitext(os.path.basename(encoded_image_name))[0].split('_')[:-1])\n if encoderoot in decodedfile:\n if ('tat' in codecname or 'webp' in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] == '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n print(decoded_image)\n if ('tat' not in codecname or 'webp' not in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] != '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n if 'classE' not in classname and 'classB' not in classname and os.path.isfile(decoded_image):\n decoded_image = convert_decoded(decoded_image, width, height, depth, codecname)\n original_image = convert_decoded(derivative_image, width, height, depth, 'reference')\n else:\n original_image = derivative_image\n\n print('Reference:' + original_image)\n print('Encoded:' + encoded_image)\n print('Decoded:' + decoded_image)\n if (os.path.isfile(original_image) and os.path.isfile(decoded_image) and os.path.isfile(encoded_image)):\n if 'classE' in classname:\n metrics = compute_metrics_HDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width, height, pix_fmt, depth)\n\n elif 'classB' in classname:\n metrics = compute_metrics(original_image, decoded_image, encoded_image, bpp_target, codecname,\n width, height, pix_fmt)\n else:\n metrics = compute_metrics_SDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width,\n height, imgfmt, depth)\n measured_bpp = (os.path.getsize(encoded_image) * 1.024 * 8) / (float((int(width) * int(height))))\n bpp_target_metrics[measured_bpp] = metrics\n else:\n continue\n \n derivative_image_metrics[codecname] = bpp_target_metrics\n main_dict[derivative_image] = derivative_image_metrics\n\n mkdir_p(json_dir)\n with open(json_file, 'w') as f:\n f.write(json.dumps(main_dict, indent=2))", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", 
\".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def run(self):\n self.compress(\n self.__config.public_key(),\n self.__config.input_dir(),\n self.__config.output_dir(),\n self.__config.suffix()\n )", "def process(image):\n pass", "def main(data_dir):\n\n extract_count = 0\n\n try:\n # Validate argument. Exit if invalid.\n if not os.path.isdir(data_dir):\n print('\"{}\" is not a directory'.format(data_dir), file=stderr)\n exit(1)\n\n # Create directory for output images, if it does not already exist.\n output_dir = '{}/original_sequences_faces/{}/images'.format(\n data_dir, COMPRESSION_LEVEL)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print(\"Extracting faces...\")\n seqs = get_orig_sequences(data_dir, COMPRESSION_LEVEL)\n for s in seqs:\n output_fn = '{}/{}.png'.format(output_dir, s.seq_id)\n if os.path.exists(output_fn):\n # Do not recreate an image if it already exists.\n # If the user wants to recreated an image,\n # the old image must be deleted first.\n continue\n\n print('Extracting face for sequence {}...'.format(s.seq_id))\n face_img = extract_face(s)\n if face_img is None:\n print(\" No face found\")\n else:\n # Write face image to disk.\n try:\n cv2.imwrite(output_fn, face_img)\n extract_count += 1\n except KeyboardInterrupt as e:\n # Safely handle premature termination. 
Remove unfinished file.\n if os.exists(output_fn):\n os.remove(output_fn)\n raise e\n except KeyboardInterrupt:\n print('Program terminated prematurely')\n finally:\n if extract_count == 0:\n print('No faces extracted')\n else:\n print('{} faces extracted'.format(extract_count))", "def extract_artifacts (self, layout):\n print('Extracting artifacts according to layout:')\n for path, afs in layout.items():\n artifact = afs[0][0]\n member = afs[0][1]\n print(' %s (from %s) -> %s' % (member, artifact, path))\n outf = os.path.join(self.stpath, path)\n zfile.ZFile.extract(artifact.lpath, member, outf)\n\n self.add_file(outf)\n\n # Rename files, if needed.\n for root, _, filenames in os.walk(self.stpath):\n for filename in filenames:\n fname = os.path.basename(filename)\n if fname in rename_files:\n bpath = os.path.join(root, os.path.dirname(filename))\n oldfile = os.path.join(bpath, fname)\n newfile = os.path.join(bpath, rename_files[fname])\n print('Renaming %s -> %s' % (oldfile, newfile))\n os.rename(oldfile, newfile)\n\n # And rename them in the files map too\n rename_these = [x for x in self.files.keys() if os.path.basename(x) in rename_files]\n for oldfile in rename_these:\n newfile = os.path.join(os.path.dirname(oldfile),\n rename_files[os.path.basename(oldfile)])\n self.files[newfile] = self.files[oldfile]\n del self.files[oldfile]", "def test_archive_run(self):\n pass", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def main():\n args = parse_args()\n images_dir = get_images_dir(args)\n log(\"INFO\", \"Images destination: {}\".format(os.path.abspath(images_dir)))\n try:\n manifest = parse_manifest(get_manifest_raw(args))\n if args.list_targets:\n print_target_list(\n manifest,\n args\n )\n return True\n log(\"TRACE\", \"Manifest:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in manifest.items())\n ))\n\n # Read the inventory into a dictionary we can perform lookups on\n if os.path.isfile(args.inventory_location):\n inventory_fn = args.inventory_location\n else:\n inventory_fn = os.path.join(images_dir, _INVENTORY_FILENAME)\n inventory = parse_inventory(inventory_fn=inventory_fn)\n log(\"TRACE\", \"Inventory: {}\\n{}\".format(\n os.path.abspath(inventory_fn),\n \"\\n\".join(\"{}\".format(item) for item in inventory.items())\n ))\n\n # Determine the URLs to download based on the input regular expressions\n if not args.types:\n types_regex_l = [_DEFAULT_TARGET_REGEX]\n else:\n types_regex_l = args.types\n\n log(\"TRACE\", \"RegExs for target selection: {}\".format(types_regex_l))\n targets_info = lookup_urls(types_regex_l, manifest, inventory, args.refetch)\n # Exit early if we don't have anything to download\n if targets_info:\n target_urls = [info.get(\"url\") for info in targets_info]\n log(\"DEBUG\", \"URLs to download:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in target_urls)\n ))\n else:\n return True\n\n ## Now download all the images archives into a temp directory\n if args.dry_run:\n for target_info in targets_info:\n log(\"INFO\", \"[Dry Run] Fetch target: {}\".format(\n target_info.get(\"filename\")))\n return True\n with TemporaryDirectory() as temp_dir:\n for target_info in targets_info:\n update_target(\n target_info,\n temp_dir,\n images_dir,\n inventory,\n args\n )\n ## Update inventory with all the new content\n write_inventory(inventory, inventory_fn)\n\n except Exception as ex:\n log(\"ERROR\", 
\"Downloader raised an unhandled exception: {ex}\\n\"\n \"You can run this again with the '--verbose' flag to see more information\\n\"\n \"If the problem persists, please email the output to: {contact}\"\n .format(contact=_CONTACT, ex=ex))\n # Again, we wait on Windows systems because if this is executed in a\n # window, and immediately fails, the user doesn't have a way to see the\n # error message, and if they're not very savvy, they won't know how to\n # execute this in a shell.\n if not _YES and platform.system() == 'Windows':\n input('Hit Enter to continue.')\n return False\n log(\"INFO\", \"Images download complete.\")\n return True", "def main(dataset, n, ms=False, out=sys.stdout):\n # build lists of paths previews files, tif, rpc and dzi files\n prv_paths = ' '.join([os.path.join(dataset, 'prv_%02d.jpg' % (i+1)) for i in xrange(n)])\n tif_paths = ' '.join([os.path.join(dataset, 'im_panchro_%02d.tif' % (i+1)) for i in xrange(n)])\n rpc_paths = ' '.join([os.path.join(dataset, 'rpc_%02d.xml' % (i+1)) for i in xrange(n)])\n dzi8_paths, dzi16_paths = None, None\n if ms:\n ms_paths = ' '.join([os.path.join(dataset, 'im_ms_%02d.tif' % (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_8BITS_01.dzi'))):\n dzi8_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_8BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_16BITS_01.dzi'))):\n dzi16_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_16BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n\n # read infos in DIM*.XML file\n dim_xml_file = os.path.join(dataset, 'dim_01.xml')\n tif_file = os.path.join(dataset, 'im_panchro_01.tif')\n if os.path.isfile(dim_xml_file): # check if the link points to an existing file\n date = grep_xml(dim_xml_file, \"IMAGING_DATE\")\n satellite = grep_xml(dim_xml_file, \"INSTRUMENT_INDEX\")\n elif os.path.isfile(tif_file):\n date = extract_date_from_pleiades_filename(os.readlink(tif_file))\n satellite = extract_satellite_from_pleiades_filename(os.readlink(tif_file))\n else:\n date = 'DD-MM-YYYY'\n satellite = 'Pleiades 1X'\n\n # print to stdout\n if dzi8_paths or dzi16_paths:\n print('[%s]' % dataset, file=out)\n print('files = ', prv_paths, file=out)\n print('tif = ', tif_paths, file=out)\n print('rpc = ', rpc_paths, file=out)\n if ms:\n print('clr = ', ms_paths, file=out)\n if dzi8_paths:\n print('dzi8 = ', dzi8_paths, file=out)\n if dzi16_paths:\n print('dzi16 = ', dzi16_paths, file=out)\n s = dataset.split(os.path.sep)\n if len(s) == 3: # ie the path is of the kind 'pleiades/reunion/dataset_1'\n print('title = %s (%s)' % (s[1].capitalize(), s[2][-1]), file=out) # ie 'Reunion (1)'\n elif len(s) == 2: # ie the path is of the kind 'pleiades/reunion'\n print('title = %s' % s[1].capitalize(), file=out) # ie 'Reunion'\n else:\n print('path %s not expected by the author of the script: ' % dataset, s, file=sys.stderr)\n print('date = %s' % date, file=out)\n print('satellite = Pleiades %s' % satellite, file=out)\n print('nb_img = %d' % n, file=out)\n if ms:\n print('color = panchro_xs', file=out)\n else:\n print('color = panchro', file=out)", "def extract_zip(dataset_path, target_path):\n dataset_path = os.path.join(dataset_path,'covidx-cxr2.zip')\n print(f'Extracting zip file: {dataset_path}')\n with ZipFile(file=dataset_path) as zip_file:\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n zip_file.extract(member=file, path=os.path.join(target_path, 
'xray'))\n os.remove(dataset_path)", "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running zscan on pixel %d\" % (self.pixel))\n\n runzscan = RunZScan(self.config)\n if not os.path.isfile(runzscan.filename):\n runzscan.run()\n runzscan.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()", "def main(url):\n print(f\"Running main with URL = {url}...\")\n imagehits(downloaddata(url))", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def main():\n if not os.path.exists(testcase.TestCase.dir_results):\n os.makedirs(testcase.TestCase.dir_results)\n if env.get('DEBUG').lower() == 'true':\n logging.config.fileConfig(config.get_xtesting_config(\n 'logging.debug.ini', constants.DEBUG_INI_PATH_DEFAULT))\n else:\n logging.config.fileConfig(config.get_xtesting_config(\n 'logging.ini', constants.INI_PATH_DEFAULT))\n logging.captureWarnings(True)\n os.chdir(testcase.TestCase.dir_results)\n Campaign.zip_campaign_files()", "def analyze_test_image_corpus(args):\n\n # First: index the existing corpus\n\n name_to_refs = {}\n\n for refdir in glob(os.path.join(DATA, 'refimg_*')):\n name = os.path.basename(refdir).replace('refimg_', '')\n refs = {}\n\n for p in os.listdir(refdir):\n if not p.endswith('.png'):\n continue\n\n refs[p.replace('.png', '')] = set()\n\n name_to_refs[name] = refs\n\n # Now trawl the samples and see which refimgs we hit\n\n def directory_to_paths(dir_path):\n for filename in os.listdir(dir_path):\n name = filename.replace('.png', '')\n if name in name_to_refs:\n yield name, os.path.join(dir_path, filename)\n\n def zip_to_paths(zip_path):\n from zipfile import ZipFile\n\n with tempfile.TemporaryDirectory() as tmpdir, ZipFile(zip_path) as zip:\n for zipname in zip.namelist():\n zip.extract(zipname, tmpdir)\n name = os.path.basename(zipname).replace('.png', '')\n if name in name_to_refs:\n yield name, os.path.join(tmpdir, zipname)\n\n for path in args:\n if os.path.isdir(path):\n paths = directory_to_paths(path)\n sampname = os.path.basename(path)\n elif path.endswith('.zip'):\n paths = zip_to_paths(path)\n sampname = os.path.basename(path).replace('.zip', '')\n else:\n raise Exception(f'don\\'t know how to handle input path `{path}`')\n\n for name, imgpath in paths:\n refs = name_to_refs[name]\n refnames = sorted(refs.keys())\n found_it = False\n results = []\n\n for refname in refnames:\n refpath = os.path.join(DATA, 'refimg_' + name, refname + '.png')\n rv = compare_images(\n refpath,\n imgpath,\n tol=IMAGE_COMPARISON_TOLERANCE,\n in_decorator=True\n )\n\n if rv is None:\n refs[refname].add(sampname)\n found_it = True\n break\n\n failpath = imgpath.replace('.png', '-failed-diff.png')\n os.unlink(failpath)\n results.append((refname, rv['rms']))\n\n if not found_it:\n print(f'no refimg found for {sampname}::{name}:', ', '.join('%s=%.2f' % t for t in 
results))\n\n # Now report\n\n print()\n print('Report:')\n any_reports = False\n\n for name in sorted(name_to_refs.keys()):\n refs = name_to_refs[name]\n\n for refname in sorted(refs.keys()):\n refdata = refs[refname]\n\n if not refdata:\n print(f'- no hits to {name}::{refname}')\n any_reports = True\n\n if not any_reports:\n print('- no suggested modifications')", "def process(self, image):", "def main():\n run_time_str = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n log = _prepare_logging()\n Args = collections.namedtuple(\n \"Args\",\n (\n \"input_paths\",\n \"output_path\",\n \"root_directory\",\n \"ignore_dotfiles\",\n \"ignore_windows_volume_folders\",\n ),\n )\n # If we are running from Mac Automator, take file paths from sys.argv\n if check_running_from_automator():\n # Example sys.argv for two files selected: ['-c', '/absolute/path/1.txt',\n # '/absolute/path/to/2.txt']\n args = Args(\n input_paths=sys.argv[1:],\n output_path=None,\n root_directory=False,\n ignore_dotfiles=False,\n ignore_windows_volume_folders=False,\n )\n # Otherwise, use argparse and allow for some additional options\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_paths\", nargs=\"+\", help=\"Items to compress\")\n parser.add_argument(\"-o\", \"--output_path\", \"--output\", help=\"Filename for zip\")\n parser.add_argument(\n \"-d\",\n \"--root-directory\",\n action=\"store_true\",\n help=\"Place all files in zip within a shared parent folder\",\n )\n parser.add_argument(\n \"--ignore-dotfiles\",\n action=\"store_true\",\n help=\"Ignore files and folders beginning with '.' (typically these are hidden folders)\",\n )\n parser.add_argument(\n \"--ignore-windows-volume-folders\",\n action=\"store_true\",\n help=(\n \"Ignore folders named 'System Volume Information' and '$RECYCLE.BIN' (typically\"\n \" these contain hidden system information)\"\n ),\n )\n\n parsed_args = parser.parse_args()\n args = Args(**vars(parsed_args))\n\n # Check passed arguments and return if issues\n if get_missing_sources(args.input_paths):\n printer(\n \"Path(s) {} not found\".format(get_list_as_str(get_missing_sources(args.input_paths))),\n \"error\",\n True,\n )\n return\n\n # Set path separator based on OS\n if platform.system() == \"Windows\":\n path_separator = \"\\\\\"\n else:\n path_separator = \"/\"\n\n # Convert input paths into absolute paths\n input_paths = [os.path.abspath(path) for path in args.input_paths]\n\n # Set output path\n if args.output_path is not None:\n output_path = args.output_path\n output_directory = os.path.dirname(output_path)\n else:\n if check_running_from_automator():\n # Last item in the list of arguments will be the last item clicked in Finder\n output_directory = os.path.dirname(input_paths[-1])\n else:\n output_directory = \".\"\n if len(input_paths) == 1:\n output_filename = os.path.basename(\"{}.zip\".format(input_paths[0]))\n else:\n output_filename = \"{}_archive.zip\".format(run_time_str)\n output_path = get_safe_file_path(os.path.join(output_directory, output_filename))\n printer(\"Zip file will be created at path '{}'\".format(output_path), \"info\")\n\n # Create zipfile and get file_hash_dict info for subsequent verification\n try:\n file_hash_dict, total_file_count = create_zip(\n output_path,\n input_paths,\n args.ignore_dotfiles,\n args.ignore_windows_volume_folders,\n args.root_directory,\n path_separator,\n )\n except:\n # Log the exception to a file, so we can view later if running from Automator\n error_log_file_path = os.path.join(\n 
output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n error_log_handler = logging.FileHandler(error_log_file_path)\n error_log_handler.setLevel(logging.ERROR)\n error_log_handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n log.addHandler(error_log_handler)\n log.exception(\"Exception occurred during creation of zip file '%s':\", output_path)\n printer(\n \"Error occurred - see '{}'\".format(os.path.abspath(error_log_file_path)), \"error\", True\n )\n if os.path.isfile(output_path):\n os.remove(output_path)\n return\n printer(\"'{}' finalised - will now be verified\".format(output_path), \"info\")\n\n # Get hashes of files within finalised zip\n zip_hash_dict = {}\n with zipfile.ZipFile(output_path, \"r\") as zip_handler:\n zip_file_listing = zip_handler.namelist()\n zip_file_count = 0\n for file_within_zip in zip_file_listing:\n # Todo: confirm no 'file_info.is_dir()' type check needed here - don't believe so, as\n # only files with paths are being added, rather than directories as separate archive\n # items\n zip_file_count += 1\n hash_value = hash_file_in_zip(zip_handler, file_within_zip)\n if hash_value not in zip_hash_dict:\n zip_hash_dict[hash_value] = []\n zip_hash_dict[hash_value].append(file_within_zip)\n\n # Verify that hashes from source files match those for compressed files within newly-created zip\n if file_hash_dict == zip_hash_dict and total_file_count == zip_file_count:\n printer(\"Verification complete; no discrepancies identified\", \"info\")\n printer(\"'{}' created successfully\".format(output_path), \"info\", True)\n else:\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n with open(error_log_file_path, \"w\") as error_log_file_handler:\n for hash_value, file_paths in file_hash_dict.items():\n if hash_value not in zip_hash_dict:\n error_log_file_handler.write(\n \"Hash '{}' not present in zip file (with expected files {})\\n\".format(\n hash_value, get_list_as_str(file_paths)\n )\n )\n elif sorted(file_paths) != sorted(zip_hash_dict[hash_value]):\n error_log_file_handler.write(\n \"Files for hash '{}' do not match between source and zip ({} in source - {}\"\n \" in zip)\\n\".format(hash_value, file_paths, zip_hash_dict[hash_value])\n )\n printer(\n \"'{}' failed verification - see error log at '{}'\".format(\n output_path, os.path.abspath(error_log_file_path)\n ),\n \"error\",\n True,\n )\n os.remove(output_path) # Delete the zip that failed verification", "def __extract_zip(self):\n archive_binaries_dir = None\n zip_file = zipfile.ZipFile(self.archive)\n try:\n extract_dir = tempfile.mkdtemp()\n archive_binaries_dir = self.__create_extraction_dir(\n zip_file.namelist(), extract_dir, zip_file.extract)\n finally:\n zip_file.close()\n return archive_binaries_dir, extract_dir", "def _DownloadResultResources(self):\n\n target_zip = \"%s/layout-test-results-%s.zip\" % (self.output_dir,\n self.build)\n if self.zip_file:\n filename = self.zip_file\n self.delete_zip_file = False\n else:\n revision, build_name = self._GetRevisionAndBuildFromArchiveStep()\n zip_url = GetZipFileURL(revision, build_name)\n if self.verbose:\n print \"Downloading zip file from %s to %s\" % (zip_url, target_zip)\n filename = self._DownloadFile(zip_url, target_zip, \"b\")\n if not filename:\n if self.verbose:\n print \"Could not download zip file from %s. 
Does it exist?\" % zip_url\n return False\n\n if zipfile.is_zipfile(filename):\n zip = zipfile.ZipFile(filename)\n if self.verbose:\n print 'Extracting files...'\n directory = \"%s/layout-test-results-%s\" % (self.output_dir, self.build)\n CreateDirectory(directory)\n self._UnzipZipfile(zip, TEMP_ZIP_DIR)\n\n for failure in self.failures:\n failure.test_expectations_line = (\n self._GetTestExpectationsLine(failure.test_path))\n if self.exclude_wontfix and failure.IsWontFix():\n self.failures.remove(failure)\n continue\n if failure.text_diff_mismatch:\n self._PopulateTextFailure(failure, directory, zip)\n if failure.image_mismatch:\n self._PopulateImageFailure(failure, directory, zip)\n if not self.use_local_baselines:\n failure.test_age = self._GetFileAge(failure.GetTestHome())\n failure.flakiness = self._GetFlakiness(failure.test_path, self.platform)\n zip.close()\n if self.verbose:\n print \"Files extracted.\"\n if self.delete_zip_file:\n if self.verbose:\n print \"Cleaning up zip file...\"\n path_utils.RemoveDirectory(TEMP_ZIP_DIR)\n os.remove(filename)\n return True\n else:\n if self.verbose:\n print \"Downloaded file '%s' doesn't look like a zip file.\" % filename\n return False", "def main():\n # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n # compression time. If read raw images during training, use 0 for faster IO speed.\n\n # create output folders if they don't already exist\n for dir in [save_folder, save_mask_folder,save_hist_plot_folder]:\n if dir != None:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('mkdir [{:s}] ...'.format(dir))\n\n else:\n # print('Folder [{:s}] already exists. Exit...'.format(save_folder))\n # sys.exit(1)\n pass # uncomment above two lines for ease of working, if necessary\n\n img_list = []\n for root, dirsfoo, file_list in sorted(os.walk(input_folder)): # +'/*SR.tif'\n for x in file_list: # assume only images in the input_folder\n if x.endswith(\"SR.tif\"):\n path = os.path.join(root, x) \n img_list.append(path)\n break\n # img_list = ['/data_dir/Scenes/20190619_191648_25_106f_3B_AnalyticMS_SR.tif'] # for testing\n def update(arg):\n pbar.update(arg)\n # img_list=img_list[238:270] # for testing\n pbar = ProgressBar(len(img_list))\n pool = Pool(n_thread) # (n_thread)\n for path in img_list:\n if input_mask_folder==None:\n path_mask=None\n else:\n path_mask=name_lookup(path) # lookup mask path\n pool.apply_async(worker,\n args=(path, save_folder, crop_sz, step, thres_sz, compression_level, path_mask, save_mask_folder),\n callback=update)\n pool.close()\n pool.join()\n print('All subprocesses done.')", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--scans_dir', metavar='<scans_dir>', dest='SCANS_DIR', action='store', required=True,\n help='Full path to directory where scan tiff stacks are. This directory should ONLY contain '\n 'scan tiff stacks.')\n parser.add_argument('--masks_dir', metavar='<scans_dir>', dest='MASKS_DIR', action='store', required=False,\n help='Full path to directory where stroke masks are. Stroke masks should be 8-bit grayscale '\n 'tiff stacks with the .tif extension. There should be one stroke mask for each scan in t'\n 'he <scans_dir> directory and this pairing should have identical ZYX dimensions. 
The str'\n 'oke mask tiffs should be named following this example: If <scans_dir> has a file called'\n 'scan1.tif, the corresponding stroke mask should be named scan1_stroke_mask.tif')\n parser.add_argument('--W', metavar='<INIT_W', dest='INIT_W', action='store', required=True,\n help='An integer value representing the width of the cropping box.')\n\n parser.add_argument('--H', metavar='<INIT_H', dest='INIT_H', action='store', required=True,\n help='An integer value representing the height of the cropping box.')\n\n args = vars(parser.parse_args())\n crop_all_stacks(args)\n return 0", "def test_unarchive_run(self):\n pass", "def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def main(args):\n\n data = {\n 'id': '00353',\n 'expanded_folder': '00353.1/9a0f0b0d-1f0b-47c8-88ef-050bd9cdff92',\n 'version': '1',\n 'status': 'VOR',\n 'updated_date': datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%S\")\n }\n\n settings = settings_lib.get_settings('exp')\n identity = \"resize_%s\" % int(random.random() * 1000)\n log_file = \"worker.log\"\n logger = log.logger(log_file, settings.setLevel, identity)\n conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)\n act = activity_ArchiveArticle(settings, logger, conn=conn)\n act.do_activity(data)", "def main(args):\n input_image = make_input_image(args.input_format, args.input_images)\n output_pattern = f\"output/{args.prefix}{{}}.png\"\n \n # Exit early if input == output\n if args.input_format == args.output_format:\n print(\"Input format = output format. 
Copying files.\")\n input_image.write(output_pattern)\n return\n\n output_image = make_output_image(args.output_format, args.output_shape)\n\n input_is_sphere = args.input_format == \"sphere\"\n output_is_sphere = args.output_format == \"sphere\"\n\n try:\n if input_is_sphere:\n output_image.project_from(input_image)\n output_image.write(output_pattern)\n elif output_is_sphere:\n input_image.unproject_to(output_image)\n output_image.write(output_pattern)\n else:\n raise NotImplementedError\n except NotImplementedError:\n print(f\"Sorry, {args.input_format} -> {args.output_format} not supported.\")\n return", "def extract_metadata():\n\n create_output(ARGS.out)\n index = pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))", "def analyze_images_in_folder(self, folder, generate_zmax = False, show_result = True, save_mask = True, save_excel = True):\r\n flat_cell_counted_in_folder = 0 \r\n total_cells_counted_in_folder = 0\r\n \r\n # If need to do zmax projection first\r\n if generate_zmax == True:\r\n ProcessImage.cam_screening_post_processing(folder)\r\n # Here a new folder for maxProjection is generated inside, change the path\r\n folder = os.path.join(folder, 'maxProjection')\r\n \r\n # If background images are taken\r\n if os.path.exists(os.path.join(folder, 'background')):\r\n # If the background image is taken to substract out\r\n background_substraction = True\r\n \r\n # Get all the background files names\r\n background_fileNameList = []\r\n for file in os.listdir(os.path.join(folder, 'background')):\r\n if \"tif\" in file: \r\n background_fileNameList.append(os.path.join(folder, 'background', file))\r\n \r\n background_image = ProcessImage.image_stack_calculation(background_fileNameList, operation = \"mean\")\r\n \r\n # Get a list of file names\r\n fileNameList = []\r\n for file in os.listdir(folder):\r\n if \"tif\" in file and \"LED\" not in file:\r\n fileNameList.append(file)\r\n \r\n print(fileNameList)\r\n \r\n # Analyse each image\r\n for image_file_name in fileNameList:\r\n print(image_file_name)\r\n Rawimage = imread(os.path.join(folder, image_file_name))\r\n\r\n if background_substraction == True:\r\n Rawimage = np.abs(Rawimage - background_image)\r\n \r\n # Analyze each image\r\n # Run the detection on input image.\r\n MLresults = self.DetectionOnImage(Rawimage, axis = None, show_result = show_result)\r\n\r\n if save_mask == True:\r\n \r\n if not os.path.exists(os.path.join(folder, 'ML_masks')):\r\n # If the folder is not there, create the folder\r\n os.mkdir(os.path.join(folder, 'ML_masks')) \r\n \r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(Rawimage, MLresults['rois'], 
MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection Rawimage\r\n fig_name = os.path.join(folder, 'ML_masks', 'ML_mask_{}.png'.format(image_file_name[0:len(image_file_name)-4]))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n if flat_cell_counted_in_folder == 0:\r\n cell_Data, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n else: \r\n Cell_Data_new, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n if len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n total_cells_counted_in_folder += total_cells_counted_in_coord\r\n \r\n if save_excel == True:\r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(folder, 'CellsProperties_{}flat_outof_{}cells.xlsx'.format(flat_cell_counted_in_folder, total_cells_counted_in_folder)))\r\n \r\n return cell_Data", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def imageProcessing():\n\n # Parser initialization\n parser = argparse.ArgumentParser(description=colourers.toCyan('Image processor for reading/writing images into BMP/PNG formats and applying transformations on it.'))\n \n # Formats Parser\n group = parser.add_argument_group(colourers.toGreen('formats'))\n formatParser = group.add_mutually_exclusive_group(required=True)\n formatParser.add_argument('--bmp',\n type=str,\n metavar=colourers.toRed('<bmp file name>'), \n help=colourers.toMagenta('bmp file to parse'))\n formatParser.add_argument('--png',\n type=str,\n metavar=colourers.toRed('<png file name>'),\n help=colourers.toMagenta('png file to parse'))\n\n # Printers Parser\n group = parser.add_argument_group(colourers.toYellow('printers'))\n printers = group.add_mutually_exclusive_group()\n printers.add_argument('--header',\n help=colourers.toMagenta('print the file format header'),\n action='store_true')\n printers.add_argument('--print-color',\n '-pc',\n type=int,\n nargs=2,\n metavar=(colourers.toRed('<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('pixel to print'))\n printers.add_argument('--histogram',\n action='store_true',\n help=colourers.toMagenta('print histogram associated'))\n printers.add_argument('--output',\n '-o',\n type=str,\n metavar=colourers.toRed('<output file>'),\n help=colourers.toMagenta('image output file'))\n\n # Transformers Parser\n transformers = parser.add_argument_group(colourers.toBlue('transformers'))\n transformers.add_argument('--half',\n 
action='store_true',\n help='applying the filter on one half of the image')\n transformers.add_argument('--rotate',\n '-r',\n type=int,\n choices=[90, 180, 270],\n metavar=colourers.toRed('<degree of rotation>'),\n help=colourers.toMagenta('rotate the image'))\n transformers.add_argument('--scale',\n '-s',\n type=int,\n nargs='+',\n action=required_length(1, 2),\n metavar=(colourers.toRed('<scaleRatio> | [<width>'), colourers.toRed('<height>')),\n help=colourers.toMagenta('scale/shrink the image'))\n transformers.add_argument('--contrast',\n '-c',\n type=float,\n metavar=colourers.toRed('<contrast factor>'),\n help=colourers.toMagenta('apply a factor contrast'))\n transformers.add_argument('--grayscale',\n '-gs',\n action='store_true',\n help=colourers.toMagenta('to grayscale image'))\n transformers.add_argument('--binary',\n '-b',\n action='store_true',\n help=colourers.toMagenta('to binary image'))\n transformers.add_argument('--invert',\n '-i',\n action='store_true',\n help=colourers.toMagenta('to inverted image, equivalent to --contrast -1'))\n transformers.add_argument('--channel',\n type=str,\n choices=['blue', 'green', 'red'],\n metavar=colourers.toRed('<channel>'),\n nargs='+',\n action=required_length(1, 2),\n help=colourers.toMagenta('to the specified channel'))\n \n # Filters Parser\n filters = parser.add_argument_group(colourers.toCyan('filters'))\n filters.add_argument('--edge-detection',\n '-ed',\n type=str,\n choices=['canny', 'sobel', 'prewitt', 'roberts', 'kirsch'],\n metavar=colourers.toRed('<filter name>'),\n help=colourers.toMagenta('perform an edge detection'))\n filters.add_argument('--retrieve-color',\n '-rv',\n action='store_true',\n help=colourers.toMagenta('retrieve the colors of a grayscale image'))\n filters.add_argument('--edge-enhancement',\n '-ee',\n action='store_true', \n help=colourers.toMagenta('applying increased edge enhancement filter'))\n filters.add_argument('--sharpen',\n action='store_true',\n help=colourers.toMagenta('sharpening the image'))\n filters.add_argument('--unsharp',\n action='store_true',\n help=colourers.toMagenta('unsharp the image')) \n filters.add_argument('--denoise',\n action='store_true',\n help=colourers.toMagenta('denoise the image'))\n filters.add_argument('--texture-detection',\n '-td',\n action='store_true',\n help=colourers.toMagenta('applying texture detection (Gabor Filter)'))\n filters.add_argument('--blur',\n type=str,\n choices=['simple', 'more', 'average', 'gaussian', 'motion'],\n metavar=colourers.toRed('<type of blur>'),\n help=colourers.toMagenta('perform the selected blur'))\n filters.add_argument('--blur-iteration',\n '-bi',\n type=int,\n default=1,\n metavar=colourers.toRed('<number of iteration>'),\n help=colourers.toMagenta('apply N times the blur function'))\n filters.add_argument('--emboss',\n action='store_true',\n help=colourers.toMagenta('perform an embossing filter'))\n filters.add_argument('--overlap',\n type=str,\n nargs='+',\n metavar=colourers.toRed('<image to overlap>'),\n help=colourers.toMagenta('overlap an image given on the selected image'))\n\n # Args parsing\n args = parser.parse_args()\n\n filename = \"\"\n # BMP Block\n if args.bmp:\n filename = args.bmp\n\n if not os.path.isfile(filename):\n colourers.error('\"{}\" does not exist !'.format(filename))\n sys.exit(-1)\n colourers.success('Success Opening {}...'.format(filename))\n\n bmp = BMP(filename)\n half = args.half\n\n if args.print_color:\n width, height = args.print_color\n colourers.info(f'Printing pixel color of ({width}, 
{height})')\n Printers.printPixel(bmp, width, height)\n sys.exit(0)\n \n elif args.header:\n colourers.info(f'Printing BMP header of {bmp.filename}')\n Printers.printHeader(bmp)\n sys.exit(0)\n \n elif args.histogram:\n colourers.info(f'Printing color histogram of {bmp.filename}')\n Printers.printHistogram(bmp)\n sys.exit(0)\n \n if (args.rotate or args.scale or args.contrast or args.grayscale or \n args.binary or args.channel or args.edge_detection or args.retrieve_color or\n args.edge_enhancement or args.blur or args.emboss or args.overlap or args.texture_detection or\n args.denoise or args.sharpen or args.unsharp):\n if not hp.atLeastOne(args.output, (\n args.rotate,\n args.scale,\n args.contrast,\n args.grayscale,\n args.binary,\n args.channel,\n args.edge_detection,\n args.retrieve_color,\n args.edge_enhancement,\n args.blur,\n args.emboss,\n args.overlap,\n args.texture_detection,\n args.denoise,\n args.sharpen,\n args.unsharp\n )):\n parser.error('--rotate/--scale/--contrast/--grayscale/--binary/--channel/--edge-detection/--retrieve-color/--edge-enhancement/--blur/--emboss/--overlap/--texture-detection/--denoise/--sharpen/--unsharp and --output must be given together')\n \n if args.rotate:\n degree = args.rotate\n colourers.info(f'Rotating image to {degree} degree')\n bmp.imageData = Transformers.rotate(bmp, degree)\n\n if args.scale:\n if len(args.scale) == 2:\n width, height = args.scale\n colourers.info(f'Scaling image to {width}x{height} pixels')\n bmp.imageData = Transformers.scale(bmp, height, width)\n else:\n scaleRatio = args.scale[0]\n\n colourers.info(f'Scaling image to {scaleRatio} scale ratio')\n\n height = int(hp.readLittleEndian(bmp.height))\n width = int(hp.readLittleEndian(bmp.width))\n\n bmp.imageData = Transformers.scale(bmp, height * scaleRatio, width * scaleRatio)\n \n if args.contrast:\n factor = args.contrast\n colourers.info(f'Applying a factor contrast of {factor}')\n bmp.imageData = Transformers.contrast(bmp, factor)\n \n if args.grayscale:\n colourers.info(f'Applying grayscale mask to the image')\n bmp.imageData = Transformers.grayscale(bmp, half)\n \n if args.binary:\n colourers.info(f'Applying binary mask to the image')\n bmp.imageData = Transformers.binary(bmp, half)\n \n if args.invert:\n colourers.info(f'Inverting image colours')\n bmp.imageData = Transformers.invert(bmp, half)\n \n if args.channel:\n if len(args.channel) == 2:\n c1, c2 = args.channel\n colourers.info(f'Keeping only {c1} and {c2} channels of the image')\n bmp.imageData = Transformers.toChannel(bmp, [c1, c2], half)\n else:\n channel = args.channel[0]\n colourers.info(f'Keeping only {channel} channel of the image')\n bmp.imageData = Transformers.toChannel(bmp, channel, half)\n \n if args.denoise:\n colourers.info(f'Denoising the image')\n bmp.imageData = Filters.wienerFilter(bmp.imageData, gaussianKernel(9, sigma=0.33), K=10)\n \n if args.texture_detection:\n colourers.info(f'Applying texture detection (Gabor Filter)')\n bmp.imageData = Filters.gaborFilter(bmp.imageData, gaborKernel(0))\n \n if args.edge_enhancement:\n colourers.info(f'Applying increased edge enhancement filter')\n bmp.imageData = Filters.iee(bmp.imageData)\n\n if args.edge_detection:\n filterName = args.edge_detection\n if filterName == 'canny':\n colourers.info(f'Performing Canny filter for edge detection')\n bmp.imageData = Filters.ced(bmp.imageData, sigma=0.33, kernelSize=9, weakPix=50)\n if filterName == 'sobel':\n colourers.info(f'Performing Sobel filter for edge detection')\n bmp.imageData = 
Filters.sed(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'prewitt':\n colourers.info(f'Performing Prewitt filter for edge detection')\n bmp.imageData = Filters.ped(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'roberts':\n colourers.info(f'Performing Roberts filter for edge detection')\n bmp.imageData = Filters.red(bmp.imageData, sigma=0.33, kernelSize=9)\n if filterName == 'kirsch':\n colourers.info(f'Performing Kirsch filter for edge detection')\n bmp.imageData = Filters.ked(bmp.imageData, sigma=0.33, kernelSize=9)\n\n if args.sharpen:\n colourers.info(f'Sharpening the image')\n bmp.imageData = Filters.sharpen(bmp.imageData)\n \n if args.unsharp:\n colourers.info(f'Unsharpening the image')\n bmp.imageData = Filters.unsharp(bmp.imageData)\n\n if args.retrieve_color:\n colourers.info(f'Retrieving color')\n bmp.imageData = Filters.retrieveColor(bmp.imageData)\n \n if args.blur:\n blurType = args.blur\n colourers.info(f'Performing a {blurType} blur')\n for _ in range(args.blur_iteration):\n blurFunc = Filters.blur.switcher.get(blurType)\n bmp.imageData = blurFunc(bmp.imageData)\n \n if args.emboss:\n colourers.info(f'Performing emboss filter')\n bmp.imageData = Filters.emboss(bmp.imageData)\n \n if args.overlap:\n overlappers = []\n for ov in args.overlap:\n overlappers.append(BMP(ov).imageData)\n colourers.info(f'Performing an overlapping between {bmp.filename} and {args.overlap}')\n bmp.imageData = Filters.overlap(bmp.imageData, overlappers)\n \n if args.output:\n outputFile = args.output\n hp.saveBMP(bmp, bmp.imageData, outputFile)\n colourers.success(f'Succesfully saved into {outputFile}')\n sys.exit(0)\n \n parser.error('Give at least one more argument')\n \n # PNG Block\n else:\n filename = args.png\n\n if not os.path.isfile(filename):\n print('\"{}\" does not exist'.format(filename), file=sys.stderr)\n sys.exit(-1)\n print('Success Opening {}...'.format(filename))\n \n png = PNG(filename)", "def handleImageRunner(self) :\n tag = self.readByte()\n if tag == ord(self.imagerunnermarker1[-1]) :\n oldpos = self.pos-2\n codop = self.minfile[self.pos:self.pos+2]\n length = unpack(\">H\", self.minfile[self.pos+6:self.pos+8])[0]\n self.pos += 18\n if codop != self.imagerunnermarker2 :\n self.pos += length\n self.logdebug(\"IMAGERUNNERTAG SKIP %i AT %08x\" % (self.pos-oldpos, self.pos))\n else :\n self.pos -= 1 # Adjust position", "def extractZipFiles(rootDir, zipDir):\n for root, dirs, files in os.walk(zipDir, topdown=False):\n for name in files:\n \n zipFiles = os.path.join(root, name)\n \n #Check file extension here\n if \".zip\" not in zipFiles:\n continue\n \n else:\n zipPath = zipfile.ZipFile(zipFiles, 'r')\n #print(zipPath) \n \n filesInZip = zipPath.namelist()\n i = 0 \n for i in range(len(filesInZip)):\n #print(filesInZip[i])\n #print(zipPath.getinfo(filesInZip[i]))\n \n if \".mp3\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".m4a\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".mp4\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".png\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".jpg\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], 
rootDir))\n \n elif \".pdf\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n else:\n print(\"No media found in zip file {0}\".format(name))\n \n zipPath.close()", "def ZipResultFolder(md5_key, cnt):\n subfoldername = md5_key[:2]\n md5_subfolder = \"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n zipfile_cache = cachedir + \".zip\"\n if os.path.exists(cachedir) and not os.path.exists(zipfile_cache):\n origpath = os.getcwd()\n os.chdir(md5_subfolder)\n targetfile = os.path.join(cachedir, \"query.result.txt\")\n if os.path.exists(targetfile):\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n cmdline = \" \".join(cmd)\n try:\n print((\"%d: %s\"%(cnt, cmdline)))\n subprocess.check_call(cmd)\n print((\"%d: %s\"%(cnt, \"rmtree(%s)\"%(md5_key) )))\n os.system(\"chown %s:%s %s\"%(user, group, \"%s.zip\"%(md5_key)))\n shutil.rmtree(md5_key)\n except:\n print(\"Failed to zip folder %s\"%(cachedir), file=sys.stderr)\n raise\n else:\n print((\"%d: %s\"%(cnt, \"bad result! just rmtree(%s)\"%(md5_key) )))\n shutil.rmtree(md5_key)\n os.chdir(origpath)\n elif os.path.exists(zipfile_cache):\n #check weather the zipped file is a valid prediction result\n try:\n with ZipFile(zipfile_cache, \"rb\") as myzip:\n li = myzip.namelist()\n target = \"%s/query.result.txt\"%(md5_key)\n if target in li:\n print((\"%d: %s\"%(cnt, \"Valid zipped result for %s\"%(md5_key) )))\n else:\n print((\"%d: %s\"%(cnt, \"bad zipped result! just delete zipfile(%s)\"%(md5_key) )))\n os.remove(zipfile_cache)\n except Exception as e:\n print((\"%d: %s\"%(cnt, \"BadZipFile! just delete zipfile(%s)\"%(md5_key) )))\n os.remove(zipfile_cache)", "def main(folder, outputfile):\n parser = argument_parser()\n args = parser.parse_args()\n\n show_all = args.show_all\n verbose = args.verbose\n\n random.seed(args.rng_seed)\n\n args.files = folder\n print args.files\n\n try:\n image = Image.open(args.files[0])\n except IOError, msg:\n print >> sys.stderr, msg\n return 1\n if image.mode == 'P':\n image = image.convert('RGB')\n \n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n\n if not show_all:\n def nothing(a, b):\n pass\n do_something = nothing\n elif args.saving:\n do_something = Imsave(\"saved/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]) + \"/\").save\n else:\n import im_debug\n do_something = im_debug.show\n\n if verbose:\n import time\n class Logger:\n def __init__(self):\n self.t = 0\n\n def __call__(self, m):\n t_n = time.time()\n if self.t > 0:\n print >> sys.stderr, \"\\t\" + str(t_n - self.t)\n print >> sys.stderr, m\n self.t = t_n\n logger = Logger()\n\n else:\n def logger(m):\n pass\n \n if args.manual_mode:\n import manual\n try:\n lines = manual.find_lines(image)\n except manual.UserQuitError:\n #TODO ask user to try again\n return 1\n else:\n if args.l_cache:\n filename = (\"saved/cache/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]))\n cache_dir = \"/\".join(filename.split('/')[:-1])\n if os.path.exists(filename):\n lines, l1, l2, bounds, hough = pickle.load(open(filename))\n print >> sys.stderr, \"using cached results\"\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n d_file = open(filename, 'wb')\n pickle.dump((lines, l1, l2, bounds, hough), d_file)\n d_file.close()\n else:\n lines, l1, l2, 
bounds, hough = linef.find_lines(image, do_something, logger)\n\n grid, lines = gridf.find(lines, image.size, l1, l2, bounds, hough,\n show_all, do_something, logger)\n if show_all:\n im_g = image.copy()\n draw = ImageDraw.Draw(im_g)\n for l in grid[0] + grid[1]:\n draw.line(l, fill=(64, 255, 64), width=1)\n do_something(im_g, \"grid\", name=\"grid\")\n\n intersections = intrsc.b_intersects(image, lines, show_all, do_something, logger)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n\n logger(\"finished\")\n\n # TODO! refactor this mess:\n if len(args.files) == 1:\n\n if args.sgf_output:\n print board.asSGFsetPos()\n else:\n print board\n \n else:\n game = output.Game(19, board) #TODO size parameter\n #for f in args.files[1:]:\n for i, f in enumerate(args.files):\n try:\n image = Image.open(f)\n except IOError, msg:\n print >> sys.stderr, msg\n continue\n if verbose:\n print >> sys.stderr, \"Opening\", f\n if image.mode == 'P':\n image = image.convert('RGB')\n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n if args.sgf_output:\n game.addMove(board)\n else:\n with open(outputfile + str(i) + \".txt\", \"w\") as f:\n f.write(str(board))\n\n if args.sgf_output:\n print game.asSGF()\n\n return 0", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n\n download_images_from_grid_vector(grid_vector=args.grid_vector,\n output_dir=args.output_dir,\n type_id=args.type,\n product_type_id=args.product_type,\n num_jobs=args.num_jobs)", "def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()", "def main():\r\n parser = CommonArgParser(__file__)\r\n parser.add_argument('src_dir', help='Source directory')\r\n parser.add_argument(\r\n 'out_dir',\r\n default='.',\r\n help=\"\"\"The directory the files to be extracted.\r\n (Default: Current directoty\"\"\")\r\n args = parser.parse_all()\r\n for f in next_file(args.src_dir, ['*.tgz', '*.tar.gz']):\r\n untgz(f, args.out_dir)", "def __init__(self):\r\n self.label = \"ExamineOutputs\"\r\n self.description = \"This tool takes the output zip file from the ProcessGeogrid script\" + \\\r\n \"and creates a raster from each output NetCDF file.\" + \\\r\n \"\" + \\\r\n \"The Input should be a .zip file that was created using the WRF Hydro pre-\" + \\\r\n \"processing tools. The Output Folder parameter should be set to a non-existent \" +\\\r\n \"folder location. 
The tool will create the folder which will contain the results.\"\r\n self.canRunInBackground = True\r\n self.category = \"Utilities\"", "def main():\n try:\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n lookoutvision_client = boto3.client(\"lookoutvision\")\n s3_resource = boto3.resource('s3')\n\n parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)\n parser.add_argument(\n \"project\", help=\"The project containing the model that you want to use.\")\n parser.add_argument(\n \"version\", help=\"The version of the model that you want to use.\")\n parser.add_argument(\n \"image\",\n help=\"The file that you want to analyze. Supply a local file path or a \"\n \"path to an S3 object.\")\n args = parser.parse_args()\n\n if args.image.startswith(\"s3://\"):\n photo = Inference.download_from_s3(s3_resource, args.image)\n else:\n photo = args.image\n\n print(f\"Analyzing {photo}.\")\n anomalous, confidence = Inference.detect_anomalies(\n lookoutvision_client, args.project, args.version, photo)\n\n if args.image.startswith(\"s3://\"):\n os.remove(photo)\n\n state = \"anomalous\" if anomalous else \"normal\"\n print(\n f\"Your model is {confidence:.0%} confident that the image is {state}.\")\n except ClientError as err:\n print(f\"Service error: {err.response['Error']['Message']}\")\n except FileNotFoundError as err:\n print(f\"The supplied file couldn't be found: {err.filename}.\")\n except ValueError as err:\n print(f\"A value error occurred: {err}.\")\n else:\n print(\"Successfully completed analysis.\")", "def process_image(self):\n pass", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def source_extract_image(data, bkgArr=None, thresh=0.05, sortType='center'):\n data = data.byteswap().newbyteorder()\n if bkgArr is None:\n bkgArr = np.zeros(data.shape)\n o = sep.extract(data, thresh, segmentation_map=True)\n if sortType == 'size':\n print('Sorting extracted objects by radius from size')\n sizeSortedObjects = sorted(\n enumerate(o[0]), key=lambda src: src[1]['npix']\n )\n return sizeSortedObjects, o[1]\n elif sortType == 'center':\n print('Sorting extracted objects by radius from center')\n centerSortedObjects = sorted(\n enumerate(o[0]),\n key=lambda src: (\n (src[1]['x'] - data.shape[0] / 2)**2 +\n (src[1]['y'] - data.shape[1] / 2)**2\n )\n )[::-1]\n return centerSortedObjects, o[1]", "def execute(self):\n cwd = self.fm.thisdir\n marked_files = cwd.get_selection()\n\n if not marked_files:\n return\n\n def refresh(_):\n cwd = self.fm.get_directory(original_path)\n cwd.load_content()\n\n original_path = cwd.path\n parts = self.line.split()\n au_flags = parts[1:]\n\n descr = \"compressing files in: \" + os.path.basename(parts[1])\n obj = CommandLoader(args=['apack'] + au_flags + \\\n [os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr)\n\n obj.signal_bind('after', refresh)\n self.fm.loader.add(obj)", "def gzip_assets():\n run('cd %(repo_path)s; python gzip_assets.py' % env)", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal 
of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def main(filepath, maskpath):\n analytics.result = {}\n img_mask = nib.load(maskpath).get_fdata()\n print(\"loading\\n\", flush=True)\n # segmentation\n print(\"loading segmentation...\\n\", flush=True)\n seg = nib.load(filepath).get_fdata()\n # post processing\n print(\"applying some post processing...\\n\", flush=True)\n seg = apply_mask(seg, img_mask)\n seg_2d = binarize(seg, img_mask)\n print(\"End of slice processing\\n\", flush=True) \n distance_map, skel = analytics.distance(seg_2d)\n print(\"distance\\n\", flush=True)\n dist_per_label , skel= analytics.label_value(distance_map, skel)\n print(\"label_value\\n\", flush=True) \n analytics.get_analytics(seg, img_mask, dist_per_label, skel, verbose=True)\n print(\"got analytics\\n\", flush=True)", "def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)", "def main():\n\n # Load the API credentials\n with open('./flickr_api.txt') as f:\n keys = yaml.safe_load(f)\n\n # Set the API credentials\n flickr = flickrapi.FlickrAPI(keys['key'], keys['secret'])\n\n # Load the data\n df = pd.read_csv('./beauty-icwsm15-dataset.tsv', sep=\"\\t\", index_col=False)\n total_images = df.shape[0] * 1.0\n df['downloaded'] = None\n\n query_counter = 0.0\n for i, photo_id in enumerate(df['#flickr_photo_id']):\n if query_counter % 100.0 == 0:\n print(str(i) + '/' + str(total_images) + ' images (i.e. ' +\n str(np.round(i / total_images, 3) * 100) + \"%) complete.\")\n time.sleep(15)\n path = OUTPUT_FOLDER + str(photo_id) + \".jpg\"\n if os.path.exists(path):\n df.ix[i, 'downloaded'] = True\n continue\n try:\n query_counter += 1.0\n photo_response = flickr.photos.getInfo(photo_id=photo_id)\n download_photo(photo_id, photo_response)\n df.ix[i, 'downloaded'] = True\n except flickrapi.exceptions.FlickrError:\n df.ix[i, 'downloaded'] = False\n continue\n\n df.to_csv('./download_summary.tsv', sep=\"\\t\", index=False)", "def run_main():\n\n parser = argparse.ArgumentParser(description=\"Scan a run directory and create files to \")\n parser.add_argument('--run-directory', dest='run_directory',\n action='store', default='',\n help='path to directory with xed files to process')\n args = parser.parse_args(sys.argv[1:])\n\n if not os.path.isdir(args.run_directory):\n sys.stderr.write(\"{0} is not a directory, exiting\\n\".format(args.run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n\n if not os.path.exists('info'):\n os.mkdir('info')\n\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = \"info/{0}_{1}_files.csv\".format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = \"srm://ceph-se.osgconnect.net:8443/srm/v2/\" + \\\n \"server?SFN=/cephfs/srm/xenon/\" + \\\n entry.replace('/xenon/', '')\n 
csv_writer.writerow([run_name, directory, uri])", "def main(args):\n # Extract keys\n logger.info(\"Extracting brass bedpe file key from tarfile...\")\n bedpe, bedpe_index = extract_tar_keys(args.results_archive)\n # process bedpe\n logger.info(\"Processing brass bedpe {0}...\".format(bedpe))\n process_bedpe(args.results_archive, bedpe, bedpe_index, args.output_prefix)", "def main():\n if not Path(EXPORT_DIR).exists():\n print(\"Kindle is not connected.\", file=sys.stderr)\n sys.exit(1)\n\n type_zip = ('zip file', '*.zip')\n type_pdf = ('pdf file', '*.pdf')\n\n files = filedialog.askopenfiles(filetypes=[type_zip, type_pdf], initialdir=os.path.expanduser('~'))\n for f in files:\n export_path = Path(EXPORT_DIR) / f\"{Path(f.name).stem}.pdf\"\n if f.name.endswith('.zip'):\n with open(export_path, 'wb') as pdf, zipfile.ZipFile(f.name, 'r') as _zip:\n pdf.write(img2pdf.convert([_zip.open(img) for img in _zip.infolist()]))\n else:\n shutil.copy(f.name, export_path)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_filename\", help=\"transaction data input\")\n parser.add_argument(\n \"output_by_zip_filename\", help=\"running median output by zip code\")\n parser.add_argument(\n \"output_by_date_filename\", help=\"median by date for summary output\")\n args = parser.parse_args()\n\n r = Record()\n with open(args.input_filename, 'r') as f:\n f_out_running = open(args.output_by_zip_filename, \"a+\")\n for line in f:\n output_str = r.parse_single_entry(line) \n if output_str: # skip invalid input\n f_out_running.write(output_str)\n f_out_running.close()\n\n with open(args.output_by_date_filename, \"a+\") as f_date:\n summary_str = r.calc_and_export_medianvals_by_date(f_date)\n\n return", "def process_imgdir(self,imgdir):\n #Write images into resultdir\n resultdir = os.path.join(imgdir, 'results')\n #Read images from input dir\n inputdir = os.path.join(imgdir, 'inputs')\n shutil.rmtree(resultdir)\n os.mkdir(resultdir)\n #Read files from input images\n for fullname in os.listdir(inputdir):\n filepath = os.path.join(inputdir, fullname)\n if os.path.isfile(filepath):\n basename = os.path.basename(filepath)\n image = cv2.imread(filepath, cv2.IMREAD_COLOR)\n if len(image.shape) == 3 and image.shape[2] == 3:\n print('Processing %s ...' % basename)\n else:\n sys.stderr.write('Skipping %s, not RGB' % basename)\n continue\n #Extract haze from the scene and then save the image\n dehazed = self.get_scene_radiance(image)\n cv2.imwrite(os.path.join(resultdir, basename), dehazed)\n return os.path.join(resultdir, basename)", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def analyze(self):\n self.grayscale = (input(\"[G]rayscale or [C]olor? 
\").lower()[0] == \"g\")\n for i in range(1, 6):\n for j in range(1, 10):\n network_name = \"acas_%d_%d\" % (i, j)\n try:\n distance_classified = self.read_artifact(\n \"%s/distance\" % network_name)\n theta_classified = self.read_artifact(\n \"%s/theta\" % network_name)\n sample_pre, sample_post = self.read_artifact(\n \"%s/sample\" % network_name)\n single_line_data = self.read_artifact(\n \"%s/single_lines\" % network_name)\n except KeyError:\n # Skip due to missing data.\n continue\n print(\"Analyzing network:\", network_name)\n self.distance_plot(distance_classified)\n self.finalize_plot(\"%s/distance\" % network_name)\n self.theta_plot(theta_classified)\n self.finalize_plot(\"%s/theta\" % network_name)\n self.overlapping_plot(distance_classified, theta_classified)\n self.finalize_plot(\"%s/overlapping\" % network_name)\n self.sample_plot(sample_pre, sample_post)\n self.finalize_plot(\"%s/sample\" % network_name)\n\n self.single_line_plots(network_name, single_line_data)\n return True", "def extract_data(archive: ZipFile, dir_name: str) -> Data:\n with archive.open(f\"{dir_name}/caption.txt\", \"r\") as f:\n captions = f.readlines()\n data = []\n for line in captions:\n tmp = line.decode().strip().split()\n img_name = tmp[0]\n formula = tmp[1:]\n with archive.open(f\"{dir_name}/{img_name}.bmp\", \"r\") as f:\n # move image to memory immediately, avoid lazy loading, which will lead to None pointer error in loading\n img = Image.open(f).copy()\n data.append((img_name, img, formula))\n\n print(f\"Extract data from: {dir_name}, with data size: {len(data)}\")\n\n return data", "def main():\n if len(sys.argv) != 2:\n print('Usage: release.py <version>', file=sys.stderr)\n exit(1)\n version = sys.argv[1]\n with open('./manifest.json', 'r+') as f:\n manifest = json.load(f)\n manifest['version'] = version\n f.seek(0)\n json.dump(manifest, f, indent=2)\n f.truncate()\n\n os.system(f'zip cses-filter-v{version}.zip -r icons/ src/ manifest.json')", "def extraction(userinputs):\n #Set up required variables\n target_dir = userinputs['OUTDIR']\n seximage = userinputs['IMAGE']\n logging.info('Running sextractor on {}'.format(userinputs['IMAGE']))\n\n print 'Executing SExtractor on user selected image : ', seximage\n\n # Verify that file exists\n if os.path.exists(userinputs['DATA'] + '/' + seximage) == False:\n print 'File ' + seximage + ' could not be found in ' + userinputs['DATA']\n logging.critical(' Could not find {}. 
Quitting'.format(seximage))\n logging.debug('Looking for {} but unable to locate'.format(userinputs['DATA'] + '/' + seximage))\n filemanagement.shutdown('Quitting now...',userinputs)\n\n # Run sextractor\n logging.info('Start sextractor')\n os.chdir(target_dir + '/s_extraction')\n logging.debug('Changed dir to {}'.format(os.getcwd()))\n command = 'sex ' + userinputs['DATA'] + '/' + seximage + '[1] -c R2_wl_aa.config'\n os.system(command)\n os.chdir(target_dir)\n logging.debug('Changed working directory back to {}'.format(target_dir))\n\n # Read in results and make regions file of objects sextracted\n logging.info('Read in Sextractor catalog')\n xx, yy = np.loadtxt(target_dir + '/s_extraction/R2_wl_dpop_detarea.cat', unpack=True,\n skiprows=5, usecols=(0,1))\n\n outputfile = target_dir + '/s_extraction/catalog_ds9_sextractor.reg'\n\n logging.info('Writing region file from source extractor data')\n logging.debug('Sextractor file: {}'.format(target_dir + '/s_extraction/R2_wl_dpop_detarea.cat'))\n with open(outputfile, 'w') as file:\n file.write('global color=blue width=5 font=\"helvetica 15 normal roman\" highlite=1 \\n')\n file.write('image\\n')\n\n for i in range(len(xx)):\n newline = 'circle(' + str(xx[i]) + ',' + str(yy[i]) + ',7) \\n'\n file.write(newline)\n print ''\n print 'Check catalog_ds9_sextractor.reg in the /s_extraction directory for'\n print 'the quality of source extraction.'\n print ''\n\n return target_dir + '/s_extraction/R2_wl_dpop_detarea.cat'", "def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return container_data_util.ContainerData(\n registry=img_name.registry,\n repository=img_name.repository,\n digest=img_name.digest)", "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running runcat on pixel %d\" % (self.pixel))\n\n runcat = RunCatalog(self.config)\n if not os.path.isfile(runcat.filename):\n runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)\n runcat.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def main():\n\n # Just grab all files - we'll use try/except to filter\n images = glob.glob(os.path.join(args.input_dir, '*.*'))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n for img_file in images:\n print(img_file)\n try:\n np_img = plt.imread(img_file)\n print(np_img.shape)\n img_name = img_file.split(os.sep)[-1]\n new_img_file = os.path.join(args.output_dir, img_name)\n pad_image(np_img, new_img_file)\n except Exception as e:\n print('Warning: {}. Skpping file.'.format(e))\n continue", "def multi_run_wrapper(args):\n\treturn img_preprocessing(*args)", "def run_archive_viewer(self, obj):\n\n safe = [\n 'pyi_carchive',\n 'pyi_rth_win32comgenpy',\n '_pyi_bootstrap',\n '_pyi_egg_install.py'\n ]\n\n # This doesn't work. 
Everything is showing as an invalid CArchive file.\n with self._write_to_file() as tmp_file:\n try:\n arch = get_archive(tmp_file)\n if type(arch.toc) == type({}):\n toc = arch.toc\n else:\n toc = arch.toc.data\n for t in toc:\n d = {'Position': t[0],\n 'Length': t[1],\n 'Uncompressed': t[2],\n 'IsCompressed': t[3],\n 'Type': t[4],\n 'RawData': \"\"\n }\n if t[4] == 's' and t[5] not in safe:\n try:\n block = self.get_data(t[5], arch).encode('utf-8',\n \"ignore\")\n except:\n self._info(\"%s: Block not valid utf-8. Trying utf-16.\" % t[5])\n try:\n block = self.get_data(t[5], arch).encode('utf-16',\n \"ignore\")\n except:\n self._info(\"%s: Block not valid utf-16. Trying utf-32.\" % t[5])\n try:\n block = self.get_data(t[5], arch).encode('utf-32',\n \"ignore\")\n except:\n self._info(\"%s: Block not valid utf-32. Trying latin-1.\" % t[5])\n try:\n block = self.get_data(t[5], arch).encode('latin-1',\n 'ignore')\n except:\n self._info(\"%s: Block not valid latin-1. Done trying.\" % t[5])\n block = None\n if block is not None:\n bmd5 = md5(block).hexdigest()\n bsha1 = sha1(block).hexdigest()\n bsha256 = sha256(block).hexdigest()\n block = block.replace('http', 'hxxp')\n description = '\"%s\" pulled from Sample\\n\\n' % t[5]\n description += 'MD5: %s\\n' % bmd5\n description += 'SHA1: %s\\n' % bsha1\n description += 'SHA256: %s\\n' % bsha256\n title = t[5]\n data_type = \"Python\"\n tool_name = \"pyinstaller_service\"\n result = handle_raw_data_file(\n block,\n obj.source,\n user=self.current_task.username,\n description=description,\n title=title,\n data_type=data_type,\n tool_name=tool_name,\n )\n if result['success']:\n self._info(\"RawData added for %s\" % t[5])\n res = obj.add_relationship(\n rel_item=result['object'],\n rel_type=RelationshipTypes.CONTAINED_WITHIN,\n rel_confidence=\"high\",\n analyst=self.current_task.username\n )\n if res['success']:\n obj.save(username=self.current_task.username)\n result['object'].save(username=self.current_task.username)\n url = reverse('crits.core.views.details',\n args=('RawData',\n result['_id']))\n url = '<a href=\"%s\">View Raw Data</a>' % url\n d['RawData'] = url\n self._info(\"Relationship added for %s\" % t[5])\n else:\n self._info(\"Error adding relationship: %s\" % res['message'])\n else:\n self._info(\n \"RawData addition failed for %s:%s\" % (t[5],\n result['message'])\n )\n self._add_result(\"Info\", t[5], d)\n except Exception, e:\n self._info(\"Error: %s\" % str(e))", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True,\n check_parfile=True, check_randfile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = 
'%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n self.config.start_file_logging()\n self.config.logger.info(\"Running zmask on pixel %d\" % (self.pixel))\n\n rand_zmask = RunRandomsZmask(self.config)\n\n if not os.path.isfile(rand_zmask.filename):\n rand_zmask.run()\n rand_zmask.output(savemembers=False, withversion=False)\n\n # All done\n self.config.stop_file_logging()", "def __call__(self, images, targets):\n pass", "def bam_extract_main(args: argparse.Namespace) -> int:\n return bam_extract_run(BamExtractConfig.from_namespace(args))", "def catalog_image(imagedir, package_filtering_enabled=True):\n all_results = run_syft(imagedir)\n return convert_syft_to_engine(all_results, package_filtering_enabled)", "def main():\n\n inputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_raw'\n outputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_faces'\n\n # detects all faces from all images in inputDirectory and outputs\n # to outputDirectory\n FaceDetection.extractFaces(\n inputDirectory=inputDirectory, outputDirectory=outputDirectory)", "def Extract_zip_file (path_to_zip,dir_to_save_into):\n with zipfile.ZipFile(path_to_zip) as zf:\n \n for member in tqdm(zf.namelist(), desc='Extracting'):\n try:\n if ('annotations' in member) and (member.endswith('.json')): \n zf.extract(member, dir_to_save_into)\n shutil.move(os.path.join(dir_to_save_into,member),dir_to_save_into)\n if ('train' in member):\n zf.extract(member, dir_to_save_into)\n if ('test' in member):\n zf.extract(member, dir_to_save_into)\n if ('val' in member):\n zf.extract(member, dir_to_save_into)\n except zipfile.error as e:\n pass\n\n #delete zip\n os.remove(path_to_zip)\n if(os.path.isdir(os.path.join(dir_to_save_into,'annotations'))):\n # remove the tmp annotations directory\n shutil.rmtree(os.path.join(dir_to_save_into,'annotations'))", "def run_packaging(self):\n if self.check_dest_root():\n self.view.show_message('Collecting source files information......')\n collect_thread = CollectThread(self.view.folder_line.text())\n collect_thread.finish.connect(partial(self.collect_finish,\n collect_thread))\n self.view.thread_pool.append(collect_thread)\n collect_thread.start()", "def main():\n\n dir_path =r'/Users/dustin/CS/projects/ship_detector/data/ships-in-satellite-imagery/shipsnet/'\n\n data_array, label_array = read_images(dir_path)\n\n array_info(data_array, label_array)\n\n image_info(data_array[0,:], plot_image=False)\n\n split_ratios = [0.8, 0.1, 0.1] #splitting the dataset into 80% train, 10% dev, 10% test\n\n X_train, X_dev, X_test, Y_train, Y_dev, Y_test = dataset_split(data_array, label_array, split_ratios)", "def main():\n parser = argparse.ArgumentParser(description=\"Extract images from a ROS bag.\")\n parser.add_argument(\"bag_file\", help=\"Input ROS bag.\")\n parser.add_argument(\"base_dir\", nargs='?', default=\"./dataset\", help=\"Output directory.\")\n args = parser.parse_args()\n bag = rosbag.Bag(args.bag_file, \"r\")\n\n print \"Extract topics from %s into %s\" %(args.bag_file, args.base_dir)\n if not os.path.exists(os.path.join(args.base_dir,\"image_dvs/\")):\n os.makedirs(os.path.join(args.base_dir,\"image_dvs\"), mode=0o777)\n if not os.path.exists(os.path.join(args.base_dir,\"image_rgbd/\")):\n os.makedirs(os.path.join(args.base_dir,\"image_rgbd\"), mode=0o777)\n if not os.path.exists(os.path.join(args.base_dir,\"depth/\")):\n os.makedirs(os.path.join(args.base_dir,\"depth\"), mode=0o777)\n\t\n text_image_dvs = open(os.path.join(args.base_dir,\"image_dvs.txt\"), 'w')\n 
text_image_rgbd = open(os.path.join(args.base_dir,\"image_rgbd.txt\"), 'w')\n text_depth = open(os.path.join(args.base_dir,\"depth.txt\"), 'w')\n text_events = open(os.path.join(args.base_dir,\"events.txt\"), 'w')\n text_imu = open(os.path.join(args.base_dir,\"imu.txt\"), 'w')\n text_gt_pose = open(os.path.join(args.base_dir,\"pose.txt\"), 'w')\n\n text_image_dvs.write(\"# DVS images\\n\")\n text_image_dvs.write(\"# timestamp filename\\n\")\n text_image_rgbd.write(\"# RGBD images\\n\")\n text_image_rgbd.write(\"# timestamp filename\\n\")\n text_depth.write(\"# RGBD depth\\n\")\n text_depth.write(\"# timestamp filename\\n\")\n text_events.write(\"# events\\n\")\n text_events.write(\"# timestamp x y polarity\\n\")\n text_imu.write(\"# imu\\n\")\n text_imu.write(\"# acceleration gyroscope\\n\")\n text_imu.write(\"# timestamp ax ay az gx gy gz\\n\")\n text_gt_pose.write(\"# timestamp x y z qx qy qz qw\\n\")\n \n\n for topic, msg, t in bag.read_messages(topics=[\"/camera/rgb/image_color\", \"/camera/depth_registered/image\", \"/dvs/events\", \"/dvs/image_raw\", \"/dvs/imu\", \"/vicon/\"]):\n\n if topic == \"/dvs/image_raw\":\n save_image(msg, t, args.base_dir, \"image_dvs\", text_image_dvs)\n elif topic == \"/camera/rgb/image_color\":\n save_image(msg, t, args.base_dir, \"image_rgbd\", text_image_rgbd)\n elif topic == \"/camera/depth_registered/image\":\n save_depth(msg, t, args.base_dir, \"depth\", text_depth)\n elif topic == \"/dvs/events\":\n save_event(msg, text_events)\n elif topic == \"/dvs/imu\":\n save_imu(msg, t, text_imu)\n elif topic == \"/vicon/\":\n save_pose(msg, t, text_gt_pose)\n\n print \"\\rTime passed: %i.%09i [s]\" %(t.secs, t.nsecs),\n\n text_image_dvs.close()\n text_image_rgbd.close()\n text_depth.close()\n text_events.close()\n text_imu.close()\n text_gt_pose.close()\n bag.close()\n\n return", "def main():\n print(\"Program version: 1.5\")\n StartTime = datetime.now()\n args = parseArguments()\n\n verbose = args.verbose\n images = args.images\n ignore_warnings = args.ignore_warnings\n if(args.silent):\n verbose = False\n images = False\n ignore_warnings = True\n\n if(args.images):\n plt.ioff()\n\n if(args.ignore_warnings):\n warnings.simplefilter('ignore', UserWarning)\n\n #sample header keywords\n # OBJECT = 'P016+03_P1_JKdeep' / Original target\n # RA = ' 01:06:37.759' / 01:06:37.7 RA (J2000) pointing\n # DEC = ' 03:32:36.096' / 03:32:36.0 DEC (J2000) pointing\n # EQUINOX = 2000. / Standard FK5 (years)\n # RADECSYS= 'FK5 ' / Coordinate reference frame\n # CRVAL1 = 16.65733 / 01:06:37.7, RA at ref pixel\n # CRVAL2 = 3.54336 / 03:32:36.0, DEC at ref pixel\n # CRPIX1 = 447. /Ref pixel in X\n # CRPIX2 = 452. / Ref pixel in Y\n # CDELT1 = -8.0000000000000E-5 / SS arcsec per pixel in RA\n # CDELT2 = 8.00000000000003E-5 / SS arcsec per pixel in DEC\n # CTYPE1 = 'RA---TAN' / pixel coordinate system\n # CTYPE2 = 'DEC--TAN' / pixel coordinate system\n # PC1_1 = 0.000000 / Translation matrix element\n # PC1_2 = 1.000000 / Translation matrix element\n # PC2_1 = -1.000000 / Translation matrix element\n # PC2_2 = 0.000000 / Translation matrix element\n\n fits_image_filenames = args.input\n\n #if directory given search for appropriate fits files\n\n if(os.path.isdir(fits_image_filenames[0])):\n print(\"detected a directory. 
Will search for fits files in it\")\n path = fits_image_filenames[0]\n fits_image_filenames = []\n for file in os.listdir(path):\n if file.endswith(\".fits\") and \"_astro\" not in file:\n fits_image_filenames.append(path+\"/\"+file)\n print(fits_image_filenames)\n\n multiple = False\n if(len(fits_image_filenames)>1):\n multiple = True\n not_converged = []\n converged_counter = 0\n for fits_image_filename in fits_image_filenames:\n\n result,_ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=0, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent =args.silent, sigma_threshold_for_source_detection= args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if((not result) and args.rotation_scaling):\n print(\"Did not converge. Will try again with full rotation and scaling\")\n result, _ = astrometry_script(fits_image_filename, catalog=args.catalog, rotation_scaling=args.rotation_scaling, xy_transformation=args.xy_transformation, fine_transformation=args.fine_transformation,\n images=images, vignette=args.vignette,vignette_rectangular=args.vignette_rectangular, cutouts=args.cutout, ra=args.ra, dec=args.dec, projection_ra=args.projection_ra, projection_dec=args.projection_dec, verbose=verbose, save_images=args.save_images, ignore_header_rot=args.ignore_header_rot, radius = args.radius, save_bad_result=args.save_bad_result, silent=args.silent, sigma_threshold_for_source_detection=args.sigma_threshold_for_source_detection, high_res=args.high_resolution, hdul_idx=args.hdul_idx, filename_for_sources=args.filename_for_sources, FWHM=args.seeing)\n\n if(result):\n print(\"Astrometry was determined to be good.\")\n converged_counter = converged_counter+1\n else:\n print(\"Astrometry was determined to be bad.\")\n not_converged.append(fits_image_filename)\n if(args.save_bad_result):\n print(\"Result was saved anyway\")\n else:\n print(\"Result was not saved.\")\n # print(\"\")\n # print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n # print(\"> Astrometry for {} \".format(fits_image_filename))\n #\n # with fits.open(fits_image_filename) as hdul:\n # #print(hdul.info())\n # if(args.verbose):\n # print(\"if image is not at first position in the fits file the program will break later on\")\n # #print(hdul[0].header)\n #\n # hdu = hdul[0]\n # #hdu.verify('fix')\n # hdr = hdu.header\n #\n #\n # image_or = hdul[0].data.astype(float)\n # median = np.nanmedian(image_or)\n # image_or[np.isnan(image_or)]=median\n # image = image_or - median\n #\n # observation = find_sources(image, args.vignette)\n # #print(observation)\n #\n # positions = (observation['xcenter'], observation['ycenter'])\n # apertures = CircularAperture(positions, r=4.)\n #\n #\n # #world coordinates\n # print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n # print(WCS(hdr))\n #\n # hdr[\"NAXIS1\"] = image.shape[0]\n # hdr[\"NAXIS2\"] = image.shape[1]\n #\n # #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to 
make problems with pc conversios, so i wwitched to the form below\n # wcsprm = WCS(hdr).wcs\n # wcsprm_original = WCS(hdr).wcs\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n # wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, args.ra, args.dec, args.projection_ra, args.projection_dec)\n # if(args.verbose):\n # print(WCS(wcsprm.to_header()))\n #\n # #print(wcsprm)\n # #wcsprm.pc = [[2, 0],[0,1]]\n #\n #\n # #Possibly usefull examples of how to use wcsprm:\n # #print(wcsprm.set())\n # #print(wcsprm.get_pc())\n # #pc = wcsprm.get_pc()\n # #print(np.linalg.det(pc))\n # #print(wcsprm.get_cdelt())\n # #wcs.fix()\n # #print(wcsprm.print_contents())\n # #print(repr(hdr.update(wcsprm.to_header().encode('utf-8')))) #not working\n #\n # #hdu.verify(\"fix\")\n # #print(repr(hdr))\n # #wcs.wcs_pix2world(pixcrd, 1)\n # #wcs.wcs_world2pix(world, 1)\n # #wcs.wcs.crpix = [-234.75, 8.3393]\n # # wcs.wcs.cdelt = np.array([-0.066667, 0.066667])\n # # wcs.wcs.crval = [0, -90]\n # # wcs.wcs.ctype = [\"RA---AIR\", \"DEC--AIR\"]\n # # wcs.wcs.set_pv([(2, 1, 45.0)])\n # # For historical compatibility, three alternate specifications of the linear transformations\n # # are available in wcslib. The canonical PCi_ja with CDELTia, CDi_ja, and the deprecated CROTAia\n # # keywords. Although the latter may not formally co-exist with PCi_ja,\n # # the approach here is simply to ignore them if given in conjunction with PCi_ja.\n # # has_pc, has_cd and has_crota can be used to determine which of these alternatives are present in the header.\n # # These alternate specifications of the linear transformation matrix are translated immediately to PCi_ja by set\n # # and are nowhere visible to the lower-level routines. In particular, set resets cdelt to unity if CDi_ja is present\n # # (and no PCi_ja). If no CROTAia is associated with the latitude axis, set reverts to a unity PCi_ja matrix.\n #\n #\n #\n #\n #\n # #get rough coordinates\n # #print(hdr[\"RA\"])\n # #coord = SkyCoord(hdr[\"RA\"], hdr[\"DEC\"], unit=(u.hourangle, u.deg), frame=\"icrs\")\n # coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # if(not PIXSCALE_UNCLEAR):\n # if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n # print(\"central value outside of the image, moving it to the center\")\n # coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n # coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n # #print(wcsprm)\n #\n #\n #\n # #better: put in nice wrapper! 
with repeated tries and maybe try synchron!\n # print(\">Dowloading catalog data\")\n # radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n # catalog_data = query.get_data(coord, radius, args.catalog)\n # #reference = reference.query(\"mag <20\")\n # max_sources = 500\n # if(INCREASE_FOV_FLAG):\n # max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n # if(catalog_data.shape[0]>max_sources):\n # catalog_data = catalog_data.nsmallest(400, \"mag\")\n #\n # if(args.catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n # print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n # catalog_data2 = query.get_data(coord, radius, \"PS\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n # elif(args.catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n # print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n # catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n # catalog_data = pd.concat([catalog_data, catalog_data2])\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n #\n # #remove duplicates in catalog?\n #\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n #\n # #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Input for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n #\n # plt.xlim(-200,image.shape[0]+200)\n # plt.ylim(-200,image.shape[1]+200)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_before.pdf\")\n #\n # ###tranforming to match the sources\n # print(\"---------------------------------\")\n # print(\">Finding the transformation\")\n # if(args.rotation_scaling):\n # print(\"Finding scaling and rotation\")\n # wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=args.verbose)\n # if(args.xy_transformation):\n # print(\"Finding offset\")\n # wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= args.verbose)\n #\n # #correct subpixel error\n # obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=3)\n # rms = np.sqrt(np.mean(np.square(distances)))\n # best_score = len(obs_x)/(rms+10) #start with current best score\n # fine_transformation = False\n # if(args.fine_transformation):\n # for i in [2,3,5,8,10,6,4, 20,2,1,0.5]:\n # wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i)\n # if(score> best_score):\n # wcsprm 
= wcsprm_new\n # best_score = score\n # fine_transformation = True\n # if not fine_transformation:\n # print(\"Fine transformation did not improve result so will be discarded.\")\n # else:\n # print(\"Fine transformation applied to improve result\")\n # #register.calculate_rms(observation, catalog_data,wcs)\n #\n # #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n # wcs =WCS(wcsprm.to_header())\n # if(args.verbose):\n # print(wcs)\n #\n # from astropy.wcs import utils\n # scales = utils.proj_plane_pixel_scales(wcs)\n # print(scales)\n # cdelt = wcsprm.get_cdelt()\n # print(cdelt)\n # scale_ratio = scales/cdelt\n # #print(scale_ratio)\n # pc = np.array(wcsprm.get_pc())\n # pc[0,0] = pc[0,0]/scale_ratio[0]\n # pc[1,0] = pc[1,0]/scale_ratio[1]\n # pc[0,1] = pc[0,1]/scale_ratio[0]\n # pc[1,1] = pc[1,1]/scale_ratio[1]\n # wcsprm.pc = pc\n # wcsprm.cdelt = scales\n # if(args.verbose):\n # print(\"moved scaling info to CDelt\")\n # print(WCS(wcsprm.to_header()))\n #\n # #WCS difference before and after\n # print(\"> Compared to the input the Wcs was changed by: \")\n # scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n # print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n # #sources:\n # #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n # #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n # def unit_vector(vector):\n # \"\"\" Returns the unit vector of the vector. \"\"\"\n # return vector / max(np.linalg.norm(vector), 1e-10)\n # def matrix_angle( B, A ):\n # \"\"\" comment cos between vectors or matrices \"\"\"\n # Aflat = A.reshape(-1)\n # Aflat = unit_vector(Aflat)\n # Bflat = B.reshape(-1)\n # Bflat = unit_vector(Bflat)\n # #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n # return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n # #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n # rotation_angle = matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360\n # if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n # text = \"counterclockwise\"\n # else:\n # text = \"clockwise\"\n # print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n # old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n # print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n #\n #\n # #check final figure\n # if(args.images):\n # fig = plt.figure()\n # fig.canvas.set_window_title('Result for {}'.format(fits_image_filename))\n # plt.xlabel(\"pixel x direction\")\n # plt.ylabel(\"pixel y direction\")\n # plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n # plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n # apertures.plot(color='blue', lw=1.5, alpha=0.5)\n # #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n # apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #\n # apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n # if(args.save_images):\n # name_parts = fits_image_filename.rsplit('.', 1)\n # plt.savefig(name_parts[0]+\"_image_after.pdf\")\n #\n # print(\"--- Evaluate how good the transformation is ----\")\n # 
register.calculate_rms(observation, catalog_data,wcsprm)\n #\n #\n # #updating file\n # write_wcs_to_hdr(fits_image_filename, wcsprm)\n #\n #\n # print(\"overall time taken\")\n # print(datetime.now()-StartTime)\n # if(args.images):\n # plt.show()\n if(multiple):\n print(\">> Final report:\")\n print(\"Processed {} files, {} of them did converge. The following files failed:\".format(len(fits_image_filenames), converged_counter))\n print(not_converged)\n print(\"-- finished --\")", "def _unpack_archive(self, dir, filters):\n ext = os.path.splitext(self.path)[1]\n if ext in [\".zip\", \".xpi\"]:\n if filters:\n raise GbpError(\"Can only filter tar archives: %s\", (ext, self.path))\n self._unpack_zip(dir)\n else:\n self._unpack_tar(dir, filters)", "def __extract_images(self, images_file, labels_file, phase):\n images, max_size = self.__readImages(\n os.path.join(self.outdir, images_file))\n assert len(labels) == len(images), '%d != %d' % (\n len(labels), len(images))\n\n map_size = len(images) * 28 * 28 * 10\n env = lmdb.open(self.outdir, map_size=map_size)\n\n with env.begin(write=True) as txn:\n # txn is a Transaction object\n for i, image in enumerate(images):\n datum = annfab.utils.image_to_datum(image, labels[i])\n str_id = '{:08}'.format(i)\n\n # The encode is only essential in Python 3\n txn.put(str_id.encode('ascii'), datum.SerializeToString())", "def mri_dixon_analysis(data_objects, working_dir, settings):\n\n logger.info(\"Running Dixon analysis Calculation\")\n logger.info(\"Using settings: %s\", settings)\n\n output_objects = []\n\n fat_obj = None\n water_obj = None\n for data_obj in data_objects:\n\n if data_obj.meta_data[\"image_type\"] == \"fat\":\n fat_obj = data_obj\n\n if data_obj.meta_data[\"image_type\"] == \"water\":\n water_obj = data_obj\n\n if fat_obj is None or water_obj is None:\n logger.error(\"Both Fat and Water Images are required\")\n return []\n\n # Read the image series\n fat_load_path = fat_obj.path\n if fat_obj.type == \"DICOM\":\n fat_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(fat_obj.path)\n fat_img = sitk.ReadImage(fat_load_path)\n\n water_load_path = water_obj.path\n if water_obj.type == \"DICOM\":\n water_load_path = sitk.ImageSeriesReader().GetGDCMSeriesFileNames(water_obj.path)\n water_img = sitk.ReadImage(water_load_path)\n\n # Cast to float for calculation\n fat_img = sitk.Cast(fat_img, sitk.sitkFloat32)\n water_img = sitk.Cast(water_img, sitk.sitkFloat32)\n\n # Let's do the calcuation using NumPy\n fat_arr = sitk.GetArrayFromImage(fat_img)\n water_arr = sitk.GetArrayFromImage(water_img)\n\n # Do the calculation\n divisor = water_arr + fat_arr\n fat_fraction_arr = (fat_arr * 100) / divisor\n fat_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n water_fraction_arr = (water_arr * 100) / divisor\n water_fraction_arr[divisor == 0] = 0 # Sets those voxels which were divided by zero to 0\n\n fat_fraction_img = sitk.GetImageFromArray(fat_fraction_arr)\n water_fraction_img = sitk.GetImageFromArray(water_fraction_arr)\n\n fat_fraction_img.CopyInformation(fat_img)\n water_fraction_img.CopyInformation(water_img)\n\n # Create the output Data Objects and add it to output_ob\n fat_fraction_file = os.path.join(working_dir, \"fat.nii.gz\")\n sitk.WriteImage(fat_fraction_img, fat_fraction_file)\n water_fraction_file = os.path.join(working_dir, \"water.nii.gz\")\n sitk.WriteImage(water_fraction_img, water_fraction_file)\n\n fat_data_object = DataObject(type=\"FILE\", path=fat_fraction_file, parent=fat_obj)\n 
output_objects.append(fat_data_object)\n\n water_data_object = DataObject(type=\"FILE\", path=water_fraction_file, parent=water_obj)\n output_objects.append(water_data_object)\n\n return output_objects" ]
[ "0.64381856", "0.63635296", "0.6358149", "0.6324381", "0.62869585", "0.6232183", "0.6155203", "0.6140258", "0.6044015", "0.6022487", "0.5966659", "0.5904075", "0.5880147", "0.58736247", "0.5790913", "0.5768327", "0.57619715", "0.5741277", "0.57412493", "0.5739594", "0.5689725", "0.56729126", "0.56600904", "0.5644759", "0.5640531", "0.5608747", "0.56026876", "0.55996144", "0.5592414", "0.5592389", "0.55861074", "0.5578356", "0.55687636", "0.5564896", "0.55485654", "0.55392885", "0.5520978", "0.5508276", "0.5505437", "0.54990315", "0.5471352", "0.54636717", "0.54625064", "0.5450366", "0.54461706", "0.5442952", "0.54397285", "0.54239684", "0.5422873", "0.5422536", "0.5416287", "0.5405465", "0.5401195", "0.5388142", "0.5384241", "0.53826326", "0.53731984", "0.5370947", "0.53633416", "0.53618807", "0.5360411", "0.5353784", "0.534766", "0.5346103", "0.53395367", "0.5337996", "0.533463", "0.533237", "0.5317334", "0.5316934", "0.5316518", "0.53143936", "0.53130776", "0.53116524", "0.5307702", "0.5294804", "0.5292406", "0.5288684", "0.52861136", "0.5275935", "0.52756625", "0.5273787", "0.52702564", "0.5268652", "0.5266614", "0.5253274", "0.52407813", "0.52380484", "0.5228869", "0.52287847", "0.52286935", "0.5225656", "0.52235806", "0.52227074", "0.5221515", "0.52200925", "0.52193165", "0.52187634", "0.521628", "0.5207633" ]
0.693419
0
Check if message contains Error or Warning for run_analysis functions.
def check_msg(msg):
    err = bool(0)
    if 'Error' in msg:
        err = bool(1)
    elif "Warning" in msg:
        msg = 'Success! Warning: Image already exists. ' \
              'Processing ran on existing image'
    else:
        msg = 'Image saved successfully'
    return err, msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def collect_errors_and_warnings(self) -> str:\n # Complete error message\n message = \"----------------ERRORS----------------\\n\"\n if self.errors == \"\":\n message = \"YOUR FILE IS VALIDATED!\\n\"\n logger.info(message)\n else:\n for error in self.errors.split(\"\\n\"):\n if error != \"\":\n logger.error(error)\n message += self.errors\n if self.warnings != \"\":\n for warning in self.warnings.split(\"\\n\"):\n if warning != \"\":\n logger.warning(warning)\n message += \"-------------WARNINGS-------------\\n\" + self.warnings\n return message", "def check_warnings(code_type, args):\n if code_type.startswith(FRAGMENT) and args.file is not None and args.code is False:\n writer(\n \"Warning: Analysing a deferred fragment in -F/--file \"\n + \"mode will most likely cause errors\", FORMAT['WARNING']\n )\n if code_type.startswith(PERMUTATION) and args.file is not None and args.code is False:\n writer(\n \"Warning: Individual permutation files in -F/--file \"\n + \"mode do not include deferred fragments\", FORMAT['WARNING']\n )", "def has_errors(self) -> bool:", "def error_check(command):\r\n\r\n # TODO\r", "def check_errors(self) -> None:", "def has_warnings(self) -> bool:", "def check_message(self, msg):\n pass", "def _has_error(self):\n # Public interface is given by get_status instead.\n # TODO: Cache result if reached execution with no error\n try:\n # If there is something in the error file:\n if path.getsize(path.join(self.run_dir, \"err.txt\")) > 0:\n return True\n except FileNotFoundError:\n pass\n try:\n with open(path.join(self.run_dir, \"out.txt\"), \"r\") as f:\n text = f.read()\n # TODO: Depending on the file size, the following might be better. Investigate this.\n # text = \"\".join(head(path.join(self.run_dir, \"out.txt\"), 300)+tail(path.join(self.run_dir, \"out.txt\"), 10))\n\n # TODO: The commented option is slower (even if compiled) than this one. 
Investigate.\n if \"(*error*)\" in text or re.search(\"Error reading .* parameters\", text) or re.search(\n \"MPI_ABORT was invoked\", text):\n # if re.search(\"\\(\\*error\\*\\)|Error reading .* parameters|MPI_ABORT was invoked\",text):\n return True\n else:\n return False\n except FileNotFoundError:\n return False", "def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False", "def has_warning(self):\n \n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n else:\n fraction = 0.0\n \n if self['skipped_subchannel'] > 0:\n return True\n elif fraction > 1.0e-4:\n return True\n else:\n return False", "def check_errors(self, data):\n for entry in data:\n if entry.find('ERROR') != -1:\n return entry\n return False", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def _check_rst_syntax_error(self):\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(\n os.path.join(self.module_path, rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n r'No directive entry for \"([\\w|\\-]+)\"|'\n r'Unknown directive type \"([\\w|\\-]+)\"|'\n r'No role entry for \"([\\w|\\-]+)\"|'\n r'Unknown interpreted text role \"([\\w|\\-]+)\"', msg)\n # TODO: Add support for sphinx directives after fix\n # https://github.com/twolfson/restructuredtext-lint/issues/29\n if res:\n # Skip directive errors\n continue\n self.msg_args.append((\n \"%s:%d\" % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True", "def run():\n parser = create_arguments_parser()\n arguments = parser.parse_args()\n\n num_warnings = 0\n for filename in arguments.filenames:\n num_warnings += analyse_file(filename, arguments)\n \n if num_warnings == 0:\n print (\"No warnings\")\n elif num_warnings == 1:\n print (\"There was a single warning\")\n else:\n print (\"There were \" + str(num_warnings))", "def check_analysis(self, analysis, ecosystem, package, version):\n try:\n assert analysis is not None, \"Analysis not available\"\n assert \"result\" in analysis, \"Can not find the 'result' node.\"\n result = analysis[\"result\"]\n self.check_recommendation_part(result)\n self.check_data_part(result, ecosystem, package, version)\n return \"OK\"\n except Exception as e:\n return \"Failed: \" + str(e)", "def _check_xml_syntax_error(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True", "def error(cls, message, *args, **kwargs):\n warnings.warn(\n cls.marker_theme.error() + cls.time() + cls.parse(message), *args, **kwargs\n )", "def has_errors_fatal(self) -> bool:", "def is_valid_analysis(info, logger=default_logger):\n\n if(info[\"center_name\"] not in [\"BCM\", \"BCCAGSC\", \"BI\", \"HMS-RK\", \"UNC-LCCC\", \"WUGSC\", \"USC-JHU\"]):\n logger.error(\"The center %s is not in the defined center vocabulary\" %(info.get(\"CN\", \"None\")))\n return False\n if(info[\"platform\"] not in [\"CAPILLARY\", \"LS454\", \"ILLUMINA\", \"SOLID\", \"HELICOS\", \"IONTORRENT\", \"PACBIO\"]):\n 
logger.error(\"The platform %s is not in the defined platform vocabulary\" %(info.get(\"PL\", \"None\")))\n return False\n if(info[\"platform_model\"] not in [\"Illumina Genome Analyzer II\", \"Illumina HiSeq\", \"Illumina HiSeq 2000\", \"Illumina HiSeq 2500\"]):\n logger.error(\"The platform unit %s is not in the defined platform vocabulary\" %(info.get(\"PU\", \"None\")))\n return False\n return True", "def isWarning(self):\n return _libsbml.XMLError_isWarning(self)", "def check_error_output(self, output):\n\n # Display info message\n log.info(\"check_error_output\")\n\n # Check if output has some data\n if output:\n\n # Yes\n\n # Display info message\n log.info(\"check_error_output: output has some data\")\n\n # Check all elements in the list of output\n for element in self._send_command_error_in_returned_output:\n\n # Display info message\n log.info(f\"check_error_output: element: {element}\")\n\n # Display info message\n log.info(f\"check_error_output: output[0]: {output[0]}\")\n\n # Check if the output starts with a string with an error message (like \"% Invalid input detected at '^' marker.\")\n\n # Error message?\n if output.startswith(element):\n\n # Yes\n\n # Raise an exception\n raise Exception(output)", "def has_warnings_active(self) -> bool:", "def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or (self._num_script_errors() > 0)", "def _check_input_data(self):\n\n n0, n1, corr, pval = np.nan, np.nan, np.nan, np.nan\n\n error_code_test = 0\n error_text_test = 'No error occurred'\n try:\n error_code_test, error_msg = self._check_notnull()\n if error_code_test == 0:\n error_code_test, error_msg, n0, n1 = self._check_group_obs(self.test_min_data)\n if error_code_test == 0:\n error_code_test, error_msg, corr, pval = \\\n self._check_spearman_corr(self.min_corr, self.max_p)\n if error_code_test != 0:\n error_text_test = str(error_msg)\n except:\n error_code_test = 9\n error_text_test = 'Unknown Error'\n\n self.checkstats = {'n0': n0, 'n1': n1, 'frame_spearmanR': corr, 'frame_corrPval': pval}\n\n self.error_code_test = error_code_test\n self.error_text_test = error_text_test\n\n return self.error_code_test, self.error_text_test", "def check_and_warn(sample_sheet=None,sample_sheet_file=None):\n # Acquire sample sheet linter instance\n linter = SampleSheetLinter(sample_sheet=sample_sheet,\n sample_sheet_file=sample_sheet_file)\n # Do checks\n warnings = False\n if linter.close_project_names():\n logger.warning(\"Some projects have similar names: check for typos\")\n warnings = True\n if linter.samples_with_multiple_barcodes():\n logger.warning(\"Some samples have more than one barcode assigned\")\n warnings = True\n if linter.samples_in_multiple_projects():\n logger.warning(\"Some samples appear in more than one project\")\n warnings = True\n if linter.has_invalid_characters():\n logger.warning(\"Sample sheet file contains invalid characters \"\n \"(non-printing ASCII or non-ASCII)\")\n warnings = True\n if linter.has_invalid_barcodes():\n logger.warning(\"Some samples have invalid barcodes\")\n warnings = True\n if linter.has_invalid_lines():\n logger.warning(\"Sample sheet has one or more invalid lines\")\n warnings = True\n return warnings", "def has_warning(miscobj):\n\n typewarning = (misctype_byname(miscobj.filetype).warning is not None)\n langwarning = (miscobj.language in ('Python 2', 'Python 3', 'Python'))\n return (typewarning or langwarning)", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = 
self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0", "def hasErrors(self):\n return False", "def is_error(response: str) -> bool:\n return \"ERROR\" in response", "def _validate(self, df: pd.DataFrame, **kwargs) -> tuple:\n errors = \"\"\n warnings = \"\"\n logger.info(\"NO VALIDATION for %s files\" % self._fileType)\n return errors, warnings", "def process_msg(self, msg, event_string, line_id):\n if msg is None:\n if line_id == -1:\n # all events are known and we don't want to report anything else\n logger.info(self._get_inner_time() + ' end of simulation')\n else:\n # failed to answer in time limit\n logger.error(self._get_inner_time() + ' ! %i: no answer', line_id)\n return True\n if msg.strip().lower() == 'ok':\n if line_id == -1:\n # all events are known and we don't want to report anything else\n logger.info(self._get_inner_time() + ' end of simulation')\n return True\n else:\n logger.info(self._get_inner_time() + ' < ok')\n return True\n else:\n self._anomaly_check(msg)\n return False", "def check_message(self, message):\n for word in self.bad_words:\n if word in message:\n return -1\n for word in self.filter_words:\n if word in message:\n return 0\n return 1", "def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True", "def warning_check(self, rule_to_check, valid):\r\n for warning in self.warning_functions:\r\n warning(rule_to_check, valid)", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n 
return False, msg\r\n return True, msg", "def run(input_string):\n funclist =[query_is_empty,\n parentheses_are_uneven,\n operators_with_no_words_in_between,\n operator_following_opening_parenthesis_or_before_closing_parenthesis,\n quotation_marks_are_uneven,\n operators_within_exact_phrase,\n distance_must_be_between_1_and_999]\n errorcount = 0\n errorlist = []\n for func in funclist:\n if func(input_string) is False:\n errorcount += 1\n errorlist.append(\"Error: {}\".format(func.__name__))\n if errorcount != 0:\n return \"{} Errors found.\".format(errorcount), errorlist\n else:\n return True, []", "def show_error_message(self, error_name):\n return error_name in self.errors", "def check_message(self, message: str) -> bool:\n processed_message = message.lower()\n\n for _, module in self.modules.get_modules():\n if not module.is_loaded:\n continue\n\n for _, reg_list in module.module_settings.templates.items():\n find_match = any(\n [re.findall(reg, processed_message) for reg in reg_list]\n )\n if find_match:\n return True\n return False", "def isInfo(self):\n return _libsbml.XMLError_isInfo(self)", "def HasErrors(self):\n for name in self._GetStreamNames():\n if name.startswith('error_data.'):\n return True\n\n return False", "def check_no_silent_crash(self, override=False):\n if self.results:\n score = self.results.linter.stats.get('global_note', False)\n if score is False:\n messages = self.results.linter.stats.get('by_msg', {})\n if messages.get('syntax-error', False) and not override:\n self.logging.warning('\\n------------------------------------------------------------------')\n self.logging.warning('PYLINT FAILED BECAUSE SYNTAX ERROR.')\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning('\\n')\n self.failed_files.append(self.fname)\n return False\n self.logging.info('\\n------------------------------------------------------------------')\n self.logging.info('FILE WAS IGNORED.')\n self.logging.info('------------------------------------------------------------------')\n return True\n return False", "def check_and_print_if_error(self): # pragma: no cover\n dupes, empties, not_detected = self._get_aberrations()\n if dupes:\n print 'duplicate entries for:'\n for dup, matches in dupes:\n print ' %s: %s' % (dup, [f.func_name for f in matches])\n if empties:\n print 'empty entries for:'\n for empty in empties:\n print ' ' + str(empty)\n if not_detected:\n print 'dimensions not detected:'\n for n_d in not_detected:\n print ' ' + str(n_d)\n return self.is_correct", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def check( value, message, arguments = DEFAULT_ARGUMENTS):\n if value == None:\n err_msg = \"Error processing %s libSBML returned a null value trying to %s\" % (get_path( arguments), message)\n raise SystemExit( err_msg)\n elif type( value) is int:\n if value == libsbml.LIBSBML_OPERATION_SUCCESS:\n return\n else:\n err_msg = 'Error processing {0} trying to {1} libSBML returned error code {2} : \"{3}\"'.format( get_path( arguments), message, value, libsbml.OperationReturnValue_toString(value).strip())\n raise SystemExit(err_msg)\n else:\n return", "def is_failed(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-failed').succeeded", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def check_output_contains(context, text, err_msg):\n res = re.search(text, 
context.output.decode('utf-8'))\n if res is None:\n print(context.output.decode('utf-8'))\n raise Exception(err_msg)", "def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise Exception(msg)", "def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)", "def check_for_errors(self):\n\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"*** Psi4 exiting successfully.\" in line:\n return {\"success\": True}\n\n elif \"*** Psi4 encountered an error.\" in line:\n return {\"success\": False, \"error\": \"Not known\"}\n\n return {\"success\": False, \"error\": \"Segfault\"}", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def has_err_warn(self):\r\n return self._arm.has_err_warn", "def __call__(self, read, info: ModificationInfo):\n return expected_errors(read.qualities) > self.max_errors", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def error(msg):\n return ErrorRule(msg)", "def parse_lspci_vv_chk_error(output,raiseOnErrors = \"1\"):\n \n found_devSta = 0\n \n #sys.exit(1)\n if re.search(\"DevSta\",output):\n found_devSta = 1\n \n # remove DevStat after splitting it\n l_a = output.split(\":\")\n l_a1 = l_a[1].split()\n for m in l_a1:\n \n # if ends with +, \n if re.search(\"Err\",m):\n if re.search(\".*\\+$\",m):\n \n print \"-\" * 8\n \n errorStr = \"Found + in lspci output for '%s' , line details '%s'\"%(m,output)\n trace_error(errorStr)\n if raiseOnErrors == \"1\":\n raise ViriError(errorStr)\n\n return 2\n \n if found_devSta == 0:\n raise ViriError(\"Did not find 'devSta' in the output %s\"%output)\n\n trace_info(\"No lspci correctable or uncorrectable issues seem to be present , output '%s'\"%output)\n return 1", "def warnings(self) -> List[Error]:", "def measurement_testing(self, measurements):\n if (isinstance(measurements, list) | isinstance(measurements, tuple)):\n for 
measurement in measurements:\n if isinstance(measurement, str):\n error_msg = 'Individual measurements cannot be strings'\n return error_msg\n if measurement <= 0:\n error_msg = 'All measurements must be positive: {}'.format(measurements)\n return error_msg\n if not (isinstance(measurement, float) | isinstance(measurement, int)):\n error_msg = 'All measurements must be floats or integers: {}'.format(measurements)\n return error_msg\n else:\n error_msg = 'Measurements must be a list or tuple: {}'.format(measurements)\n return error_msg\n return ''", "def debug_error_message(msg):\r\n\r\n action = config.compute_test_value\r\n\r\n #this message should never be called when the debugger is off\r\n assert action != 'off'\r\n\r\n if action in ['raise', 'ignore']:\r\n raise ValueError(msg)\r\n else:\r\n assert action == 'warn'\r\n warnings.warn(msg, stacklevel=2)", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def error(self, message=None, show_help=True):", "def check_if_help_message(message):\n return \"The commands are\" in message", "def alert(self, msg):\n raise DatacheckException(msg)", "def check_parsing_errors(self, lens: str) -> None:\n error_files = self.aug.match(\"/augeas//error\")\n\n for path in error_files:\n # Check to see if it was an error resulting from the use of\n # the httpd lens\n lens_path = self.aug.get(path + \"/lens\")\n # As aug.get may return null\n if lens_path and lens in lens_path:\n msg = (\n \"There has been an error in parsing the file {0} on line {1}: \"\n \"{2}\".format(\n # Strip off /augeas/files and /error\n path[13:len(path) - 6],\n self.aug.get(path + \"/line\"),\n self.aug.get(path + \"/message\")))\n raise errors.PluginError(msg)", "def IdentifySimpleError(langage, errorLog):\n\t\n\tif(langage == \"perl\"):\n\t\terrorLogInArray = errorLog.split(\" \")\n\t\tif(errorLogInArray[0] == \"syntax\" and errorLogInArray[1] == \"error\"):\n\t\t\tif (errorLogInArray[4] == \"line\"):\n\t\t\t\terrorLineInArray = errorLogInArray[5].split(\",\")\n\t\t\t\terrorLine = int(errorLineInArray[0])-1\n\t\t\t\tprint \"ADD \\\";\\\" at the end of line \"+str(errorLine)+\" IF \\\";\\\" is missing\\n\"", "def ReportWarning(text):\n print('warning: %s' % text)", "def check_log():\r\n errors = 0\r\n warnings = 0\r\n log_handle = open(CurrentLogPath.path)\r\n for line in log_handle:\r\n if 'ERROR:' in line:\r\n errors += 1\r\n elif 'WARNING' in line:\r\n warnings += 1\r\n log_handle.close()\r\n if errors or warnings:\r\n print '!'*80\r\n if errors:\r\n print '{0} errors occurred during the run! See log file for more ' \\\r\n 'details!'.format(errors)\r\n if warnings:\r\n print '{0} warnings occurred during the run! 
See log file for ' \\\r\n 'more details!'.format(warnings)\r\n print 'Run was completed anyway.'\r\n print '\\n\\n'", "def calculateError(self, errorMessage):\n self.no_error = False\n QMessageBox.warning(\n self, \"Found Error\", errorMessage, QMessageBox.Ok, QMessageBox.Ok\n )", "def __CheckVulnerability__(self, content):\n if self.lang == 'PHP':\n\n with open(\"./WebAttack/sqlinjection/payloads/ErrorBaseResult.json\") as jsonFile:\n errorListMysql = json.load(jsonFile)\n for item in errorListMysql[\"mysql\"]:\n if content.find(item) is not -1:\n return 'mysql'\n\n if str(content).decode('utf-8') != str(self.firstResponse.text).decode('utf-8'):\n return 'maybe it is mysql'\n\n elif self.lang == 'ASP.net':\n\n with open(\"./WebAttack/sqlinjection/payloads/ErrorBaseResult.json\") as jsonFile:\n errorListMysql = json.load(jsonFile)\n for item in errorListMysql[\"sqlserver\"]:\n if content.find(item) is not -1:\n return 'MSsql'\n\n if content != self.firstResponse.text:\n return 'maybe it is MSsql'\n\n return 'none'", "def contains(self, *args):\n return _libsbml.SBMLErrorLog_contains(self, *args)", "def _check_message(self, _message_contents):\r\n if not type(_message_contents) is dict:\r\n self.logger.error(\"Message should be a dict.\")\r\n return False\r\n if not \"event\" in _message_contents:\r\n self.logger.error(\"Message dict has no event key.\")\r\n return False\r\n if not \"data\" in _message_contents:\r\n self.logger.error(\"Message dict has no data key.\")\r\n return False\r\n if not type(_message_contents[\"event\"]) == str:\r\n self.logger.error(\"Message event is not a string.\")\r\n return False\r\n if len(_message_contents[\"event\"]) == 0:\r\n self.logger.error(\"Message event cannot be empty.\")\r\n return False\r\n if not type(_message_contents[\"data\"]) == list:\r\n self.logger.error(\"Message data is not a list.\")\r\n return False\r\n if len(_message_contents[\"data\"]) == 0:\r\n self.logger.error(\"Message data cannot be empty list.\")\r\n return False\r\n return True", "def is_error(self):\n\n return self.severity in [AlertSeverity.CRITICAL, AlertSeverity.FATAL]", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def with_status_message(msg, action):\n try:\n show_status_message(msg)\n action()\n show_status_message(msg, True)\n return True\n except SublimeHaskellError as e:\n show_status_message(msg, False)\n log(e.reason)\n return False", "def _check_function(self, function, attr):\n if function == 'sum':\n if self._cast_to_number(attr):\n return True, ''\n else:\n return False, \"Invalid type for function '%s': '%s' should \"\n \"contain only number values.\" % (function, attr)\n if function == 'count':\n return True, ''\n if function == 'count distinct':\n return True, ''\n else:\n return False,\n \"Unknown function: '%s' is not a predefined function.\" % function", "def warning(self) -> Optional[pulumi.Input['AnyArgs']]:\n return pulumi.get(self, \"warning\")", "def test_irobotframework_report_error(self):\n with patch(\"jupyter_kernel_test.validate_message\", fake_validate):\n reply, outputs = self.execute_helper(code=ERROR_TASK)\n assert reply[\"content\"][\"status\"] != \"ok\"\n assert outputs", "def parsed_error_msg(self):\r\n return self.error_msg", "def 
test_check_data_fields_errors_warnings(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s1', 'ACGT', 'AAAxA', 's1_data'],\r\n ['s_2', 'CGTA', 'AAAA', 's2_data']]\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_data_fields(header,\r\n mapping_data, errors, warnings)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: AAAxA\\t1,2']\r\n expected_warnings = ['Invalid characters found in s_2\\t2,0']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def run_check_errors(cmd):\n if type(cmd) == str:\n cmd = cmd.split()\n output = subprocess.run(cmd, capture_output=True, text=True)\n if output.stderr != \"\":\n print_cmd = \" \".join(map(str, cmd))\n sys.exit(\n f\"The error {output.stderr} was generated when running {print_cmd}. Exiting.\"\n )\n return", "def check_errors(self) -> None:\n # TODO check the manual for error codes & interpert them.\n return self.send(self.cmd.GET_GLOBALSTATUS_CURRENTERROR)", "def is_valid_function(self, paras):\n if len(paras) != 0:\n return True\n return True", "def exit_with_message(error_text: str) -> NoReturn:\n raise PealSpeedParseError(peal_speed, error_text)", "def _validate_analysis_description(\n analysis_description,feature_types,sample_column='sample_igf_id',\n feature_column='feature_type',reference_column='reference'):\n try:\n messages = list()\n analysis_list = list()\n sample_id_list = list()\n if not isinstance(analysis_description,list):\n raise ValueError(\n 'Expecting a list of analysis_description, got {0}'.\\\n format(type(analysis_description)))\n if not isinstance(feature_types,list):\n raise ValueError(\n 'Expecting a list for feature_types, got {0}'.\\\n format(type(feature_types)))\n df = pd.DataFrame(analysis_description)\n for c in (sample_column,feature_column):\n if c not in df.columns:\n messages.\\\n append('missing {0} in analysis_data'.format(c))\n if len(messages) > 0:\n raise KeyError('Missing key column: {0}'.format(messages))\n analysis_list = \\\n list(\n df[feature_column].\\\n dropna().\\\n drop_duplicates().\\\n values)\n analysis_list = \\\n set(\n [f.replace(' ','_').lower()\n for f in analysis_list])\n analysis_list = list(analysis_list)\n sample_id_list = \\\n list(\n df[sample_column].\\\n dropna().\\\n drop_duplicates().\\\n values)\n for f,f_data in df.groupby(feature_column):\n f = f.replace(' ','_').lower()\n f_samples = list(f_data[sample_column].values)\n if f not in feature_types:\n messages.\\\n append('feature_type {0} is not defined: {1}'.\\\n format(f,f_samples))\n if len(f_samples) > 1:\n messages.\\\n append('feature {0} has {1} samples: {2}'.\\\n format(f,len(f_samples),','.join(f_samples)))\n if reference_column in df.columns:\n ref_msg = \\\n ['reference {0} does not exists'.format(r)\n for r in list(df['reference'].dropna().values)\n if not os.path.exists(r)]\n if len(ref_msg) > 0:\n messages.\\\n extend(ref_msg)\n return sample_id_list, analysis_list, messages\n except Exception as e:\n raise ValueError(e)", "def has_warnings(self) -> bool:\n return len(self.warnings) > 0", "def analysis_errors(self) -> str:\n errors = []\n\n # Get any callback errors\n for cid, callback in self._analysis_callbacks.items():\n if callback.status == AnalysisStatus.ERROR:\n errors.append(f\"\\n[Analysis Callback ID: {cid}]: {callback.error_msg}\")\n\n return \"\".join(errors)", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 
1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass" ]
[ "0.6047532", "0.59931284", "0.59822315", "0.5976988", "0.5970713", "0.59450835", "0.59394825", "0.5925255", "0.5906123", "0.59012663", "0.5873115", "0.58316827", "0.57754755", "0.57746696", "0.57093453", "0.5681229", "0.56606114", "0.5603111", "0.558577", "0.5583142", "0.55792844", "0.55420583", "0.5537128", "0.55256253", "0.550237", "0.5488178", "0.5479753", "0.5463412", "0.54489315", "0.54489315", "0.54489315", "0.54489315", "0.54489315", "0.54489315", "0.54489315", "0.54489315", "0.54479516", "0.5444334", "0.5432317", "0.54278964", "0.54167295", "0.54135627", "0.54114884", "0.53977406", "0.5394834", "0.53826046", "0.53777516", "0.5375229", "0.5371156", "0.5370895", "0.5370497", "0.5359532", "0.5353924", "0.5350114", "0.53318954", "0.5330193", "0.53254634", "0.5316748", "0.5310947", "0.5295437", "0.5284662", "0.52832377", "0.52683276", "0.526491", "0.52639675", "0.52639675", "0.5259837", "0.5257578", "0.5256103", "0.5253951", "0.5248237", "0.52465385", "0.52377677", "0.52315867", "0.5226094", "0.5213543", "0.5212793", "0.52125365", "0.5211489", "0.5208029", "0.5207207", "0.5201952", "0.5201096", "0.5194093", "0.5191249", "0.5189909", "0.51884615", "0.5180751", "0.517938", "0.5171763", "0.5165555", "0.51618576", "0.5159738", "0.5159321", "0.51576495", "0.5156162", "0.514722", "0.51401335", "0.5139661", "0.5136833" ]
0.6277207
0
Check if message contains Error for get new_user_id function. Raise a ValueError if message contains "Error".
def check_user(msg):
    if "Error" in msg:
        raise ValueError('User already exists.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_message_id(self, user, item_id, message_id):\n try:\n\n err_options = message_id.split(':', 2)\n msg_error_count = int(err_options[1])\n\n # check for token update\n if len(err_options) > 1 and err_options[2] != str(self.get_token(user)):\n self.log.warning(MSG_NEW_TOKEN_.format(item_id, msg_error_count, message_id))\n return 0\n # check update timestamp for message edit\n elif msg_error_count > 1:\n self.log.warning(MSG_FAILED_ALREADY_.format(item_id, message_id))\n return None\n\n elif msg_error_count >= 0:\n self.log.warning(MSG_RETRYING_FAILED_.format(item_id, msg_error_count, message_id))\n return msg_error_count\n\n except:\n self.log.error(MSG_FAILED_TO_PARSE_ID_.format(item_id, message_id, traceback.format_exc()))\n\n return None", "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def username_error(self, msg):\n raise NotImplementedError('username_error')", "def send_error_missing_id(message, obj_type):\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Missing id',\n \"object_type\": obj_type,\n \"description\": message\n }}), 400)", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def test_is_valid_user_id_invalid(self):\n ids = (\n (\"SGVsbG8gd29ybGQ\", \"non-digit ASCII\"),\n (\"0J_RgNC40LLQtdGCINC80LjRgA\", \"cyrillic text\"),\n (\"4pO14p6L4p6C4pG34p264pGl8J-EiOKSj-KCieKBsA\", \"Unicode digits\"),\n (\"4oaA4oaB4oWh4oWi4Lyz4Lyq4Lyr4LG9\", \"Unicode numerals\"),\n (\"8J2fjvCdn5nwnZ-k8J2fr_Cdn7rgravvvJngr6c\", \"Unicode decimals\"),\n (\"{hello}[world]&(bye!)\", \"ASCII invalid Base64\"),\n (\"Þíß-ï§-ňøẗ-våłìÐ\", \"Unicode invalid Base64\"),\n )\n\n for user_id, msg in ids:\n with self.subTest(msg=msg):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertFalse(result)", "def check_user_id(user_id):\n\n try:\n message = (\n 'Validating submitted user id.'\n )\n logger.info(message)\n if user_id != '':\n invalid = (\n int(user_id) < 0 or\n cassy.check_user_id_exists(int(user_id))\n )\n if invalid:\n raise PlantalyticsDataException(USER_ID_INVALID)\n message = (\n 'Submitted user id successfully validated.'\n )\n logger.info(message)\n except PlantalyticsException as e:\n raise e\n except ValueError:\n raise PlantalyticsDataException(USER_ID_INVALID)\n except Exception as e:\n raise e", "def validate_message(self, state_id, msg):\n pass", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def error(msg):\n return ErrorRule(msg)", "def validateID(self, id : int) -> int:\n # If ID is a string, ensure it can be casted to an int before casting and returning.\n if type(id) == str:\n if not lib.stringTyping.isInt(id):\n raise TypeError(\"user ID must be either int or string of digits\")\n return int(id)\n # If ID is not a string, nor an int, throw an error.\n elif type(id) != int:\n raise TypeError(\"user ID must be either int or string of digits\")\n # ID must be an int, so return it.\n return id", "def 
id_check(self, message):\n matches = ID_SYNTAX.match(message)\n if matches:\n return matches.group(1)\n return None", "def get_message_lexical_error (id_error: int, number_line: int) -> str:\n global lexical_errors\n if id_error == 1:\n error_message = \"Erro léxico na linha {}: Número inválido\".format(str(number_line))\n lexical_errors = lexical_errors + error_message +'\\n'\n return error_message\n if id_error == 2:\n error_message = \"Erro léxico na linha {}: Identificador inválido\".format(str(number_line))\n lexical_errors = lexical_errors + error_message + '\\n'\n return error_message\n if id_error == 3:\n error_message = \"Erro léxico na linha {}: Caracter inválido\".format(str(number_line))\n lexical_errors = lexical_errors + error_message + '\\n'\n return error_message\n if id_error == 4:\n error_message = \"Erro léxico na linha {}: Comentário não finalizado\".format(str(number_line))\n lexical_errors = lexical_errors + error_message + '\\n'\n return error_message", "def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))", "def _get_user_id(self, name):\n try:\n apiResponse = twitchAPI.twitchAPIGet(\"/users\", {\"login\": name}) #Try to get user id from API\n userID = apiResponse[\"data\"][0][\"id\"]\n except (KeyError, APIUnavailable):\n userID = input(\"Please enter the user id of the user: \")\n except IndexError: #If Twitch API does not return user id\n print(\"That user does not exist on Twitch.\")\n userID = False\n return(userID)", "def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")", "def parsed_error_msg(self):\r\n return self.error_msg", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def insert_user_message(user_message: str):\r\n sql = \"\"\"INSERT INTO messages(user_message)\r\n VALUES(%s) RETURNING user_id;\"\"\"\r\n conn = None\r\n user_id = None\r\n try:\r\n # read database configuration\r\n params = config()\r\n # connect to the PostgreSQL database\r\n conn = psycopg2.connect(**params)\r\n # create a new cursor\r\n cur = conn.cursor()\r\n # execute the INSERT statement\r\n cur.execute(sql, (user_message,))\r\n # get the generated id back\r\n user_id = cur.fetchone()[0]\r\n # commit the changes to the database\r\n conn.commit()\r\n # close communication with the database\r\n cur.close()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n finally:\r\n if conn is not None:\r\n conn.close()\r\n return 'Loading'", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")", "def 
_parse_error(self, error):\n error = str(error)\n # Nvidia\n # 0(7): error C1008: undefined variable \"MV\"\n m = re.match(r'(\\d+)\\((\\d+)\\)\\s*:\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # ATI / Intel\n # ERROR: 0:131: '{' : syntax error parse error\n m = re.match(r'ERROR:\\s(\\d+):(\\d+):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # Nouveau\n # 0:28(16): error: syntax error, unexpected ')', expecting '('\n m = re.match(r'(\\d+):(\\d+)\\((\\d+)\\):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(4)\n # Other ...\n return None, error", "def handle_validation_error(self, error, bundle_errors):\n \n error_str = six.text_type(error)\n error_msg = self.help.format(error_msg=error_str) if self.help else error_str\n msg = {self.name: error_msg}\n\n if bundle_errors:\n return error, msg\n flask_restful.abort(400, message=msg)", "def error(self, message):\n return self.log(\"ERROR\", message)", "def raise_error(field: str, message: str, parent_error: Optional[Exception] = None) -> NoReturn:\n if parent_error is None:\n raise RowGenParseError(json, field, message)\n raise RowGenParseError(json, field, message) from parent_error", "def test_create_user_with_invalid_input(self, user, status_code, len_):\n # setup: none\n\n # test\n resp = self.create_user(user)\n try:\n assert resp.status_code == status_code\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp.text.find(\"Passwords must have at least one non alphanumeric character\") == len_\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # teardown: none", "def check_value(is_valid, error_msg):\n if not is_valid:\n raise ValueError(error_msg)", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. 
Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def sendErrorMessage(msg): #@NoSelf", "def error_message(iden: int | None, code: str, message: str) -> dict[str, Any]:\n return {\n \"id\": iden,\n \"type\": const.TYPE_RESULT,\n \"success\": False,\n \"error\": {\"code\": code, \"message\": message},\n }", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def getUIDValidity(self):\n return 42", "def handle_error(msg):\r\n query.error_message = msg\r\n query.status = ADMIN_USER_QUERY_STATUSES.FAILED.value\r\n db.session.commit()\r\n raise Exception(msg)", "async def error(self, channel_id,user_infos, user_id, team_id):\n # Message de commande incorrecte\n error = \"Commande invalide. Veuillez utiliser la commande [help] pour plus d'informations.\"\n return await self.sendText(error, channel_id,user_infos, team_id)", "def get_user_id(user_name):\n try:\n return_msg = {}\n result = \"\"\n\n with UserDao() as userDao:\n user_id = userDao.getUserId(user_name) \n if user_id:\n return user_id\n else:\n return_msg['error'] = 'No such user'\n return return_msg\n\n except DB_Exception as e:\n db.close()\n return_msg[\"error\"] = e.args[1]\n return return_msg", "def _assert_existing_user_error(self, response):\n assert response.status_code == 409\n errors = json.loads(response.content.decode('utf-8'))\n for conflict_attribute in [\"username\", \"email\"]:\n assert conflict_attribute in errors\n assert \"belongs to an existing account\" in errors[conflict_attribute][0][\"user_message\"]", "def proto_error(err):\n if isinstance(err, ProtoError):\n return err\n else:\n return ProtoError(err)", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "async def gen_error(error_id: str, ctx: commands.Context) -> Embed:\n errors = get_file(\"errors\")\n error = Embed(color=error_color)\n error.add_field(name=\"⚠️ \" + errors[error_id][\"title\"], value=errors[error_id]['txt'])\n error = set_footer(error, ctx)\n await ctx.send(embed=error)", "def signin_failure(request, message):\n request.user.message_set.create(message = message)\n return show_signin_view(request)", "def password_error(self, msg):\n raise NotImplementedError('password_error')", "def get_username_validation_error(username):\n return _validate(_validate_username, errors.AccountUsernameInvalid, username)", "def test_new_user_400(self):\n # Missing First Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['first_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing Last Name\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['last_name'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Missing UserID\n user1_body = deepcopy(self.test_user1_data)\n del(user1_body['userid'])\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400\n\n # Bad data type for groups\n user1_body = deepcopy(self.test_user1_data)\n user1_body['groups'] = self.test_group1_groupid\n resp = self.app.post('/users', data=json.dumps(user1_body))\n assert resp.status_code == 400", "def test_check_user_profile_id(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.env.ref('base.user_root').id,\n }\n with self.assertRaises(ValidationError):\n 
self.env['res.users'].create(userValue)", "def error(self, message=None, show_help=True):", "def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}", "def onError(self, error):\n log.err(\"Encountered an error: {0}\".format(\n error.getErrorMessage()))\n return error", "def exit_with_message(error_text: str) -> NoReturn:\n raise StartRowParseError(start_row, error_text)", "def error(self, error):\n pass", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def process_error(self, id, code, error):\n return {\n 'id': id,\n 'version': '1.1',\n 'error': {\n 'name': 'JSONRPCError',\n 'code': code,\n 'message': error,\n },\n }", "def _get_error_type(self):\n\n error_type = AladdinUserFaultType.Unknown\n if not self.error_msg:\n return error_type.value\n\n error_msg = self.error_msg.lower()\n if 'unrecognized' in error_msg:\n error_type = AladdinUserFaultType.UnrecognizedArguments\n elif 'expected one argument' in error_msg or 'expected at least one argument' in error_msg \\\n or 'value required' in error_msg:\n error_type = AladdinUserFaultType.ExpectedArgument\n elif 'misspelled' in error_msg:\n error_type = AladdinUserFaultType.UnknownSubcommand\n elif 'arguments are required' in error_msg or 'argument required' in error_msg:\n error_type = AladdinUserFaultType.MissingRequiredParameters\n if '_subcommand' in error_msg:\n error_type = AladdinUserFaultType.MissingRequiredSubcommand\n elif '_command_package' in error_msg:\n error_type = AladdinUserFaultType.UnableToParseCommandInput\n elif 'not found' in error_msg or 'could not be found' in error_msg \\\n or 'resource not found' in error_msg:\n error_type = AladdinUserFaultType.AzureResourceNotFound\n if 'storage_account' in error_msg or 'storage account' in error_msg:\n error_type = AladdinUserFaultType.StorageAccountNotFound\n elif 'resource_group' in error_msg or 'resource group' in error_msg:\n error_type = AladdinUserFaultType.ResourceGroupNotFound\n elif 'pattern' in error_msg or 'is not a valid value' in error_msg or 'invalid' in error_msg:\n error_type = AladdinUserFaultType.InvalidParameterValue\n if 'jmespath_type' in error_msg:\n error_type = AladdinUserFaultType.InvalidJMESPathQuery\n elif 'datetime_type' in error_msg:\n error_type = AladdinUserFaultType.InvalidDateTimeArgumentValue\n elif '--output' in error_msg:\n error_type = AladdinUserFaultType.InvalidOutputType\n elif 'resource_group' in error_msg:\n error_type = AladdinUserFaultType.InvalidResourceGroupName\n elif 'storage_account' in error_msg:\n error_type = AladdinUserFaultType.InvalidAccountName\n elif \"validation error\" in error_msg:\n error_type = AladdinUserFaultType.ValidationError\n\n return error_type.value", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\"\", \"test42837492374923749\")", "def 
test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def _error(msg):\n\n error(None, msg)", "def _validate_user(_):\n pass", "def error(msg):\n return log().error(msg)", "def error():\n return None", "def error_callback(bot, update, error):\n user = get_user_info(update.message.chat_id)\n username = update.message.chat_id if user is None else user['PID']\n\n try:\n raise error\n except Unauthorized:\n # remove update.message.chat_id from conversation list\n mp.track(username, 'Error', {'type': 'Unauthorized' })\n logger.warning(\"TelegramError: Unauthorized user. User probably blocked the bot.\")\n except BadRequest as br:\n # handle malformed requests\n mp.track(username, 'Error', {'type': 'BadRequest', 'text': update.message.text, 'error': str(br) })\n logger.warning(\"TelegramError: {} | Text: {} | From: {}\".format(str(br), update.message.text, update.message.from_user))\n except TimedOut as time_out:\n # handle slow connection problems\n mp.track(username, 'Error', {'type': 'TimedOut', 'text': update.message.text, 'error': str(time_out) })\n logger.warning(\"TelegramError: {} | Text: {} | From: {}\".format(str(time_out), update.message.text, update.message.from_user))\n except NetworkError as ne:\n # handle other connection problems\n mp.track(username, 'Error', {'type': 'NetworkError', 'text': update.message.text, 'error': str(ne) })\n logger.warning(\"TelegramError: {} | Text: {} | From: {}\".format(str(ne), update.message.text, update.message.from_user))\n except ChatMigrated as cm:\n # the chat_id of a group has changed, use e.new_chat_id instead\n mp.track(username, 'Error', {'type': 'ChatMigrated' })\n logger.warning(\"TelegramError: {} | Text: {} | From: {}\".format(str(cm), update.message.text, update.message.from_user))\n except TelegramError as e:\n # handle all other telegram related errors\n mp.track(username, 'Error', {'type': 'TelegramError', 'text': update.message.text, 'error': str(e) })\n logger.warning(\"TelegramError: {} | Text: {} | From: {}\".format(str(e), update.message.text, update.message.from_user))", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def getErrorId(self):\n return _libsbml.XMLError_getErrorId(self)", "def error(msg):\n\n raise Exception(msg)", "def handle_args_validation_error(error: UnprocessableEntity):\n error_message = error.description or str(error)\n\n exception = getattr(error, \"exc\", None)\n if isinstance(exception, ValidationError):\n validation_messages = []\n for field, messages in exception.normalized_messages().items():\n validation_messages.append(\n \"Field '{}': {}\".format(field, \", \".join(messages))\n )\n error_message = \". 
\".join(validation_messages)\n\n return jsonify({\"message\": error_message}), 400", "def existing_user_id(new_user_id, mapp):\n mapp.create_user(\n user=new_user_id, password=1234,\n email=new_user_id + \"@example.com\")\n return new_user_id", "def gen_error(error_id, *args):\n errors = {\n 'generic': {'status': 400, 'error': 'generic', 'description': 'A unspecified error occurred'},\n 'invalid_pagetype': {'status': 400, 'description': 'Invalid pagetype \"{}\"'},\n }\n\n if error_id in errors.keys():\n error = dict(**errors[error_id])\n error['description'] = error['description'].format(*args)\n error['error'] = error_id\n return json.dumps({**error, 'success': False}), error['status']\n\n return json.dumps(errors['generic']), errors['generic']['status']", "def get_error_message(self):\n return self.error_message.get_error_message()", "def _get_error_code(self, data) -> int:\n return int(self._error_code)", "def error(self, msg, *args, **kwargs):\n pass", "def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return", "def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def errormessage(self, msg) :\n\t\tif msg != self.__olderror :\n\t\t\tself.__stderr.write(\"%s\\n\" % msg)\n\t\t\tself.htmlmessage(msg)\n\t\tself.__olderror = msg[:]\n\t\treturn -1", "def error(str):\n\n Utils.send('error', str)", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "def error(self, message, new_line=True):\n #\n # Note that while the call to \"get_caller()\" is costly, it only happens\n # when an error occurs, so it shouldn't impact performance\n #\n error_data = (message, self.get_caller())\n self._errors.append(error_data)", "def _get_unknown_userid(self):\n cursor = self.conn.cursor()\n unknown_user_str = dbtypes.User.null\n cursor.execute(\"select id from users where uniqueid='%s'\" % unknown_user_str)\n return cursor.fetchone()[0]", "def error(request):\r\n #Retrieve information which caused an error\r\n messages = get_messages(request)\r\n info =''\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n return profile(request, info, info, messages)\r\n except:\r\n return _show_login(request, 'accounts/login.html', {'messages' : messages})", "def test_error_user_already_exists(self):\n User.objects.create_user(self.data)\n client = Client()\n client.post('/register/', self.data)\n self.assertRaisesMessage(ValueError, 'user already 
exists')", "def check_msg(msg):\n err = bool(0)\n if 'Error' in msg:\n err = bool(1)\n elif \"Warning\" in msg:\n msg = 'Success! Warning: Image already exists. ' \\\n 'Processing ran on existing image'\n else:\n msg = 'Image saved successfully'\n return err, msg", "def handle_invalid_parameter(message_id, *format, status = HttpStatus.BAD_REQUEST, var = (None, None)):\n\tif (var[1] and var[0] is None):\n\t\treturn handle_invalid_parameter(\"invalid_value\", var[1])\n\n\tmessage = MSG[message_id].copy()\n\tmessage[\"message\"] = message[\"message\"].format(*format)\n\treturn json_response(message, status)", "def message_user_failure(self, request, count, action):\n\n message_bit = self._get_message_bit(count)\n level = messages.ERROR\n message = \"%s could not be %s.\" % (message_bit, action)\n self.message_user(request, message, level=level)", "def error_callback(bot, update, error):\n if isinstance(error, TelegramError):\n raise error # raise it for more sentry verbosity", "def _get_password_error_msg(password):\n # At least one letter and one digit\n if not any(c.isalpha() for c in password):\n return (\"The new password must contain at least one letter\", 'no_letter_in_password')\n if not any(c.isdigit() for c in password):\n return (\"The new password must contain at least one digit\", 'no_digit_in_password')\n return None", "def parse(self, msg):\n msg = msg.rstrip('\\n')\n code_, value = msg.split(' ', 1)\n if code_ == '2':\n raise SMAPError(value)\n return int(code_), value", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def BadRequest(message):\n return f\"Bad Request: {message}\", 400", "def error_obj(message):\n return {\n 'error': message,\n 'success': False\n }" ]
[ "0.67848724", "0.61461526", "0.6122", "0.6017559", "0.5941634", "0.5908885", "0.59077275", "0.5874253", "0.5843121", "0.5823432", "0.5823432", "0.5823432", "0.57579714", "0.57539093", "0.5725768", "0.57183814", "0.57163316", "0.57006186", "0.56851375", "0.5680018", "0.56696147", "0.56570995", "0.5629921", "0.5621132", "0.5608277", "0.5608277", "0.5586423", "0.5557978", "0.5544715", "0.55199844", "0.5500085", "0.54883987", "0.5485573", "0.5461092", "0.5450737", "0.543313", "0.5430174", "0.54223007", "0.54122955", "0.5409737", "0.54079354", "0.540768", "0.5405423", "0.5400745", "0.53977424", "0.5394011", "0.5393637", "0.53901833", "0.5389471", "0.53759044", "0.5371888", "0.5369932", "0.53684753", "0.5366111", "0.5363948", "0.53593224", "0.53591436", "0.5357618", "0.5352722", "0.5348732", "0.5335736", "0.53216964", "0.53194875", "0.53137565", "0.53101367", "0.5309925", "0.5309925", "0.5306467", "0.5302699", "0.52998585", "0.528512", "0.52843153", "0.5270368", "0.52692074", "0.52664465", "0.5264751", "0.5263391", "0.5262301", "0.52606404", "0.525626", "0.52488315", "0.52488315", "0.52488315", "0.52488315", "0.52488315", "0.52488315", "0.52488315", "0.52473694", "0.5244347", "0.52355796", "0.523471", "0.5233468", "0.52308387", "0.5219248", "0.5218024", "0.521225", "0.5208608", "0.5208362", "0.52055025", "0.51943797" ]
0.701784
0
Upload image(s) and run the required analysis for multiple or single image file(s)
def run_images_analysis(filepath, ID, method):
    for path in filepath:
        try:
            Image.open(path)
        except IOError:
            msg = 'Please import images files, or just a single zip archive'
        else:
            filename, extension = get_file_name(path)
            # Save raw image to database
            msg = client.upload_file(ID, filename, extension, path)
            err, msg = check_msg(msg)
            if err is False:  # if no error in uploading image
                # Request to process image
                client.process_image(ID, filename, method)
    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def process():\n config = read_config()\n \n\n img_dir = config['DEFAULT']['images_directory']\n results_dict = {}\n images = list(get_image_files(img_dir))\n for image in tqdm.tqdm(images):\n info = hash_file(image)\n if info == 0:\n continue\n\n hash_value = info['hash']\n\n if hash_value not in results_dict:\n file_name = os.path.basename(info['_id'])\n results_dict[hash_value] = [file_name, 1]\n else:\n results_dict[hash_value][1] += 1\n\n count = list(results_dict.values())\n sorted_count = sorted(count, key=lambda x: x[1], reverse=True)\n \n with ImagesDB(IMG_INFO_DB_FILENAME) as imgDb: \n imgDb.insert_batch(sorted_count)", "def process_image(self):\n\n detect.main(self.nn_args)", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. 
Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def analyzeImages(path, name_type, box1_size, box2_size, box3_size, box4_size, box5_size):\n \n folders = [f for f in sorted(glob.glob(path + \"/**\"))]\n \n for folder in folders: \n \n # to save this data frame in a csv file\n \n files = [f for f in sorted(glob.glob(folder + \"/**\" + \".jpg\"))]\n \n centroidsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n headsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n \n img_array1 = []\n img_array2 = []\n img_array3 = []\n img_array4 = []\n img_array5 = []\n\n for file in files:\n \n print(file)\n \n centroidsDf[\"frame\"][files.index(file)] = files.index(file)+1\n headsDf[\"frame\"][files.index(file)] = files.index(file)+1\n \n img = cv2.imread(file)\n \n ## FLY 1 ##\n\n box1 = img[box1_size[0]:box1_size[1], box1_size[2]:box1_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box1, file) \n \n # add the centroid and head locations on the image \n box1 = cv2.circle(box1, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box1 = cv2.circle(box1, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array1.append(box1)\n \n # add the positions in the final data frame\n centroidsDf[\"fly1_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly1_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly1_x\"][files.index(file)] = x_head\n headsDf[\"fly1_y\"][files.index(file)] = 
y_head\n \n ## FLY 2 ##\n \n box2 = img[box2_size[0]:box2_size[1], box2_size[2]:box2_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box2, file)\n \n # add the centroid and head locations on the image \n box2 = cv2.circle(box2, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box2 = cv2.circle(box2, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array2.append(box2)\n \n # add the positions in the final data frame \n centroidsDf[\"fly2_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly2_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly2_x\"][files.index(file)] = x_head\n headsDf[\"fly2_y\"][files.index(file)] = y_head\n \n ## FLY 3 ##\n\n box3 = img[box3_size[0]:box3_size[1], box3_size[2]:box3_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box3, file)\n \n # add the centroid and head locations on the image \n box3 = cv2.circle(box3, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box3 = cv2.circle(box3, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array3.append(box3)\n\n # add the positions in the final data frame\n centroidsDf[\"fly3_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly3_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly3_x\"][files.index(file)] = x_head\n headsDf[\"fly3_y\"][files.index(file)] = y_head\n \n ## FLY 4 ##\n \n box4 = img[box4_size[0]:box4_size[1], box4_size[2]:box4_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box4, file)\n \n # add the centroid and head locations on the image \n box4 = cv2.circle(box4, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box4 = cv2.circle(box4, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array4.append(box4)\n \n # add the positions in the final data frame\n centroidsDf[\"fly4_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly4_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly4_x\"][files.index(file)] = x_head\n headsDf[\"fly4_y\"][files.index(file)] = y_head\n \n ## FLY 5 ##\n \n # the fifth fly is not present in all the genetic strains \n if (box5_size != []):\n box5 = img[box5_size[0]:box5_size[1], box5_size[2]:box5_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box5, file)\n \n # add the centroid and head locations on the image \n box5 = cv2.circle(box5, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box5 = cv2.circle(box5, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array5.append(box5)\n \n # add the positions in the final data frame\n centroidsDf[\"fly5_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly5_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly5_x\"][files.index(file)] = x_head\n headsDf[\"fly5_y\"][files.index(file)] = y_head\n \n # save the data frame in a .csv file, \n # one for the centroids and one for the heads\n #centroidsDf.to_csv(folder+\"/centroids.csv\", index = None, header=True)\n #headsDf.to_csv(folder+\"/heads.csv\", index = None, header=True)\n \n \n ## CREATE THE VIDEOS ##\n \n height, width, _ = box1.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_1_' + str(folders.index(folder)+1)+ '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array1)):\n out.write(img_array1[i])\n out.release()\n \n height, width, _ = box2.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_2_' + 
str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array2)):\n out.write(img_array2[i])\n out.release()\n \n height, width, _ = box3.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_3_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array3)):\n out.write(img_array3[i])\n out.release()\n \n height, width, _ = box4.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_4_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array4)):\n out.write(img_array4[i])\n out.release()\n \n if (box5_size != []):\n height, width, _ = box5.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_5_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array5)):\n out.write(img_array5[i])\n out.release()", "def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)", "def uploadimg():\n print(str(pathlib.Path(__file__).resolve().parents[1])+\"im hereeeeeeeeeeeeeeeeeeeeeeeee\")\n path = str(pathlib.Path(__file__).resolve().parents[1])\n target = os.path.join(path,'Facial recognition/dataset')\n email = session['username']\n target = target+'/'+email\n # app_root, 'C:/Users\\meetp\\OneDrive\\Desktop\\IotAssigment2\\src\\Facial recognition\\dataset/')\n # print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n print(destination)\n file.save(destination)\n\n # encode the image\n # en = encode()\n # en.run(target)\n\n return render_template(\"imguploaded.html\")", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def process_image(self):\n pass", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('drive', 'v3', http=http)\n\n i = 0\n total = 0\n nextPageToken=None\n while True:\n results = service.files().list(\n pageSize=30,\n fields=\"nextPageToken, files(id, name, mimeType, modifiedTime)\",\n spaces='photos',\n pageToken=nextPageToken\n ).execute()\n\n items = results.get('files', [])\n nextPageToken = results.get(\"nextPageToken\")\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'].split('/')[0] != 'image':\n 
continue\n if vcoll.findBySrcId(item['id']) is not None:\n continue\n destination = 'image_tags/validation/' + item['name']\n file_content = get_file_stream(service, item['id'])\n if file_content and image_handler.is_valid_image(file_content):\n file_handler.upload_file_stream(destination, file_content)\n vcoll.insertValidationImage(destination, item['id'], item['modifiedTime'])\n total += 1\n print(\"Downloaded {0} photos\".format(total))\n i += 1", "def _process_image_files(name, cnts, roots, num_shards): \n \n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, sum(cnts), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, cnts, roots, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' %\n (datetime.now(), sum(cnts)))\n sys.stdout.flush()", "def analyze(self):\n # turn off all indicator lights\n self._stop_all()\n \n # run, but catch exceptions and abort if necessary\n try:\n # setup\n self.analysis_led[1].blink\n ims_left = self.num_images\n fluid_left = True\n \n data_session = Data(self.data_path)\n \n # run motor & imaging\n while self.power.update() and ims_left > 0:\n # run pump\n self.motor.run(self.pump_runtime)\n \n if not self.power.update():\n break\n \n # image\n time.sleep(self.rest_time)\n self.cam_led.on\n self.camera.capture()\n data_session.fetch_data()\n self.cam_led.off\n \n # subtract from remaining images every cycle\n # if the fluid sensor turns off, set remaining\n # images to the maximum possible remaining\n ims_left -= 1\n if fluid_left and \\\n not self.fluid.update() and \\\n ims_left > self.samps_after_sensor_off:\n fluid_left = False\n ims_left = self.samps_after_sensor_off\n \n # change indicator lights, given complete or power off\n if ims_left == 0:\n # set analysis to green\n self.analysis_led[1].off\n self.analysis_led[0].on\n else:\n # set analysis to solid red\n self.analysis_led[1].on\n \n # transmit data whether or not power switched off\n self.data_led.blink\n data = data_session.prepare_broadcast()\n broadcast_session = Broadcast(self.peer_ip)\n broadcast_session.broadcast_data(data)\n self.data_led.off\n \n except:\n # turn on error indicator and turn off all else\n # do not transmit data\n self._stop_all()\n self.error.on", "def process(self, image):", "def execute(args, **kwargs):\n p = set_options()\n a = p.parse_args(args)\n # logging.info(str(a))\n\n ifiles = ImageFiles(a)\n\n if a.info:\n ifiles.describe()\n else:\n ifiles.write()", "def upload_analysis(list_of_contents, list_of_names, list_of_dates, session_id, job_id, clean_input_dir):\n\n clean_input_dir = len(clean_input_dir) != 0\n\n print('UPLOAD')\n\n if session_id is not None and list_of_contents is None:\n print(f'Running in session {session_id}')\n\n # make a subdirectory for this session if one doesn't exist\n input_dir = join(BASE_DIR, 'input', f'input_{session_id}')\n try:\n os.mkdir(input_dir)\n except FileExistsError:\n pass\n\n try:\n 
os.mkdir(join(input_dir, 'analysis'))\n except FileExistsError:\n pass\n\n # Create an output directory for this session if it doesn't exist\n output_dir = join(BASE_DIR, 'output', f'output_{session_id}')\n try:\n os.mkdir(output_dir)\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis'))\n except FileExistsError:\n pass\n\n try:\n os.mkdir(join(output_dir, 'analysis', 'images'))\n except FileExistsError:\n pass\n\n def _clean_input_dir():\n \"\"\"\n Clean the input directory by removing every existing file.\n \"\"\"\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))\n\n try:\n\n # If the user isn't uplaoding anything and\n # hasn't uploaded anything, ask them to do so.\n # print(os.listdir(input_dir))\n if list_of_contents is None and len(os.listdir(join(input_dir, 'analysis'))) == 0:\n return 'Please upload some files.'\n\n # if the user is uploading something, first clean the input directory,\n # then write the uploaded files to BASE_DIR/input/input_{session_id}\n if list_of_contents:\n\n if clean_input_dir:\n _clean_input_dir()\n\n # Save successfully uploaded filenames here\n written = list()\n\n # Write uploaded files to BASE_DIR/input/input_{session_id}\n # If any of the files do not end in .txt,\n # or cannot be decoded properly, or cannot be parsed\n # into Voigt models, then clean the input directory and print\n # the error message. Otherwise, show a bullet list of files\n # uploaded to the input directory.\n\n if not clean_input_dir:\n old_peaks = pd.read_csv(join(input_dir, 'peaks.csv'))\n old_models = pd.read_csv(join(input_dir, 'models.csv'))\n else:\n old_peaks = pd.DataFrame()\n old_models = pd.DataFrame()\n \n new_peaks = pd.DataFrame()\n\n for i, c in enumerate(list_of_contents):\n\n if not list_of_names[i].endswith('.txt'):\n raise Exception(f'File {list_of_names[i]} must be .txt')\n\n s = c.split(',')[1]\n\n try:\n s = base64.b64decode(s).decode()\n except UnicodeDecodeError:\n raise Exception(f'Error uploading file {list_of_names[i]}.\\\n Please check file format and try again.')\n\n with open(join(input_dir, 'analysis', list_of_names[i]), 'w') as f:\n f.write(s)\n\n try:\n parsed_file = parse_file(join(input_dir, 'analysis', list_of_names[i]))\n new_peaks = pd.concat([new_peaks, parsed_file], sort=True)\n except Exception as e:\n import traceback\n traceback.print_exc()\n raise Exception(f'Cannot parse file {list_of_names[i]}: {e}')\n\n written.append(list_of_names[i])\n\n res = [html.Li(x) for x in written]\n res.insert(0, html.P(f'Success! 
{len(written)} \\\n .txt files were uploaded.'))\n\n # peaks = read_input(session_id)\n id_vars = pd.Series(new_peaks.columns)\n mask = ~(id_vars.str.contains('(p|n)m', regex=True) &\n id_vars.str.contains('center'))\n id_vars = id_vars.loc[mask]\n new_peaks = new_peaks.melt(id_vars=id_vars)\n new_peaks = new_peaks.loc[new_peaks.value.notnull()]\n\n def compute_models(DATA):\n res = pd.DataFrame([], columns=['filename', 'peak_name', 'peak_position', 'amplitude'])\n for idx, (_, model) in enumerate(DATA.iterrows()):\n\n row = pd.Series()\n row['filename'] = model.filename\n row['peak_name'] = model.variable\n row['peak_position'] = model.value\n \n amp_col = model.variable[:model.variable.index('_')] + '_amplitude'\n row['amplitude'] = model[amp_col]\n\n res.loc[idx] = row\n\n return res\n\n new_models = compute_models(new_peaks)\n\n models = pd.concat([old_models, new_models])\n peaks = pd.concat([old_peaks, new_peaks])\n\n models.to_csv(join(input_dir, 'models.csv'))\n\n peaks.to_csv(join(input_dir, 'peaks.csv'))\n\n return res\n\n except Exception as e:\n # If any of the files raise an error (wrong extension,\n # decoding error, error parsing into models),\n # then print the error message.\n _clean_input_dir()\n import traceback; traceback.print_exc()\n return f'An error occurred while uploading files: {e}'", "def main():\n service = discovery.build('drive', 'v3', http=get_http())\n\n pic_q = retrieve_pics(service)\n classify_pics(pic_q)", "def run_analysis(filepath, ID, method):\n filename, extension = get_file_name(filepath[0])\n if extension == '.zip':\n msg = run_zip_analysis(filepath, ID, method)\n else:\n msg = run_images_analysis(filepath, ID, method)\n return msg", "def autoAnalyze(self):\n print(\"Perfoming full automatic analysis...\")\n t1=time.perf_counter()\n self.cleanUp()\n self.figure_rois()\n self.figure_roi_inspect_all()\n self.figure_dGoR_roi(showEach=False,saveAs=self.folderSave+\"/avg.png\")\n self.figure_dGoR_roi(showEach=True,saveAs=self.folderSave+\"/each.png\")\n self.index()\n print(\"analysis completed in %.02f sec\"%(time.perf_counter()-t1))", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 
64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def process(image):\n pass", "def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], 
campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = 
_extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)", "def nipt_upload_all(context: click.Context, dry_run: bool):\n\n LOG.info(\"*** UPLOAD ALL AVAILABLE NIPT RESULTS ***\")\n\n nipt_upload_api = NiptUploadAPI(context.obj)\n analyses = nipt_upload_api.get_all_upload_analyses()\n if not analyses:\n LOG.info(\"No analyses found for upload\")\n return\n\n for analysis in analyses:\n case_id = analysis.family.internal_id\n context.invoke(nipt_upload_case, case_id=case_id, dry_run=dry_run)", "def run(self):\n st.title('Acne Classifier')\n st.markdown(STYLE, unsafe_allow_html=True)\n file = st.file_uploader(\"Upload file\", type=self.fileTypes)\n show_file = st.empty()\n if not file:\n show_file.info(\"Please upload a file of type: \" + \", \".join([\"png\", \"jpg\"]))\n return\n\n file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)\n opencv_image = cv2.imdecode(file_bytes, 1)\n cv2.imwrite('out.jpg', opencv_image)\n df = get_score()\n\n df2 = df.set_index('Issue')\n st.dataframe(df2)\n st.bar_chart(df2)\n\n if isinstance(file, BytesIO):\n show_file.image(file)\n else:\n data = pd.read_csv(file)\n st.dataframe(data.head(10))\n file.close()", "def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif queries_type == \"txt_option\":\n 
d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == \"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == \"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None", "def main():\n with open(IMAGEPATH_LIST_PATH, \"rt\") as imagepath_list_handle:\n imagepath_list = [line.strip() for line in imagepath_list_handle.readlines()]\n\n object_detector = ObjectDetector(MODEL_PATH)\n\n dataset_json = []\n for imagepath in imagepath_list:\n image = scipy.misc.imread(imagepath)\n detections = object_detector.run(image)\n\n detections_json = {\"path\": imagepath, \"detections\": [det.to_dict() for det in detections]}\n dataset_json.append(detections_json)\n\n with open(DATASET_PATH, \"wt\") as json_handle:\n json.dump(dataset_json, json_handle, sort_keys=True, indent=4)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n #submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, \"submit\")\n #os.makedirs(submit_dir)\n\n # Read dataset\n img_ids = []\n dataset_dir = os.path.join(dataset_dir, subset)\n image_file = os.listdir(dataset_dir)\n #submission = []\n for img in image_file:\n if not img.startswith('.'):\n img_file = os.path.join(dataset_dir, img)\n image = skimage.io.imread(img_file)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # Detect object\n\t\t\t\n r = model.detect([image])[0]\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = img.split(\".\")[0]\n #rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n #submission.append(rle)\n # Save image with masks\n visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'],\n #show_bbox=False, show_mask=False,\n title=\"Predictions\")\n plt.savefig(\"{}/{}.png\".format(submit_dir, source_id))\n\n\n\t\t\n # Save to csv file", "def process_images():\n\t\n\tparser = argparse.ArgumentParser(description=\"Splice image patch for face from GAN generated donor to detected face in recipient image.\")\n\tparser.add_argument(\"-d\", \"--donor\", dest=\"donor\", default=\"./GAN_Faces/\", help=\"path to directory containing GAN generated faces\")\n\tparser.add_argument(\"-r\", \"--recipient\", dest=\"recipient\", default=\"./MediFor_Images/\", help=\"path to directory containing images into which faces are spliced\")\n\tparser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"./GAN_MediFor/\", help=\"output directory into which spliced images are saved\")\n\tparser.add_argument(\"-f\", \"--files\", dest=\"files\", default=False, help=\"If the input and output are files not directories\", action='store_true')\n\n\targs = parser.parse_args()\n\tdonor_directory = args.donor\n\trecipient_directory = args.recipient\n\tout_directory = args.output\n\tfi = args.files\n\t\n\t# donor images\n\ttry:\n\t\thead_image_paths = os.listdir(donor_directory) if not fi else [donor_directory]\n\t\tif not os.path.exists(head_image_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the donor image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\t\n\t# recipient images\n\ttry:\n\t\trecipient_paths = os.listdir(recipient_directory) if not fi else [recipient_directory]\n\t\tif not os.path.exists(recipient_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the recipient image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\n\t# output folder existence\n\tif not os.path.exists(out_directory) and not fi:\n\t\tprint('Did you create the output image directory?')\n\t\tprint('Quiting...')\n\t\treturn\n\t\n\t# log errors\n\tlf = open('./log.txt', 'w')\n\t\n\t\"\"\"\n\tTowards the objectives of the MediFor program, all Progressive GAN generated face images are utilized in combination with all available images in recipient images.\n\t\n\tNaming convention:\n\tThe spliced images are named as <donor image name>--<recipient image name>.png\n\tThe spliced images can be renamed at a later date if a hashing function is used to rename donor or recipient image file names.\t\n\t\"\"\"\n\n\tfor head_img in head_image_paths:\n\t\thead_path = donor_directory + head_img if not fi else head_img\n\t\tfor recipient_img in recipient_paths:\n\t\t\trecipient_path = recipient_directory + recipient_img if not fi else recipient_img\n\t\t\tout_img = head_img.split('.')[0] + '--' + recipient_img.split('.')[0] + '.png'\n\t\t\tout_path = os.path.join(out_directory, out_img) if not fi else out_directory\n\t\t\ttry:\n\t\t\t\tsplice_donor_recipient(recipient_path, head_path, out_path)\n\t\t\t\tprint('donor: {}, recipient: {}\\n output: {}'.format(head_path, recipient_path, out_path))\n\t\t\texcept Exception as err:\n\t\t\t\tprint(err)\n\t\t\t\tlf.write('Issue with: {}\\n'.format(out_img))\n\t\n\tlf.close()", "def _process_image_files(self, input_files):\n # Handle single file-object as arg.\n if not isinstance(input_files, list):\n input_files = [input_files]\n 
self._check_batch_size(input_files)\n # Handle unnames images as lists of file objects. Named by index in list.\n image_files = []\n for i, tup in enumerate(input_files):\n if not isinstance(tup, tuple):\n image_files.append((tup, str(i)))\n assert hasattr(image_files[i][0], 'read'), (\n 'image_files[%d] has wrong type: %s. Must be file-object with read method.') % (\n i, type(image_files[i][0]))\n else: # already tuples passed in.\n image_files.append(tup)\n # Resize any images such that the min dimension is in range.\n if CAN_RESIZE:\n for i, image_tup in enumerate(image_files):\n image_files[i] = self._resize_image_tuple(image_tup)\n # Return a list of (bytes, name) tuples of the encoded image bytes.\n image_data = []\n for image_file in image_files:\n image_data.append((bytes(image_file[0].read()), image_file[1]))\n return image_data", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def main():\n\n inputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_raw'\n outputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_faces'\n\n # detects all faces from all images in inputDirectory and outputs\n # to outputDirectory\n FaceDetection.extractFaces(\n inputDirectory=inputDirectory, outputDirectory=outputDirectory)", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. 
Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')", "def main():\n time_start = perf_counter()\n\n args = parse_args(sys.argv[1:]).ordered()\n _, opts = next(args)\n log_level = 0\n try:\n log_level = (0, 20, 10)[opts.verbosity]\n mpl_log_level = log_level + 10 if log_level > 0 else log_level\n except IndexError:\n log_level = 10\n mpl_log_level = log_level\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n # set level for all loggers\n # separate log level for matplotlib because it's so verbose\n for logger in loggers:\n if logger.name.startswith(\"matplotlib\"):\n logger.setLevel(mpl_log_level)\n 
else:\n logger.setLevel(log_level)\n\n LOG.debug(\"Program opts:\\n%s\", pformat(vars(opts)))\n\n # main vars\n inputs = []\n processed = []\n # im: Optional[Image.Image] = None\n im: Image.Image | np.ndarray | None = None\n in_file_path: Optional[str]\n in_image_size = Size(0, 0)\n in_file_size = 0\n in_dpi = 0\n in_exif: Optional[dict] = None\n out_exif: bytes = b\"\"\n out_exif_size = 0\n out_file_path = None\n out_image_size = Size(0, 0)\n out_file_size = 0\n no_op = False\n\n for cmd, arg in args:\n LOG.debug(\"Processing command %s with args:\\n%s\", cmd, pformat(vars(arg)))\n\n if cmd == \"open\":\n in_file_path = arg.input.name\n in_file_size = os.path.getsize(in_file_path) # type: ignore\n im = Image.open(arg.input)\n in_image_size = Size(*im.size)\n LOG.info(\"Input dims: %s\", in_image_size)\n try:\n in_exif = piexif.load(in_file_path)\n del in_exif[\"thumbnail\"]\n # LOG.debug(\"Exif: %s\", in_exif)\n in_dpi = im.info[\"dpi\"]\n except KeyError:\n pass\n LOG.info(\"Input file size: %s\", humanize_bytes(in_file_size))\n LOG.info(\"Input dpi: %s\", in_dpi)\n if arg.show_histogram:\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"open2\":\n # Test of opening multiple images for some operations, such as matting\n for item in arg.input:\n _im = Image.open(item)\n try:\n ex = piexif.load(item.name)\n dpi = _im.info[\"dpi\"]\n del ex[\"thumbnail\"]\n except KeyError:\n ex = None\n dpi = (0, 0)\n _im = np.asarray(_im)\n _im = cv2.cvtColor(_im, cv2.COLOR_RGB2BGR)\n inputs.append(\n Img(\n _im,\n file_path=item.name,\n dpi=dpi,\n exif=ex,\n )\n )\n LOG.debug(\"Imgs: %s\", inputs)\n im = inputs[0].data\n in_file_path = inputs[0].file_path\n in_file_size = inputs[0].file_size\n in_image_size = inputs[0].size\n if arg.show_histogram:\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n LOG.debug(\"Generating numpy thumbnail for histogram\")\n thumb = resize.resize_thumbnail_opencv(im, Size(1000, 1000))\n print(generate_rgb_histogram(thumb))\n show_rgb_histogram(im)\n elif cmd == \"mat\":\n if not is_ndarray(im):\n raise TypeError('Expected numpy.ndarray')\n im = mat.create_mat(im, size_inches=arg.size)\n out_image_size = Size.from_np(im)\n elif cmd == \"resize\":\n im = Image.fromarray(im) if type(im) == np.ndarray else im\n if is_ndarray(im) or im is None:\n raise TypeError('Expected Image, not ndarray')\n orig_size = Size(*im.size) # type: ignore\n out_image_size = orig_size\n try:\n resize_method, new_size = resize.get_method(\n orig_size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n else:\n # Resize/resample\n try:\n im = resize.resize(\n resize_method,\n im,\n new_size,\n )\n except ImageTooSmallError as e:\n LOG.warning(e)\n out_image_size = Size(*im.size) # type: ignore\n elif cmd == \"resize2\":\n for item in inputs:\n try:\n resize_method, new_size = resize.get_method(\n item.size,\n width=arg.width,\n height=arg.height,\n scale=arg.scale,\n longest=arg.longest,\n shortest=arg.shortest,\n force=arg.force,\n )\n except ResizeNotNeededError as e:\n LOG.warning(e)\n except ResizeAttributeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n else:\n try:\n _im = resize.resize_opencv(\n resize_method, item.data, 
new_size, resample=cv2.INTER_AREA\n )\n if _im is not None:\n processed.append(Img(_im))\n else:\n LOG.error('Expected image from resize_opencv(), got None')\n except ImageTooSmallError as e:\n LOG.warning(e)\n LOG.info(processed)\n out_image_size = processed[0].size\n im = processed[0].data\n elif cmd == \"text\":\n if im is None:\n LOG.error('Image is None')\n return\n im = watermark.with_text(\n im,\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n ) # type: ignore\n elif cmd == \"text2\":\n im = watermark.with_text(\n Image.fromarray(im),\n text=arg.text,\n copyright=arg.copyright,\n scale=arg.scale,\n position=arg.position,\n opacity=arg.opacity,\n exif=in_exif,\n )\n im = np.asarray(im)\n elif cmd == \"watermark\":\n im = watermark.with_image(\n im,\n Image.open(arg.image),\n scale=arg.scale,\n position=arg.position,\n padding=arg.margin,\n opacity=arg.opacity,\n invert=arg.invert,\n )\n elif cmd == \"watermark2\":\n watermark_image = cv2.imread(arg.image.name, cv2.IMREAD_UNCHANGED)\n # im = watermark.with_image_opencv(\n # im,\n # watermark_image,\n # scale=arg.scale,\n # position=arg.position,\n # opacity=arg.opacity,\n # padding=arg.margin,\n # )\n try:\n im = watermark.overlay_transparent(\n im,\n watermark_image,\n scale=arg.scale,\n padding=arg.margin,\n position=arg.position,\n alpha=arg.opacity,\n invert=arg.invert,\n )\n except OverlaySizeError as e:\n print(f\"{fg.li_red}error: {e}{rs.fg}\", file=sys.stderr)\n sys.exit(1)\n elif cmd == \"sharpen\":\n im = sharpen.unsharp_mask(im, amount=arg.amount, threshold=arg.threshold)\n elif cmd == \"save\":\n # if type(im) == np.ndarray:\n # im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n use_progressive_jpg = in_file_size > 10000\n if use_progressive_jpg:\n LOG.debug(\"Large file; using progressive jpg\")\n\n # Exif\n if arg.keep_exif:\n out_exif = piexif.dump(piexif.load(in_file_path))\n out_exif_size = sys.getsizeof(out_exif)\n\n outbuf = BytesIO()\n try:\n im.save(\n outbuf,\n \"JPEG\",\n quality=arg.jpg_quality,\n dpi=in_dpi,\n progressive=use_progressive_jpg,\n optimize=True,\n exif=out_exif,\n )\n except AttributeError:\n write_params = [\n cv2.IMWRITE_JPEG_QUALITY,\n arg.jpg_quality,\n cv2.IMWRITE_JPEG_OPTIMIZE,\n ]\n if use_progressive_jpg:\n write_params += [\n cv2.IMWRITE_JPEG_PROGRESSIVE,\n ]\n _, buf = cv2.imencode(\".jpg\", im, write_params)\n outbuf = BytesIO(buf)\n image_buffer = outbuf.getbuffer()\n out_file_size = image_buffer.nbytes + out_exif_size\n LOG.info(\"Buffer output size: %s\", humanize_bytes(out_file_size))\n\n if arg.output is None:\n root, _ = os.path.splitext(in_file_path)\n out_file_path = f\"{root}{arg.suffix}.jpg\"\n else:\n out_file_path = arg.output.name\n\n if arg.no_op:\n no_op = True\n continue\n LOG.info(\"Saving buffer to %s\", out_file_path)\n if (out_path := Path(out_file_path)).exists():\n if not arg.force:\n LOG.critical(\n \"file '%s' exists and force argument not found\", out_path\n )\n print(\n f\"{fg.red}{ef.bold}Error: file '{out_path}' exists;\",\n f\" use -f option to force overwrite.{rs.all}\",\n file=sys.stderr,\n )\n return\n # Create output dir if it doesn't exist\n out_path.parent.mkdir(parents=True, exist_ok=True)\n\n with out_path.open(\"wb\") as f:\n f.write(image_buffer)\n if arg.keep_exif:\n piexif.insert(out_exif, out_file_path)\n out_file_size = os.path.getsize(out_file_path)\n\n elapsed = perf_counter() - time_start\n report = generate_report(\n in_file_size,\n out_file_size,\n 
in_file_path,\n out_file_path,\n in_image_size,\n out_image_size,\n elapsed,\n no_op,\n )\n print(report)", "def _process_images(self, docname: pathlib.Path, images: List[nodes.image]) -> None:\n logger.debug(\"[nbtutorial]: Processing images for %s\", docname)\n\n if len(images) == 0:\n return\n\n img_dir = pathlib.Path(self.outdir, docname.parent, RESOURCE_DIR)\n\n if not img_dir.exists():\n img_dir.mkdir(parents=True)\n\n for img in images:\n fname = pathlib.Path(img[\"uri\"]).name\n\n source = pathlib.Path(self.app.confdir, img[\"uri\"])\n destination = pathlib.Path(img_dir, fname)\n\n shutil.copy(source, destination)", "def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)", "def add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if image_proc_type == \"contrast stretching\":\n info = process_contrast_stretch(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, 
avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with contrast stretching')\n\n if image_proc_type == \"adaptive equalization\":\n info = process_adapt_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with adaptive equalization')\n\n if image_proc_type == \"histogram equalization\":\n info = process_histogram_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with histogram equalization')\n\n if image_proc_type == \"reverse video\":\n info = process_reverse_image(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with reverse image')\n\n if image_proc_type == \"log compression\":\n info = process_log_compression(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with log compression')\n\n return jsonify(\"it worked\")", "def process_images(self):\n source_images = self.get_build_images()\n self.log.info(\"Got %s images for publishing. 
Processing..\", len(source_images))\n\n for image in source_images:\n self.fetch_image(image)\n\n for target in image.push_registries:\n for tag in image.release_tags:\n repository = \"%s/%s\" % (target, image.repository.name)\n self.tag_image(image, repository, tag)\n retry_count = 1\n while retry_count <= self.retry_limit:\n self.log.info(\"Pushing %s:%s to %s (%d/%d)\", repository, tag, target, retry_count, self.retry_limit)\n try:\n self.publish_image(target, repository, tag)\n break\n except ImagePushError as e:\n self.log.error(\"%s\", e.message)\n retry_count = retry_count + 1\n else:\n return False\n return True", "def process_image(self):\n if not os.path.isfile(self.output_file) or self.gallery.generator.settings[\"GALLERY_REGENERATE_EXISTING\"]:\n \n # Actions should be processed in order of appearance in actions array\n for i in range(len(self.preset[\"actions\"])):\n a = self.preset[\"actions\"][i]\n\n if a[\"type\"] == \"fit\":\n if not \"from\" in a:\n a[\"from\"] = (0.5, 0.5) # crop from middle by default\n\n self.image = ImageOps.fit(self.image, (a[\"width\"], a[\"height\"],), method=Image.ANTIALIAS, centering=a[\"from\"])\n \n if a[\"type\"] == \"greyscale\":\n self.image = ImageOps.grayscale(self.image)\n\n if a[\"type\"] == \"resize\":\n self.image.thumbnail((a[\"width\"], a[\"height\"]), Image.NEAREST)\n \n # TODO: Write other useful transforms here!\n \n\n self.image.save(self.output_file, \"JPEG\")", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def upload_samples():\n # Retrieve a list of all files and paths within the target\n paths = Path(Config.target_dir).glob(Config.target_pattern)\n # Inform the user as to what we're doing\n logger.info(\"Assembling %s volume for submission\", Config.target_dir)\n # Loop through each identified file and upload it to the sandbox for analysis\n for path in paths:\n # Convert the path to a string\n filepath = str(path)\n # Grab the file name\n filename = os.path.basename(filepath)\n # Open the file for binary read, this will be our payload\n with open(filepath, 'rb') as upload_file:\n payload = upload_file.read()\n # Upload the file using the Sandbox\n response = Samples.upload_sample(file_name=filename, sample=payload)\n # Grab the SHA256 unique identifier for the file we just uploaded\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the volume payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we can remove the file when we're done\n Analyzer.files.append([filename, filepath, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def upload_all_completed_analyses(context: click.Context, pipeline: Pipeline = None):\n\n LOG.info(\"----------------- AUTO -----------------\")\n\n status_db: Store = context.obj.status_db\n\n exit_code = 0\n for analysis_obj in status_db.get_analyses_to_upload(pipeline=pipeline):\n if 
analysis_obj.family.analyses[0].uploaded_at is not None:\n LOG.warning(\n f\"Skipping upload for case {analysis_obj.family.internal_id}. \"\n f\"It has been already uploaded at {analysis_obj.family.analyses[0].uploaded_at}.\"\n )\n continue\n\n case_id = analysis_obj.family.internal_id\n LOG.info(\"Uploading analysis for case: %s\", case_id)\n try:\n context.invoke(upload, case_id=case_id)\n except Exception:\n LOG.error(f\"Case {case_id} upload failed\")\n LOG.error(traceback.format_exc())\n exit_code = 1\n\n sys.exit(exit_code)", "def run_zip_analysis(filepath, ID, method):\n with zipfile.ZipFile(filepath[0]) as zf:\n for entry in zf.namelist():\n if not entry.startswith(\"__\"): # Get rid hidden files in zip\n with zf.open(entry) as file:\n data = file.read()\n fh = io.BytesIO(data)\n Image.open(fh)\n\n filename, extension = get_file_name(file.name)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename,\n extension, fh.getvalue())\n err, msg = check_msg(msg)\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def preprocess_images():\n \n # Set up the lists to collect the images and measurements\n images = []\n measurements = []\n \n # Set up the path to the data files \n data_sets_path = 'data'\n data_sets = [os.path.join(data_sets_path, i) for i\n in os.listdir(data_sets_path)]\n \n # Step through the data folders and collect the images\n # and the steering angles\n for data_set in data_sets:\n lines = []\n \n # Open up the csv file of image paths and steering angles\n with open(os.path.join(data_set,\n 'driving_log.csv')) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n for line in lines:\n source_path = line[0]\n filename = source_path.split('\\\\')[-1]\n current_path = os.path.join(data_set, 'IMG',\n filename)\n \n # Import each image and change it to RGB\n BGR_image = cv2.imread(current_path)\n image = cv2.cvtColor(BGR_image, cv2.COLOR_BGR2RGB)\n rows, cols, depth = image.shape\n flipped_image = cv2.flip(image, 1)\n \n # Create a scaled version of the image\n scale = [0.9, 1.1]\n zoomfactor = random.choice(scale)\n scale_matrix = cv2.getRotationMatrix2D((cols/2, rows/2),\n 0, zoomfactor)\n scaled_image = cv2.warpAffine(image, scale_matrix,\n (cols, rows))\n\n # Append the images to the image list\n images.append(image)\n images.append(scaled_image)\n images.append(flipped_image)\n \n # Append the steering angle to the measurements list\n measurement = float(line[3])\n measurements.append(measurement)\n measurements.append(measurement)\n measurements.append(-1*measurement)\n \n return images, measurements", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. 
\\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def run_yolo_indir(images_path):\n for filename in os.listdir(images_path):\n try:\n # print(filename)\n Image.open(os.path.join(images_path, filename))\n test_detector(b'cfg/voc.data', b'cfg/yolo.cfg', b'yolo.weights', os.path.join(\n images_path, filename).encode('utf-8'), parameters.YOLO_THRES, 0.5)\n w, h, o = read_bounding_boxes('bounding_boxes.txt')\n crop_all_bounding_boxes(o, filename, os.path.join, images_path)\n except:\n print('Cannot test image', filename)\n continue", "def main():\n args = doParsing()\n print(args)\n\n # Load model with custom object for mobilenet\n model = load_model(args.modelPath, custom_objects={\n 'relu6': mobilenet.relu6,\n 'DepthwiseConv2D': mobilenet.DepthwiseConv2D})\n\n print(\"Loaded model from \" + args.modelPath)\n\n print(model.summary())\n\n # Dogs and cats test dataset has 12500 samples\n\n results = []\n\n for file in sorted(glob.glob(args.datasetTestDir + \"/*.jpg\")):\n\n # One by one image prediction\n\n # Image processing (resize and inception like preprocessing to have [-1.0, 1.0] input range)\n image = imread(file)\n image = imresize(image, size=model.input_shape[1:3])\n image = image.astype(np.float32)\n processedImage = mobilenet.preprocess_input(image)\n\n # Add 1st dimension for image index in batch\n processedImage = np.expand_dims(processedImage, axis=0)\n\n # Get and print TOP1 class\n result = model.predict_classes(x=processedImage, batch_size=1, verbose=False)\n print(os.path.basename(file) + \" -> \" + classes[result[0]])\n\n # Get and save dog probability\n resultProba = model.predict_proba(x=processedImage, batch_size=1, verbose=False)\n results.append((os.path.basename(file)[:os.path.basename(file).rfind('.')], resultProba[0][classes.index(\"dog\")]))\n\n 
print(\"Test finished\")\n\n if args.kaggleExportFile is not None:\n exportResults(results, args.kaggleExportFile)", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def index_files():\n\n print(\"Indexing files\")\n\n for root, _, files in os.walk(image_directory):\n for item in files:\n for file_type in file_types:\n if file_type in item:\n images_in_directory.append(os.path.join(root, item))\n\n print(f'Finished indexing {len(images_in_directory)} files')\n\n pass", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def take_action(self, parsed_args):\n if parsed_args.file:\n for file in parsed_args.file:\n if not os.path.exists(file):\n self.logger.error('Specified file does not exist: {}'.format(file))\n continue\n self.logger.info('File uploading is started: {}'.format(file))\n file_id = self.app.metagen.upload_files(file)\n if not file_id:\n return False\n self.logger.info('File {} has been sent to analysis.'.format(file))\n self.logger.info('Use File ID to get Analysis Result: {}'.format(file_id))\n self.logger.info('Task Done')", "def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return util.TransformContainerAnalysisData(img_name,\n args.occurrence_filter)", "def process_all_images(input_path: str, output_path: str, resized_image_shape: Tuple,transformations:List[TransformationsEnum]):\n\n output_images_path = os.path.join(output_path, \"images\")\n csv_file_path = os.path.join(output_path, \"metadata.csv\")\n\n prepare_folders(output_path, output_images_path)\n prepare_csv(csv_file_path)\n\n df = pd.read_csv(csv_file_path)\n current_id = 1 #has to check the current id in the folder or be set to 1 if none\n categories_names = list(os.listdir(input_path))\n\n encoder = LabelBinarizer()\n encoder.fit(categories_names)\n\n\n for folder_name in os.listdir(input_path):\n current_category_name = folder_name\n category_path = os.path.join(input_path, folder_name)\n images_in_category = list(Path(category_path).glob(\"*.jpg\"))\n df, current_id = process_image(\n df, current_id, encoder, current_category_name, images_in_category,output_images_path, resized_image_shape,transformations\n )\n\n df.to_csv(csv_file_path, index=False, quotechar='\"', encoding='ascii')\n\n print(\"done, processed\", len(df), \"images\")", "def process_image(self, **kwargs):\n try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def _process_images(\n raw_image_paths: pd.Series,\n raw_images_dir: str,\n ROI_definitions: Dict[str, Tuple],\n flat_field_filepath_or_none: Union[str, None],\n save_ROIs: bool,\n save_dark_frame_corrected_images: bool,\n save_flat_field_corrected_images: bool,\n) -> Tuple[pd.DataFrame, 
pd.DataFrame]:\n\n def _process_image_local(raw_image_path):\n \"\"\" Version of process_image with all of the local configuration variables packed in.\n Also encapsulates the opening of the image.\n \"\"\"\n return process_image(\n original_rgb_image=raw.open.as_rgb(raw_image_path),\n original_image_filepath=raw_image_path,\n raw_images_dir=raw_images_dir,\n ROI_definitions=ROI_definitions,\n flat_field_filepath_or_none=flat_field_filepath_or_none,\n save_ROIs=save_ROIs,\n save_dark_frame_corrected_image=save_dark_frame_corrected_images,\n save_flat_field_corrected_image=save_flat_field_corrected_images,\n )\n\n with ThreadPoolExecutor() as executor:\n # We want identical warnings to be shown only for the first image they occur on (the default),\n # but we also want subsequent calls to process_experiment to start with a fresh warning store\n # so that warnings don't stop showing after the first run.\n # catch_warnings gives us this fresh warning store.\n with warnings.catch_warnings():\n # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples\n roi_summary_data_and_image_diagnostics_dfs_for_files = list(\n tqdm(\n executor.map(_process_image_local, raw_image_paths),\n total=len(raw_image_paths),\n )\n )\n roi_summary_data_for_files, image_diagnostics_for_files = zip(\n *roi_summary_data_and_image_diagnostics_dfs_for_files\n )\n\n roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files)\n image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files)\n\n return roi_summary_data_for_all_files, image_diagnostics_for_all_files", "def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)", "def __call__(self, images, targets):\n pass", "def uploadFile(self,path):\n\n response = requests.post('https://api.imagga.com/v1/content',\n auth=(self.apikey, self.secret),\n files={'image': open(path, 'r')})\n json_data = json.loads(response.text)\n uploadedData=json_data[u'uploaded'][0]\n resourceId=uploadedData[u'id']\n filename = uploadedData[u'filename']\n self.fileToIdMap[filename] = resourceId\n self.getTagsUsingId(resourceId)", "def process_images():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n image = unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key'])\n s3.download_file(input_bucket_name, image, image)\n process_json(image)\n upload_image(image)\n cleanup_files(image)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def upload():\n\n # salvar imagem local ou no db\n if 'file' not in request.files:\n raise ImageUploadError\n\n file = 
request.files['file']\n if file.filename == '':\n raise ImageUploadError\n\n if file and _allowed_file(file.filename):\n try:\n filename = secure_filename(file.filename)\n filepath = os.path.join(UPLOAD_FOLDER, filename)\n file.save(os.path.join(filepath))\n except Exception as e:\n logger.error(\"Error: {0}\".format(e))\n raise internal_exception_handler(\"error to save image!\")\n\n # se não error, processar imagem usando opencv\n ft = FaceTagger()\n try:\n faces = ft.image_process(filepath)\n except Exception as e:\n logger.error(\"Error: {0}\".format(e))\n raise internal_exception_handler(\"process image failed\")\n\n # TODO: return image id (string) unique\n return jsonify({'data': {'faces': faces}}), 200\n else:\n return jsonify({'errors':\n {'file_error': ['file format not allowed, ' +\n 'only {0}'.format(\n ALLOWED_EXTENSIONS)]}}), 422", "def serve_files(self, image_files: List[Text]):\n if not self.sess:\n self.build()\n predictions = self.sess.run(\n self.signitures['prediction'],\n feed_dict={self.signitures['image_files']: image_files})\n return predictions", "def test_measurement_on_images(file_list):\n test_details_list = []\n for idx, file in enumerate(file_list):\n dict_results = {}\n image = cv2.imread(file)\n FaceDetector().run(image, dict_results)\n file_name = ntpath.basename(file)\n is_there_face = \"True\" in file_name\n test_details_list.append([file_name, is_there_face, dict_results[\"FaceDetector\"],\n is_there_face == dict_results[\"FaceDetector\"]])\n\n # print test results in a readable table format\n headers = ['File Name', 'Face Exist', 'Measurement Result', 'Test Result']\n print(tabulate(test_details_list, headers))", "def preprocess_images(images_dir, image_dims, logger):\n find_str = images_dir + '/**/*.jpg'\n images = glob.glob(find_str, recursive=True)\n num_samples = get_num_samples(images_dir)\n\n # Load in the already processed file list\n proc_list_path = images_dir + '/processed_list.txt'\n if os.path.isfile(proc_list_path):\n with open(proc_list_path) as f:\n proc_list = f.read().split('\\n')\n else:\n proc_list = []\n \n i = 1\n for image in images:\n image_name = image.split('/')[-1]\n if image not in proc_list:\n logger.info(\"Processing %s\", \" {} - {}/{}\".format(\n image_name, i, num_samples))\n try:\n processed_image = ImageCheck.check_and_crop(image)\n except (ImageCheck.ObjectMissingError,\n ImageCheck.WormMissingError,\n ImageCheck.MultipleWormsError,\n ImageCheck.TooBlurryError) as e:\n logger.info(\"Processing Error: %s\",\n \"Image at: \\n{} \\n Produced error: {} \\n Removing\"\n \" image\".format(image, e))\n os.remove(image)\n i = i + 1\n continue\n cv2.imwrite(image, processed_image)\n with open(proc_list_path, 'a') as f:\n f.write(image + '\\n')\n else:\n logger.info(\"Skipping %s\", \" {} (already processed) - {}/{}\".format(\n image_name, i, num_samples))\n i = i + 1", "def run(self, input_path, output_path):\n # read in data\n try:\n image = Image.open(input_path)\n except Exception:\n raise ValueError(\"invalid image file\")\n \n # data preprocessing\n img = self.preprocess(image)\n \n # perform inference\n output = self.model(img)\n \n # post process\n results = self.postprocess(output)\n \n # save output\n results = {'results': results}\n\n with open(output_path, 'w') as out:\n json.dump(results, out)", "def main():\n\n # this will analyze all files in the input_files directory\n for folder in [x for x in os.listdir(os.path.join(os.getcwd(), 'test_directory')) if os.path.isdir(os.path.join(os.getcwd(), 'test_directory', 
x))]:\n try:\n print(f'Creating GED_Repo for files in {folder}')\n g = GED_Repo([os.path.join(os.getcwd(), 'test_directory', folder, f) for f in os.listdir(os.path.join(os.getcwd(), 'test_directory', folder)) if f.endswith('.ged')])\n g.check_data()\n g.print_data()\n g.print_individuals()\n g.print_families()\n except ValueError as v:\n print(v)\n except FileNotFoundError as f:\n print(f)", "def test_http_classifier(self):\n \n msg = \"\"\n \n files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n\n self.addr = \"http://\" + self.Helpers.confs[\"cnn\"][\"api\"][\"server\"] + \\\n ':'+str(self.Helpers.confs[\"cnn\"][\"api\"][\"port\"]) + '/Inference'\n self.headers = {'content-type': 'image/jpeg'}\n\n for data in os.listdir(self.testing_dir):\n if os.path.splitext(data)[1] in self.valid:\n \n response = self.send_request(self.testing_dir + \"/\" + data)\n\n msg = \"\"\n if response[\"Diagnosis\"] == \"Positive\" and \"_1.\" in data:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif response[\"Diagnosis\"] == \"Positive\" and \"_0.\" in data:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_0.\" in data:\n tn += 1\n msg = \"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_1.\" in data:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\" \n \n files += 1\n \n self.Helpers.logger.info(msg)\n print()\n time.sleep(7)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def _iterate_over_files(self):\n stats = Statistics()\n\n args = arguments.Args()\n\n for file in args.files:\n\n if isimage(file):\n before_size = stats.calculate_before_optimization(file)\n\n puts(\"%s %s\" % (\n e(\"==>\"),\n os.path.basename(file))\n )\n\n if \"--lossy\" in args.flags:\n Optimize.lossy(file)\n if \"--lossless\" in args.flags:\n Optimize.lossless(file)\n after_size = stats.calculate_after_optimization(file)\n\n puts(\"%s %s (%s)\" % (\n p(\"<==\"),\n os.path.basename(file),\n s(after_size) if after_size < before_size else after_size\n ))\n\n stats.show_statistics()", "def main():\n print(\"For each image, type the new name of the file.\" +\n \" No extension necessary!\", end=\"\\n\\n\")\n file_list = input_path.glob(f\"*.{args.ext}\")\n plt.ion()\n\n for pic in file_list:\n img = io.imread(str(pic))\n img = rescale(img, 0.25)\n img = rotate(img, 90, resize = True)\n plt.draw()\n plt.pause(0.001)\n if args.vinyl:\n new_name = get_vinyl_name()\n else:\n print(\"\\n\")\n new_name = input(\n \"Please enter a new filename. 
Press [enter] to skip: \")\n if new_name:\n if not new_name.endswith(args.ext):\n new_name += \".\" + args.ext\n # io.imsave(output_path / new_name, img)\n shutil.copyfile(pic, output_path / new_name)\n if args.replace:\n os.remove(pic)", "def imageFileProcessor(path):\n # Show all files in RawCapturedPicture\n # ..., and get the completed path files\n img_paths = []\n for ea in o_tl.showAllFiles(path):\n img_paths.append(os.path.join(path, ea))\n\n # Empty face list\n faces = []\n # Empty ID list\n IDs = []\n\n # Looping through all the image paths and loading the IDs and the faces\n for each_path in img_paths:\n # Loading the image and converting it to gray scale\n pil_img = Image.open(each_path).convert('L')\n # Converting the PIL image into numpy array\n image_numpy = np.array(pil_img, 'uint8')\n # Getting the Id from the image\n Id = int(os.path.split(each_path)[-1].split(\"_\")[1])\n # Extract the face from the training image sample\n faces.append(image_numpy)\n IDs.append(Id)\n return faces, IDs", "def evaluate(model, dataset, append_submission, dataset_root):\n\n with open(os.path.join(dataset_root, dataset + '.json'), 'r') as f:\n image_list = json.load(f)\n\n print('Running evaluation on {} set...'.format(dataset))\n\n count_img=0\n for img in image_list:\n img_path = os.path.join(dataset_root, 'images', dataset, img['filename'])\n pil_img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(pil_img)\n x = preprocess_input(x)\n x = np.expand_dims(x, 0)\n output = model.predict(x)\n sys.stdout.write('\\r'+str(count_img/len(image_list))+' ')\n sys.stdout.flush()\n append_submission(img['filename'], output[0, :4], output[0, 4:])\n count_img+=1", "def main():\n\n experiment_config_path = _parse_input()\n all_experiments = read_experiments_config(experiment_config_path)\n\n for experiment_name, experiment_config in all_experiments.items():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n results, model = perform_experiment(experiment_config)\n weights_file_name = save_model_weights(experiment_name, model)\n testing_layers_files = save_layers_logs(results['Layers Testing Output'], 'Testing')\n training_layers_files = save_layers_logs(results['Layers Training Output'], 'Training')\n\n results.pop('Layers Training Output')\n results.pop('Layers Testing Output')\n print(\"Testing Data Confusion Matrix\")\n print(np.array2string(results['Confusion Matrix']))\n results['Confusion Matrix'] = str(results['Confusion Matrix'].tolist())\n print(\"Experiment Results:\")\n print(json.dumps(results, indent=2, sort_keys=True))\n\n results_file = save_experiment_log(results, experiment_name)\n upload_to_s3([], [], [results_file], [weights_file_name], testing_layers_files + training_layers_files)", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = 
int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def main():\n\t# create output file\n\tif not os.path.exists(OUTPUT_PATH):\n\t\tos.makedirs(OUTPUT_PATH)\n\n\t# init model with pre-trained weights\n\tmodel = create_model()\n\n\tmodel.load_state_dict(torch.load(PATH_PYTORCH_WEIGHTS)['state_dict'])\n\tmodel.eval()\n\n\n\t# if GPU is enabled\n\tif USE_GPU:\n\t\tmodel.cuda()\n\tvideos = os.listdir(INPUT_PATH)\n\t# load and preprocess images in folder\n\tfor y in videos[numTraining:(numTraining+numValidation)]:\n\t\tif not os.path.exists(os.path.join(OUTPUT_PATH,y)):\n\t\t\tos.makedirs(os.path.join(OUTPUT_PATH,y))\n\t\t\tfor i, name in enumerate(os.listdir(os.path.join(INPUT_PATH,y))):\n\t\t\t\tfilename = os.path.join(INPUT_PATH,y,'{:04d}.jpg'.format(i+1))\n\t\t\t\timage_tensor, image_size = load_image(filename)\n\n\t\t\t\tif USE_GPU:\n\t\t\t\t\timage_tensor = image_tensor.cuda()\n\n\t\t\t\t# run model inference\n\t\t\t\tprediction = model.forward(image_tensor[None, ...]) # add extra batch dimension\n\n\t\t\t\t# get result to cpu and squeeze dimensions\n\t\t\t\tif USE_GPU:\n\t\t\t\t\tprediction = prediction.squeeze().data.cpu().numpy()\n\t\t\t\telse:\n\t\t\t\t\tprediction = prediction.squeeze().data.numpy()\n\n\t\t\t\t# postprocess\n\t\t\t\t# first normalize [0,1]\n\t\t\t\tprediction = normalize_map(prediction)\n\t\t\t\tsaliency = postprocess_prediction(prediction, image_size)\n\t\t\t\tsaliency = normalize_map(saliency)\n\t\t\t\tsaliency *= 255\n\t\t\t\tsaliency = saliency.astype(np.uint8)\n\t\t\t\t# save saliency\n\n\t\t\t\tcv2.imwrite(os.path.join(OUTPUT_PATH,str(y),name), saliency)\n\t\t\t\tprint(\"Processed image {} from video {}\".format(i+1,y), end=\"\\r\")\n\t\t\t\tsys.stdout.flush()", "def images(self, **kwargs):\n\n raise NotImplementedError", "def 
run(self,image, label, featureClasses, settings, enabledImageTypes,csvFile):\n print('Processing started')\n import time\n startTime = time.time()\n # grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(imageNode.GetName()))\n grayscaleImage = sitk.ReadImage(image)\n #sitkUtils.PushToSlicer(label, labelNode.GetName(), overwrite=True, compositeView=2)\n labelsDict = {}\n if label:\n print(\"label={}\".format(label))\n labelsDict = self.prepareLabelsFromLabelmap(label, grayscaleImage, labelsDict)\n # if segmentationNode:\n # labelsDict = self.prepareLabelsFromSegmentation(segmentationNode, grayscaleImage, labelsDict)\n\n #self.featureValues = extractor.execute(grayscaleImage, labelImage, images, **kwargs)\n featuresDict = {}\n for l in labelsDict.keys():\n print(\"Calculating features for \"+l)\n try:\n featuresDict[l] = self.calculateFeatures(grayscaleImage,\n labelsDict[l],\n featureClasses,\n settings,\n enabledImageTypes)\n except:\n print('calculateFeatures() failed')\n traceback.print_exc()\n self.saveFeatures2CSVFile(featuresDict,csvFile)\n print(\"Completed\")\n endtime = time.time()\n print(\"totalTime={}\".format(endtime-startTime))\n # return featuresDict", "def createAverageImages(self):\n for grabber in self.grabbers:\n callsign = grabber[\"ID\"]\n callMatch = \"%s/%s*\" % (self.downloadFolder, callsign)\n fnameOut = \"%s/%s.%s.jpg\" % (self.averagesFolder, callsign, self.timeCode())\n cmd = \"convert %s -evaluate-sequence Mean %s\" %(callMatch, fnameOut)\n print(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def push_staged_images(self, copy_to_data=True):\n image_paths = self.get_staged_images()\n if len(image_paths) == 0:\n logger.info(\"No files to push.\")\n return\n\n index = self.get_index()\n next_index = LocalDataset.get_max_frame_index(index) + 1\n for path in image_paths:\n image_info = LocalDataset.get_image_info(path)\n if image_info is None:\n logger.error(f\"Skipping '{path}'\")\n continue\n\n md5 = image_info[\"md5\"]\n if not self.conservator.dvc_hash_exists(md5):\n self.upload_image(path, md5)\n else:\n logger.debug(f\"File '{path}' already exists on conservator, skipping\")\n\n # Dataset frames uploaded in this manner do not have a video.\n # They are considered \"loose\" frames and use the dataset's ID\n # as the video ID.\n video_id = index[\"datasetId\"]\n frame_id = self.conservator.generate_id()\n\n del image_info[\"filename\"]\n\n new_frame = {\n **image_info,\n \"datasetFrameId\": frame_id,\n \"isEmpty\": False,\n \"isFlagged\": False,\n \"annotations\": [],\n \"videoMetadata\": {\n \"frameId\": frame_id,\n \"videoId\": video_id,\n \"frameIndex\": next_index,\n },\n }\n index[\"frames\"].append(new_frame)\n logger.debug(f\"Added new DatasetFrame with id {frame_id}\")\n\n if copy_to_data:\n os.makedirs(self.data_path, exist_ok=True)\n\n # First copy it to the cache:\n cache_path = self.get_cache_path(md5)\n cache_dir = os.path.split(cache_path)[0]\n os.makedirs(cache_dir, exist_ok=True)\n logger.debug(f\"Copying file from '{path}' to '{cache_path}'\")\n shutil.copyfile(path, cache_path)\n\n # Then link to data path:\n filename = f\"video-{video_id}-frame-{next_index:06d}-{frame_id}.jpg\"\n data_path = os.path.join(self.data_path, filename)\n logger.debug(f\"Linking '{data_path}' to '{cache_path}'\")\n os.link(cache_path, data_path)\n\n next_index += 1\n\n with open(self.index_path, \"w\") as f:\n json.dump(index, f, indent=1, sort_keys=True, separators=(\",\", \": \"))\n with 
open(self.staging_path, \"w\") as f:\n json.dump([], f)", "def start_video_analysis(event, context): # This is the Lambda handler to kick off Rekognition job\n\n for Record in event['Records']: # There may have been more than one file uploaded\n Bucket = Record['s3']['bucket']['name']\n NewFileUploaded = urllib.parse.unquote_plus(Record['s3']['object']['key'])\n # Remove the \"+\" signs of url encoding and return original whitespaces to the filename\n start_label_detection(Bucket, NewFileUploaded)\n \n return", "def upload_files(self, context, instance_ref, bless_files):\n raise Exception(\"Uploading files to the image service is not supported.\")", "def execute(self, images_and_density_maps):\n raise NotImplementedError(\"execute method not implemented in the child class\")", "def save_uploadedfiles(uploadedfiles: list, foldername: str, process=True):\r\n # make sure the foldername exists\r\n if not os.path.exists(foldername+\"/\"):\r\n os.makedirs(foldername+\"/\")\r\n # Go over each file\r\n for i, file in enumerate(uploadedfiles):\r\n if process:\r\n # define the picture filename according to the counter i, png file for processing with GauGAN\r\n joined_path = os.path.join(foldername,\"%s.png\"%i)\r\n else:\r\n # define the picture filename according to the counter i, jpg file if processing is skipped\r\n joined_path = os.path.join(foldername,\"%s.jpg\"%i)\r\n # open the file\r\n resized = Image.open(file)\r\n # make sure to have the picture in 512X512 resolution\r\n resized = resized.resize((512,512))\r\n # save the file\r\n if process:\r\n resized.save(joined_path)\r\n else:\r\n resized.save(joined_path, \"JPEG\")\r\n return st.success(\"Files were saved successfully\")", "def model_processing(img):\n\n # assert isinstance(img, EmotionalImage)\n\n if str(img.name).find('json') > -1:\n return\n user = get_user(img.path + '/' + 'meta.json')\n filePath = img.path + '/' + img.name\n # print(\"---------------Processsing----------------\", img.name)\n\n features = extract_features(filePath)\n emotions = predict_emotions(features)\n uuid1 = uuid.uuid4()\n emImage = EmotionalImage(\n uuid1, img.name, img.path, features, emotions, \"\", \"\", \"\")\n user.images.append(emImage)\n # user.save()", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def load_batch(self, request, *args, **kwargs):\n try:\n # get a list of the files in the associated path\n base_path = self.request.user.profile.VideoExperiment_path\n file_list = listdir(base_path)\n # include only csv files\n file_list = [el[:-4] for el in file_list if ('.csv' in el) and ('sync' not in el)]\n # get a list of the existing file names (bonsai)\n existing_rows = [el[0] for el in VideoExperiment.objects.values_list('slug')]\n # for all the files\n for file in file_list:\n # check if the entry already exists\n if file.lower() in existing_rows:\n # if so, skip making a new one\n continue\n # get the data for the entry\n data_dict = parse_path_experiment(file, self, 'VideoExperiment_path')\n # get rid of the animal2 entry\n del data_dict['animal2']\n # of the screen one\n del data_dict['screen_path']\n # and of the motive one\n del data_dict['track_path']\n # check the paths in the filesystem, 
otherwise leave the entry empty\n for key, value in data_dict.items():\n # if the entry is already empty, don't check\n if data_dict[key] == '':\n continue\n if (isinstance(value, str)) and ('path' in key) and (not exists(value)):\n # print a warning\n print('Path not found for key %s and value %s' % (key, value))\n # clear the path\n data_dict[key] = ''\n\n # # if the tif file exists but the calcium_data file doesn't, log it in the notes\n # This is for when we didn't have calcium signal extraction as part of snakemake\n # if (data_dict['fluo_path'] == '') and (data_dict['tif_path'] != ''):\n # data_dict['imaging'] = 'no'\n # data_dict['notes'] += 'norois'\n # create the model instance with the data\n model_instance = VideoExperiment.objects.create(**data_dict)\n # get the model for the experiment type to use\n experiment_type = ExperimentType.objects.filter(experiment_name='Free_behavior')\n # add the experiment type to the model instance (must use set() cause m2m)\n model_instance.experiment_type.set(experiment_type)\n # save the model instance\n model_instance.save()\n\n return HttpResponseRedirect('/loggers/video_experiment/')\n except:\n print('Problem file:' + file)\n return HttpResponseBadRequest('loading file %s failed, check file names' % file)", "def __extract_images(self, images_file, labels_file, phase):\n images, max_size = self.__readImages(\n os.path.join(self.outdir, images_file))\n assert len(labels) == len(images), '%d != %d' % (\n len(labels), len(images))\n\n map_size = len(images) * 28 * 28 * 10\n env = lmdb.open(self.outdir, map_size=map_size)\n\n with env.begin(write=True) as txn:\n # txn is a Transaction object\n for i, image in enumerate(images):\n datum = annfab.utils.image_to_datum(image, labels[i])\n str_id = '{:08}'.format(i)\n\n # The encode is only essential in Python 3\n txn.put(str_id.encode('ascii'), datum.SerializeToString())", "def process_imgdir(self,imgdir):\n #Write images into resultdir\n resultdir = os.path.join(imgdir, 'results')\n #Read images from input dir\n inputdir = os.path.join(imgdir, 'inputs')\n shutil.rmtree(resultdir)\n os.mkdir(resultdir)\n #Read files from input images\n for fullname in os.listdir(inputdir):\n filepath = os.path.join(inputdir, fullname)\n if os.path.isfile(filepath):\n basename = os.path.basename(filepath)\n image = cv2.imread(filepath, cv2.IMREAD_COLOR)\n if len(image.shape) == 3 and image.shape[2] == 3:\n print('Processing %s ...' 
% basename)\n else:\n sys.stderr.write('Skipping %s, not RGB' % basename)\n continue\n #Extract haze from the scene and then save the image\n dehazed = self.get_scene_radiance(image)\n cv2.imwrite(os.path.join(resultdir, basename), dehazed)\n return os.path.join(resultdir, basename)", "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, 
max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def upload_data(self):\n labeled_ids = self.get_labeled_ids()\n\n users = []\n users_ids = []\n\n activities = []\n last_activities = []\n\n trackpoints = []\n\n for root, dirs, files in os.walk(DATASET_PATH, topdown=True):\n path_parts = root.split(\"/\")\n if len(path_parts) < 4: # check if inside user folder\n continue\n user_id = path_parts[3]\n\n if user_id not in labeled_ids:\n continue\n\n if user_id not in users_ids:\n users_ids.append(user_id)\n users.append({\"id\": user_id, \"has_labels\": user_id in labeled_ids})\n\n if 'labels.txt' in files:\n last_activities = self.get_activities(user_id, root + \"/labels.txt\")\n activities.extend(last_activities)\n\n if 'Trajectory' in root:\n files.sort()\n for file_path in files:\n trackpoints.extend(self.get_trackpoints(root + \"/\" + file_path, last_activities))\n print(len(trackpoints))\n\n\n print(\"Uploading data\")\n self.insert_data_bulk(\"User\", users)\n print(\" > Users done\")\n self.insert_data_bulk(\"Activity\", activities)\n print(\" > Activities done\")\n self.insert_data_bulk(\"TrackPoint\", trackpoints)\n print(\" > TrackPoints done\")\n self.cursor.close()", "def webcam_submit():\n\n # Base64 string of image.\n pic_64 = request.form['file'].partition('base64,')[2]\n\n # Convert base64 string to bytes object.\n pic = base64.b64decode(pic_64)\n\n # Save bytes object to storage and predict.\n destination_filename = save_photo(pic)\n destination = os.path.join('app/static/img/tmp', destination_filename)\n pred_class, pred_idx, outputs = classify_photo(destination=destination)\n\n # If probability of classifying the image is less than 92%, ask user to\n # resubmit a different picture.\n if max(outputs) < 0.92:\n print(f\"{pred_class}: {max(outputs)}\")\n flash(\n \"We are unsure about What Those R. 
Please try another image.\",\n \"form-warning\"\n )\n return jsonify({\"redirect\": url_for('index')})\n\n else:\n return jsonify({\"results\":\n url_for('results',\n pred_class=str(pred_class).replace('_',\n ' '),\n pred_prob=round(max(outputs).item() * 100,\n 4),\n img=os.path.join(\n 'img/tmp',\n destination_filename)\n )\n })", "def upload():\n global local_filename\n global task_id\n head_group_id = current_app.config['HEAD_GROUP_ID']\n if request.method == 'POST':\n user_id = request.form['user_id']\n followed_group_id = request.form['group_id']\n if not followed_group_id:\n return jsonify({'code': 400, 'msg': 'group_id is required.'})\n elif not user_id:\n return jsonify({'code': 400, 'msg': 'user_id is required.'})\n\n if 'photo' not in request.files:\n return jsonify({'code': 400, 'msg': 'photo is required.'})\n head_pic = request.files['photo']\n\n try:\n # 1、校验组是否存在\n group = get_group(head_group_id)\n if not group:\n return jsonify({'code': 400, 'msg': 'can not found group by id [{0}]'.format(head_group_id)})\n photos = UploadSet('photos', IMAGES)\n configure_uploads(current_app, photos)\n filename = photos.save(head_pic)\n local_filename = photos.path(filename)\n oss_dir = group[1]\n oss_filename = '{0}{1}'.format(group[1], filename)\n if not oss_dir.endswith(\"/\"):\n oss_filename = '{0}/{1}'.format(group[1], filename)\n\n # 2、获得图片的base64编码\n photo_base64 = base64.b64encode(open(local_filename, 'rb').read())\n encode_str = str(photo_base64, 'utf-8')\n\n # 3、通过第三方接口获得图片的特征值\n ret = r.post(current_app.config['GET_FEATURE_URL'], data=json.dumps({'image': encode_str}),\n headers={'content-type': 'application/json'})\n response = json.loads(ret.text)\n\n if 'body' in response:\n body = response['body']\n # 4、上传图片到oss\n oss_url = upload_file(oss_filename, local_filename)\n # 5、保存photo数据到db\n head_pic = get_image_by_muiti_condition(filename, oss_filename, int(head_group_id))\n global head_pic_id\n if not head_pic:\n head_pic_id = add_image(filename, oss_filename, int(head_group_id), json.dumps(body))\n else:\n head_pic_id = head_pic[0]\n update_image(filename, oss_filename, int(head_group_id), json.dumps(body), head_pic_id)\n # 6、保存用户、头像图片和关注组的关系\n face = get_face_by_user_id_and_grou_id(user_id, followed_group_id)\n if not face:\n add_face(user_id, followed_group_id, head_pic_id)\n else:\n update_face(user_id, followed_group_id, head_pic_id, face[0])\n # 7、添加一个相似图片查找任务,使用用户头像去所关注的组中找相似的图片,结果缓存到结果表\n task_id += 1\n Thread(target=find_similar_task,\n args=(current_app._get_current_object(), \"face_%s\" % task_id, followed_group_id, head_pic_id,)\n ).start()\n\n # 8、返回头像保存结果\n return jsonify(\n {'code': 200, 'image_id': head_pic_id, 'url': oss_url,\n 'url_express': current_app.config['OSS_URL_EXPIRES'],\n 'msg': 'modify head image success.'})\n else:\n return jsonify(response)\n except Exception as e:\n traceback.print_exc()\n return jsonify({'code': 500, 'error': '{0}'.format(e)})\n finally:\n # 6、删除临时图片\n try:\n if os.path.isfile(local_filename):\n os.remove(local_filename)\n except FileNotFoundError:\n print(\"delete not exits file\")\n except Exception:\n traceback.print_exc()\n else:\n return jsonify({'code': 400, 'msg': 'upload image failed.'})", "def test_postImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n testBot.run() #THEN the bot should save images from the group to the imgs folder\n self.assertTrue(len(os.listdir('./imgs')) > 0) #AND there should be at least one image in the 
folder", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = 
\"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def analyze_files(self):\n # Reset the analyze file results\n self.analzye_results.clear()\n self.filesListWidget.clear()\n\n # Update the progress bar\n self.scanFilesProgressBar.setValue(0)\n self.scanFilesProgressBar.setMaximum(len(self.selected_files))\n\n # Reset the index\n self.analyze_file_index = 0\n\n # Start the analyzing\n # This will start the thread\n # When this thread is complete, the signal will be emitted\n # A and new thread will be created and started\n # This is to prevent all the threads starting at once\n if len(self.selected_files) > 0:\n self.analyze_file(self.selected_files[0])" ]
[ "0.6907992", "0.6606996", "0.65919435", "0.6470824", "0.6414355", "0.6368806", "0.6355245", "0.63245857", "0.6265804", "0.62025523", "0.62011194", "0.6152564", "0.6149888", "0.61378866", "0.61241436", "0.608345", "0.60723394", "0.606071", "0.606038", "0.6033568", "0.6033372", "0.60208344", "0.6011238", "0.60009104", "0.59986246", "0.5977645", "0.59569484", "0.5935017", "0.59187293", "0.58748215", "0.5867482", "0.58671016", "0.58591974", "0.5846389", "0.5838646", "0.5835147", "0.5834258", "0.5819677", "0.5819601", "0.5818455", "0.5808703", "0.5808633", "0.5807695", "0.5796048", "0.5795814", "0.57935447", "0.579323", "0.5770964", "0.57653344", "0.5761508", "0.57341146", "0.5731737", "0.57285637", "0.5714669", "0.5714029", "0.5713882", "0.56947434", "0.56890714", "0.56828684", "0.56809247", "0.5674022", "0.5673026", "0.5671756", "0.56702286", "0.5662069", "0.56449896", "0.56363344", "0.5636133", "0.56166804", "0.5615678", "0.5615218", "0.5610749", "0.56017953", "0.5599583", "0.55987996", "0.5584171", "0.5573594", "0.5570759", "0.5564121", "0.5562329", "0.55621034", "0.5558031", "0.55553144", "0.5555206", "0.55527925", "0.5550967", "0.55482507", "0.55452895", "0.5544375", "0.55358744", "0.552857", "0.55277085", "0.55136174", "0.5513494", "0.5499478", "0.54976684", "0.54919946", "0.5489994", "0.5489862", "0.5488257" ]
0.76435435
0
Download multiple processed images in a zip archive.
def download_multiple(select_files, savepath, id, ext):
    with zipfile.ZipFile(savepath + '/processed_images.zip', mode='w') as zf:
        for file in select_files:
            pro_img, _, _, _, _ = get_image_pair(file, id)
            output = io.BytesIO()
            pro_img.save(output, format=ext)
            filename = file + '.' + ext
            zf.writestr(filename, output.getvalue())
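A minimal usage sketch for the function above, assuming it lives in a module that imports io and zipfile and that get_image_pair(file, id) returns a processed PIL image as its first value; the file names, save path, and user id below are hypothetical.

# Hypothetical call: bundle three processed images as PNGs into
# /tmp/downloads/processed_images.zip. get_image_pair is assumed to
# look each file up for the given user id and return a PIL image first.
select_files = ['scan_001', 'scan_002', 'scan_003']
download_multiple(select_files, savepath='/tmp/downloads', id=42, ext='PNG')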
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. \n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def download_files(self):", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in 
rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def download_images(img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n # If the directory doesn't exist, create it\n os.mkdir(dest_dir)\n count = 0\n img_string = ''\n # Copies each file from the url provided to the directory provided\n for file in img_urls:\n new_filename = '{}/img{}.jpg'.format(dest_dir, count)\n print \"Retrieving {}\".format(file)\n urllib.urlretrieve(file, new_filename)\n img_string += \"<img src = 'img{}.jpg'>\".format(count)\n count += 1\n print \"Retrieved {} files\".format(count)\n # Creates an html file to display the completed image\n with open('{}/index.html'.format(dest_dir), 'w') as f:\n f.write(\n '<html>\\n<body>\\n{}\\n</body>\\n</html>'.format(img_string)\n )\n pass", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def get_zipped_images(self, num_sequence=None):\n self._create_pdf(self.survey, self.response)\n self._build_image_names(num_sequence, self._page_count)\n self._create_index()\n self._build_zip()\n return self.zip", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n 
self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)", "def download_and_unzip(self, file_list):\n self.logger.logMsg(\"Starting Download and unzip files\")\n rtn = True\n if not len(file_list):\n self.logger.logError('Nothing to Download Return ')\n raise Exception('Nothing to Download')\n else:\n for file in file_list:\n try:\n self.logger.logMsg(\"Downloading {}\".format(file))\n with urlopen(file) as zipresp:\n with ZipFile(BytesIO(zipresp.read())) as zfile:\n zfile.extractall(self.download_path)\n except Exception as e:\n self.logger.logError(\"Error {} Downloading/Unzipping {}\".format(str(e), file))\n rtn = False\n if not rtn:\n self.logger.logError(\"Error Download and unzip files\")\n raise Exception('Failed to Download/Unzip one or More Files')\n\n self.logger.logMsg(\"Finished Download and unzip files\")", "def download(request):\n \n\n def make_archive(source, destination):\n print(source, destination)\n base = os.path.basename(destination)\n name = base.split('.')[0]\n format = base.split('.')[1]\n archive_from = os.path.dirname(source)\n archive_to = os.path.basename(source.strip(os.sep))\n print(source, destination, archive_from, archive_to)\n shutil.make_archive(name, format, archive_from, archive_to)\n shutil.move('%s.%s' % (name, format), destination)\n\n user_id = request.session['user_id']\n user_root = request.session['user_root']\n search_id = request.session['search_id']\n logger = Logger(user_root,user_id)\n logger.write(\"start compressing images..\")\n t_start_zip=time.time()\n zip_target = os.path.join(user_root, search_id)\n zip_path = os.path.join(user_root, search_id, \"Color_images.zip\")\n make_archive(zip_target, zip_path)\n print(\"finish zip.\")\n zip_file = open(zip_path, '+rb')\n response = HttpResponse(zip_file, content_type='application/zip')\n response[\n 'Content-Disposition'] = 'attachment; filename=%s' % \"dataset.zip\"\n response['Content-Length'] = os.path.getsize(zip_path)\n zip_file.close()\n logger.write(\"compressing images finished (\"+convert_duration_time(time.time(),t_start_zip)+\"s)\")\n\n return response", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n 
timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def download_images(keyword, limit = 1):\n #creating list of arguments\n arguments = {\"keywords\": keyword ,\n \"limit\": limit , \n \"print_urls\": False,\n \"output_directory\": OUT_DIR} \n\n # Pass the arguments to above function and download images\n paths = response.download(arguments)", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_artifacts(token, artifacts):\n zipfiles = []\n for a in artifacts:\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n datename = a[\"name\"]+updated_at.strftime(\"-%Y-%m-%d\")\n filename = datename + \".zip\"\n if os.path.exists(filename):\n zipfiles.append((a[\"name\"], datename, filename))\n print(f\"{filename} skipped, already downloaded\")\n continue\n\n print(f\"Fetching {filename}\")\n ok = run_curl(token, a[\"archive_download_url\"], filename)\n if not ok:\n continue\n\n zipfiles.append((a[\"name\"], datename, filename))\n\n return zipfiles", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def download(root: str) -> None:\n for ix in [1, 2]:\n fn = f\"lizard_images{ix}.zip\"\n url = f\"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/{fn}\"\n SimpleDownloader.download(url, root)\n\n url = \"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/lizard_labels.zip\"\n SimpleDownloader.download(url, root)\n LizardDataModule.extract_zips(root, rm=True)", "def process_workers(worker_id: int, future: Future, params: DownloadCommandParameters) -> None:\n # Wait for the result.\n res = future.result()\n # If response is empty, let the user know.\n if not res:\n logger.error(f\"No data in response.\")\n raise ValueError(\"Issue downloading images.\")\n else:\n download_file = os.path.join(params.output, f\"download_{worker_id}.zip\")\n with open(download_file, \"wb\") as stream:\n for item in res:\n stream.write(item)", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_images_tiff(self):\n self.show_as_waiting(True)\n self.download_images('TIFF')\n self.show_as_waiting(False)", "def download_images(img_urls, dest_dir):\n # Creating the 
directory if the directory does not already exist\n if not os.path.exists(str(dest_dir)):\n os.mkdir(dest_dir)\n print ('Retrieving...')\n with open(str(dest_dir) + '/index.html', 'w') as f:\n f.write(\"<html>\\n<body>\\n\")\n for index, url in enumerate(img_urls):\n img_name = 'img' + str(index + 1)\n urllib.urlretrieve(\"https://code.google.com\" + url, filename=str(dest_dir) + '/'\n + img_name +'.jpg')\n print ('Downloaded ' + url[-10:] + \": \" + \\\n str(index + 1) + \" images downloaded\")\n\n f.write(\"<img src=\" + '\"' + img_name +\".jpg\" +'\">')\n f.write(\"\\n</html>\\n</body>\")\n print ('Download Complete!')\n pass", "def download_images(img_urls, dest_dir):\n if len(img_urls) > 0 :\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n # save each images file name\n image_names = []\n # Iterate over each image url, downloading the image to a local file\n img_ctr = 0\n for url in img_urls :\n file_name = 'img' + str(img_ctr) + '.jpg'\n image_names.append(file_name)\n full_name = dest_dir + '/' + file_name\n print('Writing file: %s from %s' % (full_name, url) )\n # When calling the SSLContext constructor directly, CERT_NONE is the default.\n # Since it does not authenticate the other peer it can be insecure\n # Beyond the scope of this exercise (emoji holding my nose)\n unsecure_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n with urllib.request.urlopen(url, context=unsecure_context) as response, open(full_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n img_ctr += 1\n return image_names", "def download_images_png(self):\n self.show_as_waiting(True)\n self.download_images('PNG')\n self.show_as_waiting(False)", "def download(urls, dest_folder):\n pass", "def test_z_download_images(self):\n #img_urls = logpuzzle.read_urls('place_code.google.com')\n img_urls = logpuzzle.read_urls('animal_code.google.com')\n dest_dir = './puzzle_images'\n logpuzzle.download_images(img_urls, dest_dir)\n\n result = os.listdir(dest_dir)\n expected_result = ['img0.jpg', 'img1.jpg', 'img10.jpg', 'img11.jpg', 'img12.jpg', 'img13.jpg', 'img14.jpg', 'img15.jpg', 'img16.jpg', 'img17.jpg', 'img18.jpg', 'img19.jpg', 'img2.jpg', 'img3.jpg', 'img4.jpg', 'img5.jpg', 'img6.jpg', 'img7.jpg', 'img8.jpg', 'img9.jpg']\n self.assertEqual(expected_result, result,\n 'write_index_file() expected {} but got {}'.format(expected_result, result))", "def main():\n for i, url in enumerate(opts.thread, start=1):\n opts.archived_md5 = reload_archive()\n thread = DownloadableThread(i, url)\n thread.resolve_path()\n asyncio.run(thread.download(), debug=False)", "def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)", "def download(urls: List[str], num_threads: int = 40) -> List[str]:\n\n num_files = len(urls)\n start = perf_counter()\n\n print(\"Starting download of %s files . . 
.\" % num_files)\n\n results = multiprocess(urls, Downloader, num_threads=num_threads)\n\n dur = perf_counter() - start\n print(\"Completed download of %s files after %.3f seconds.\" % (num_files, dur))\n\n return results", "def download_card_images(self, card_names, lang=\"en\"):\n for card_name in card_names:\n print(\"Dowloading card imgs for \\'\" + card_name + \"\\' (\" + lang + \")\")\n output_file_name = card_name + \".jpg\"\n output_file_path = IoManager.CARD_IMAGES_PATH_EN + \"/\" + output_file_name if lang == \"en\" else IoManager.CARD_IMAGES_PATH_FR + \"/\" + output_file_name\n output_file_path = output_file_path.replace('//', '__')\n en_url, fr_url = self.get_card_urls(card_name)\n url = en_url if lang == \"en\" else fr_url\n # Open the url image, set stream to True, this will return the stream content.\n resp = requests.get(url, stream=True)\n # Open a local file with wb ( write binary ) permission.\n local_file = open(output_file_path, 'wb')\n # Set decode_content value to True, otherwise the downloaded image file's size will be zero.\n resp.raw.decode_content = True\n # Copy the response stream raw data to local image file.\n shutil.copyfileobj(resp.raw, local_file)\n # Remove the image url response object.\n del resp", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download_images(img_urls, dest_dir, base_url=\"http://code.google.com\"):\n create_dir(dest_dir)\n img_tags = fetch_call(img_urls, dest_dir)\n create_html(dest_dir, img_tags)", "def download_extracted_files(a1000):\n hash_value = demisto.getArg('hash')\n try:\n response = a1000.download_extracted_files(hash_value)\n except Exception as e:\n return_error(str(e))\n\n filename = hash_value + '.zip'\n command_results = CommandResults(\n readable_output=f\"## ReversingLabs A1000 download extraced files \\nExtracted files are available for download \"\n f\"under the name {filename}\"\n )\n\n file_result = fileResult(filename, response.content, file_type=EntryType.FILE)\n\n return [command_results, file_result]", "def download_images(self, im_format: str):\n rows = self.tbl_images.get_selected_rows()\n ids = []\n names = []\n for r in rows:\n ids.append(self.tbl_images.item(r, 0).text())\n names.append(self.tbl_images.item(r, 1).text())\n\n if len(ids) == 1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='Save As..')\n\n dialog.setMimeTypeFilters([\"image/\"+im_format.lower()])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n\n elif len(ids) >= 1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='222Save As..')\n dialog.setMimeTypeFilters(['application/zip'])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n else:\n return", "def download_images(params: DownloadCommandParameters, error_queue: 
Queue):\n api = IsicApi()\n\n # Max size of download set by ISIC API\n MAX_DOWNLOAD_SIZE = 300\n\n # Get all the image ids from the\n image_ids = get_image_ids(params)\n\n # Get all batches.\n image_batches = list(chunks(image_ids, MAX_DOWNLOAD_SIZE))\n\n # Measure the download progress for the full dataset.\n with alive_bar(len(image_batches), title=\"Total Progress\", enrich_print=False) as total_bar:\n # Run concurrent workers to download ìmages.\n with ThreadPoolExecutor(max_workers=params.workers) as executor:\n # Create a worker with a set of images to request and download.\n futures_to_request = {executor.submit(make_request, api, batch, params): batch for batch_idx, batch in enumerate(image_batches)}\n for index, future in enumerate(as_completed(futures_to_request)):\n try:\n process_workers(index, future, params)\n total_bar()\n except Exception as e:\n logger.error(f\"{e}\")\n error_queue.put(futures_to_request[future])", "def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])", "def fetch_files_from_urls(urls, dir):\n makedir(dir)\n try:\n pool = []\n for url in urls:\n p = Process(target=download, args=(url, dir,))\n p.start()\n pool.append(p)\n for p in pool:\n p.join()\n except KeyboardInterrupt:\n print \"Shutdown requested...exiting\"\n # except Exception:\n # traceback.print_exc(file=sys.stdout)\n\n # print(\"removing temporary files from current directory\")\n map(os.remove, glob.glob(\"*.tmp\"))", "def downloadMinio(url_list,list_d):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n\n\n if r.status_code == 200:\n r.raw.decode_content = True\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n metadata = list_d[i]\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n path = os.getcwd()+'/'+filename # image path\n minioClient.fput_object(name,filename,path,'image/jpg',metadata)\n os.remove(filename)\n print(filename,'have been successfuly uploaded')\n print('Done!')", "def download(word, n_images=100):\n\n # Fields for pixbay from https://pixabay.com/api/docs/#api_search_images\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\n for i in range(5):\n 
fields = {\n \"key\": _(s.__secret__, egg_open()),\n \"q\": word,\n \"image_type\": \"photo\",\n \"safesearch\": \"true\",\n \"per_page\": max(3, min(200, n_images + i))\n }\n\n debug_log(f\"fields for request:\\n{ {key: fields[key] for key in fields.keys() if key != 'key'} }\")\n\n r = http.request(method='GET',\n url='https://pixabay.com/api/',\n fields=fields)\n\n debug_log(f\"Response data: {r.data}\")\n\n if \"ERROR\" in str(r.data, 'utf-8'):\n continue\n else:\n break\n\n try:\n data = json.loads(r.data.decode('utf-8'))\n except json.decoder.JSONDecodeError as e:\n warnings.warn(\"Cannot download '{word}'. Bad response: {response}\".format(\n word=word,\n response=str(r.data, 'utf-8')\n ))\n return False\n\n image_urls = [item[\"largeImageURL\"] for item in data[\"hits\"]]\n image_ids = [item[\"id\"] for item in data[\"hits\"]]\n\n\n debug_log(f\"Image urls: {image_urls}\")\n debug_log(f\"Len Image urls: {len(image_urls)}\")\n\n save_dir = os.path.join(s.__STEP_1_CACHE_DIR__, word)\n os.makedirs(save_dir, exist_ok=True)\n\n if len(image_urls) < n_images:\n warnings.warn(\"Not enough images for {word}. Only {len_image_urls} instead of {n_images}.\".format(\n word=word,\n len_image_urls=len(image_urls),\n n_images=n_images\n ))\n open(os.path.join(save_dir, \"SATURATED\"), 'w').close()\n open(os.path.join(save_dir, \"DO_NOT_DELETE\"), 'w').close()\n\n image_paths = [get_unique_save_path_name(save_dir,\n im_id,\n im_url.split('.')[-1]) # Get the right image extension\n for im_id, im_url in zip(image_ids, image_urls)]\n\n debug_log(f\"Image paths: {image_paths}\")\n\n for i, im_url, im_path in zip(range(len(image_urls)), image_urls, image_paths):\n debug_log(f\"Downloading '{word}' image [{i+1}/{len(image_urls)}]: {im_url}\")\n save_file(im_url, im_path, http)\n debug_log(f\"Done! Saved as {im_path}\")\n\n return True", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. 
Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def extract_images(self, images, callback=None):\n\n def _cb(*args):\n if callback:\n callback(*args)\n try:\n #recursively call fn to get remaining images if they exists\n image = images.next()\n self.extracted_images.append(image)\n\n # send extracted images to front-end for display\n self.extract_images(images, callback)\n self.attachment_count += 1\n _cb('attachment-count', self.attachment_count)\n\n except:\n #send images to front end\n self.output_images(self.extracted_images, callback)\n pass", "def download(all):\n print(\"Downloading\")", "def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def download_image_data_from_s3(image_ids, prefix=\"\", num_threads=20):\n\n # simple method for threads to pull from a queue and download files\n def download_object(queue):\n while True:\n obj = queue.get()\n if obj is None:\n break\n obj.Object().download_file(obj.key.lstrip(prefix))\n queue.task_done()\n\n # create a directory to store downloaded metadata\n\n cwd = Path.cwd()\n data_dir = cwd / 'data'\n masks_dir = data_dir / 'masks'\n try:\n os.mkdir(masks_dir)\n except FileExistsError:\n shutil.rmtree(masks_dir)\n os.mkdir(masks_dir)\n os.chdir(masks_dir)\n\n # create a queue for objects that need to be downloaded\n # and spawn threads to download them concurrently\n download_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=download_object, args=(download_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n # loop through the files in the bucket and filter for JSON metadata\n # files for only labeled images; add them to the queue\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(os.environ[\"LABELED_BUCKET_NAME\"])\n for obj in bucket.objects.filter(Prefix=prefix):\n if obj.key.endswith(\"_mask.png\"):\n image_id = obj.key.replace(prefix,'').replace(\"_mask.png\", '')\n if image_id in image_ids:\n download_queue.put(obj)\n\n # wait for the queue to be empty, then join all threads\n download_queue.join()\n for _ in range(num_threads):\n download_queue.put(None)\n for worker in workers:\n worker.join()\n\n # create a directory to store downloaded metadata\n labels_dir = data_dir / 'labels'\n try:\n os.mkdir(labels_dir)\n except FileExistsError:\n shutil.rmtree(labels_dir)\n os.mkdir(labels_dir)\n os.chdir(labels_dir)\n\n # create a queue for objects that need to be downloaded\n # and spawn threads to download them concurrently\n download_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=download_object, args=(download_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n # loop through the files in the bucket and filter for JSON metadata\n # files for only labeled images; add them to the queue\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(os.environ[\"LABELED_BUCKET_NAME\"])\n for obj in bucket.objects.filter(Prefix=prefix):\n if obj.key.endswith(\"_labels.csv\"):\n image_id = obj.key.replace(prefix, '').replace(\"_labels.csv\", '')\n if image_id in image_ids:\n download_queue.put(obj)\n\n # wait for the queue to be empty, then join all threads\n download_queue.join()\n for _ in range(num_threads):\n download_queue.put(None)\n for worker in workers:\n worker.join()\n\n # create a directory to store downloaded metadata\n images_dir = data_dir / 
'images'\n try:\n os.mkdir(images_dir)\n except FileExistsError:\n shutil.rmtree(images_dir)\n os.mkdir(images_dir)\n os.chdir(images_dir)\n\n # create a queue for objects that need to be downloaded\n # and spawn threads to download them concurrently\n download_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=download_object, args=(download_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n # loop through the files in the bucket and filter for JSON metadata\n # files for only labeled images; add them to the queue\n s3 = boto3.resource(\"s3\")\n bucket = s3.Bucket(os.environ[\"LABELED_BUCKET_NAME\"])\n for obj in bucket.objects.filter(Prefix=prefix):\n file_extensions = [\".png\", \".jpg\", \".jpeg\"]\n for extension in file_extensions:\n if obj.key.endswith(extension):\n image_id = obj.key.replace(prefix, '').replace(extension, '')\n if image_id in image_ids:\n download_queue.put(obj)\n\n # wait for the queue to be empty, then join all threads\n download_queue.join()\n for _ in range(num_threads):\n download_queue.put(None)\n for worker in workers:\n worker.join()\n\n os.chdir(cwd)", "def post_download(self, remote_files):\n pass", "def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def _download_images(self, url_file, destination_dir, log_file):\n logger = self.setup_log(log_file)\n logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))\n\n with open(url_file) as urls:\n for i, l in enumerate(urls):\n pass\n bar = progressbar.ProgressBar(i + 1)\n\n download_count = 0\n\n # opening the url file and reading the urls\n with open(url_file, 'r') as urls:\n for i, url in enumerate(urls):\n bar.set(i)\n\n url = url.strip()\n components = urllib.parse.urlparse(url)\n if not (components.scheme and components.netloc and components.path):\n logger.error('%s: \"%s\"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # check whether the robots.txt allows us to crawl this URL\n try:\n can_fetch = self.download_allowed(url, components.scheme, components.netloc)\n except (AttributeError, urllib.error.URLError, ValueError):\n logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # log that image download is disallowed\n if not can_fetch:\n logger.error('%s: %s' % 
(config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # open image url\n try:\n url_response = urllib.request.urlopen(url)\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # check whether the URL content is an image \n if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:\n logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # retrieve the content and store in the destination directory\n os.makedirs(destination_dir, exist_ok=True) \n image_name = '%s_%s' % (download_count + 1, os.path.basename(url))\n with open(os.path.join(destination_dir, image_name), 'wb') as image_file:\n try:\n image_file.write(url_response.read())\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # log download and increment the counter\n logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))\n download_count += 1\n\n # set the progress bar to 100 percent and print a comment and new line for the returning prompt\n bar.complete('completed')\n\n # release the logger handles\n self.shutdown_log(logger)", "def download_gaia(dest_path, files):\n for f in files:\n # Get URL and file name\n file_url, file_name = f\n print(file_name)\n file_path = os.path.join(path, file_name)\n # Download data (if not already)\n if (not os.path.exists(file_path) and not os.path.isfile(file_path)):\n print(\"Downloading {}...\".format(file_name))\n response = urllib.request.urlopen(file_url)\n data = response.read()\n tar_gz = open(file_path, 'wb')\n tar_gz.write(data)\n tar_gz.close()\n # Be nice\n sleep(1)", "def download_artifacts(self):\n for artifact_set in self._sets:\n for artifact in artifact_set.artifacts:\n artifact.download(self._cache_dir)", "def download(size):\n files = glob(f'{size}_chunk/{FILE_BASE}_rdn_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_loc_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_obs_*[!.hdr]')\n\n if len(files) != 3:\n Logger.info('Downloading data')\n\n req = requests.get(URLS[size])\n if req:\n with zipfile.ZipFile(BytesIO(req.content)) as zip:\n zip.extractall()\n else:\n Logger.error(f'Failed to download {size}_chunk data with HTTP error code: {req.status_code}')\n\n # Try again\n files = glob(f'{size}_chunk/{FILE_BASE}_rdn_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_loc_*[!.hdr]') \\\n + glob(f'{size}_chunk/{FILE_BASE}_obs_*[!.hdr]')\n\n if len(files) != 3:\n Logger.error('Not all input files are found')\n return\n\n return files", "def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=None)\n\n print('Processing...')\n\n training_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_train_valid.amat'))\n )\n test_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_test.amat'))\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n 
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')", "def download_data(self):\n headers = {'User-Agent': 'Mozilla/5.0',}\n\n #Request for html data of url page\n r = requests.get(self.url, headers = headers, allow_redirects=True)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n #Checking if folder path exists, if not, creats it\n i=0\n while i<len(self.folder)-1:\n if self.folder[i] == '/':\n if not os.path.isdir(self.folder[:i]):\n os.mkdir(self.folder[:i])\n i+=1\n if i==len(self.folder)-1:\n if not os.path.isdir(self.folder):\n os.mkdir(self.folder)\n\n # if not os.path.isdir(self.folder):\n # os.mkdir(self.folder)\n\n #Gets every href to zip file with data\n entries = []\n for link in soup.find_all('a'):\n if re.search(\"^data/.*.zip\", link.get('href')):\n entries.append(link.get('href'))\n\n #Gets the newest dataset\n self.getCurrentData(entries)\n\n i=0\n #Saves each file in dataset\n for list in self.ListOfZipFiles:\n if not os.path.isfile(self.folder+list[4:]):\n r = requests.get(self.url+list)\n open(self.folder+list[4:], 'wb').write(r.content)\n #deletes prefix \"data/\"\n self.ListOfZipFiles[i] = list[4:]\n i+=1", "def zipfiles (downloadable, name):\n\n print \"compressing files. almost done.\"\n import zipfile\n for book in downloadable:\n if (os.path.exists(os.path.join(name, book[1]))):\n files = os.listdir(os.path.join(name, book[1]))\n cbz = zipfile.ZipFile(os.path.join(name, name + '-' + book[1] + '.cbz'), 'w')\n for file in files:\n cbz.write(os.path.join(name, book[1],file))\n cbz.close()", "def create_images(jsons_info, image_dir, photos_info_dict, num_images=200):\n for item in jsons_info:\n json_info = json.load(open(item, \"r\"))\n category_dir = os.path.join(image_dir, os.path.splitext(os.path.basename(item))[0])\n print(\"Downloading in -- \", category_dir)\n if not os.path.exists(category_dir):\n os.makedirs(category_dir)\n count = 0\n i = 0\n while count < num_images:\n photo_id = json_info[i][\"photo\"]\n link = photos_info_dict[f'{photo_id:09}']\n try:\n urllib.request.urlretrieve(link, f\"{category_dir}/{count}.jpg\")\n count = count + 1\n i = i + 1\n except:\n i = i + 1\n print(\"Image - Downloaded\")", "def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)", "def extract_files(self, *filenames):\n for filename in filenames:\n data = self.read_file(filename)\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print 
(\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = {\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} errors\".format(len(image_links) - count))", "def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)", "async def download_all_images(main_page):\n all_relevant_pages = [f'https://{main_page}']\n async with aiohttp.ClientSession() as session:\n subpages = await get_all_relevant_subpages(session, main_page)\n all_relevant_pages.extend(subpages)\n\n await scrape_pages(session, all_relevant_pages)\n\n logging.info('Images from main page %s and its sub pages were download', main_page)", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def download(self, scenes, bands):\n \n scene_urls = [ build_band_urls(scene, bands) for scene in scenes ]\n image_path = os.path.join(self.download_dir, scenes[0])\n os.makedirs(image_path)\n \n for bands in scene_urls:\n for band in bands:\n download(band, 
image_path)", "def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]", "def download_images(self):\n # download the json for the thread\n self.download_json()\n\n # open the json file\n with self.file.open('r', encoding=\"utf-8\") as json_file:\n # load into data\n data = json.load(json_file)\n\n # grab the posts from\n posts = data[\"posts\"]\n\n # iterate through posts in the thread\n for post_num in range(len(posts)):\n # grab the current post\n post = posts[post_num]\n\n # try to get these attributes. may throw an error because not\n # all posts or replies have images attached\n try:\n # images should have these attributes\n tim = post[\"tim\"]\n ext = post[\"ext\"]\n width = post[\"w\"]\n height = post[\"h\"]\n desired_size = False\n\n # filename consists of \"tim.ext\"\n image_filename = str(tim) + str(ext)\n\n # set resolution based on bool arguments\n if self.sd:\n self.min_width = 720\n self.min_height = 480\n if self.hd:\n self.min_width = 1280\n self.min_height = 720\n if self.fhd:\n self.min_width = 1920\n self.min_height = 1080\n if self.uhd:\n self.min_width = 3840\n self.min_height = 2160\n\n # check if the image is the desired size\n if (height <= self.max_height and height >= self.min_height\n ) and (width <= self.max_width\n and width >= self.min_width):\n desired_size = True\n\n if desired_size:\n try:\n # request image variables\n image_url = self.images_endpoint + image_filename\n image_res = requests.get(image_url)\n image_content = image_res.content\n\n # local image variables\n image_string = str(self.images_path.absolute()) + \\\n \"\\\\\" + image_filename\n image_file = Path(image_string)\n\n # write to disk\n if self.verbose:\n print(\"Downloading\", image_url, \"to\",\n image_string, \"from thread\",\n self.thread_id, \"with a resolution of\",\n width, \"x\", height)\n with image_file.open(\"wb\") as im_file:\n im_file.write(image_content)\n except KeyboardInterrupt:\n sys.exit(1)\n\n except KeyError:\n pass", "def _create_zip_file(self) -> BytesIO:\n zip_file_io = BytesIO()\n with ZipFile(zip_file_io, 'w') as zip_file:\n for image_scraper_model in self._url_model.image_scraper.all():\n image_absolute_path = self._get_image_absolute_path(image_scraper_model)\n zip_file_image_path = self._get_zip_file_image_path(image_absolute_path)\n zip_file.write(image_absolute_path, zip_file_image_path)\n zip_file.close()\n return zip_file_io", "def zip_images(self, messages_to_save):\n\n s = StringIO.StringIO()\n\n try:\n with zipfile.ZipFile(s, mode='w') as zf:\n for message, some_images in messages_to_save.iteritems():\n for an_image in some_images:\n zf.writestr(an_image.name(), an_image.body())\n\n return True, zf\n\n except:\n zf.close()\n\n return False", "def save_images(images, save_dir, image_type):\n for image in images:\n raw_img = urllib2.urlopen(image).read()\n count = len([i for i in os.listdir(save_dir) if image_type in i]) + 1\n f = open(save_dir + '/' + image_type + '_' + str(count), 'wb')\n f.write(raw_img)\n f.close()", "def _download_all(update_path=True, verbose=None):\n\n # iterate over dataset\n for ds in dataset_list:\n # call download\n ds().download(update_path=True, verbose=verbose, accept=True)", "def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n 
_logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def download_miniatures(self, folder):\n miniaturesToDownload = []\n for episode in self._root.iter('Episode'):\n seasonNumber = int(episode.find('SeasonNumber').text)\n episodeNumber = int(episode.find('EpisodeNumber').text)\n imgpath = '%s/%02d-%02d.jpg' % (folder, seasonNumber, episodeNumber)\n urlmin = episode.find('filename').text\n if urlmin and not os.path.isfile(imgpath):\n miniaturesToDownload.append((self.URL_BANNER + urlmin, imgpath))\n\n n = 0\n nbMiniatures = len(miniaturesToDownload)\n for urlmin, imgpath in miniaturesToDownload:\n urllib.urlretrieve(urlmin, imgpath)\n yield n, nbMiniatures\n n += 1", "def get_images(outputdir, parent_key, key, searchurl, maximum, json_path):\n body, browser = build_browser(searchurl)\n\n urls = []\n\n while len(urls) < maximum:\n try:\n page_source = browser.page_source\n\n soup = BeautifulSoup(page_source, 'lxml')\n\n search_result_soup = get_div_child(soup.body, \"islrg\")\n images = search_result_soup.find_all('img')\n urls = get_url_from_images(images)\n print(urls)\n\n for i in range(50):\n scroll_down(body)\n # browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div')\n browser.find_element_by_class_name(\"mye4qd\").click()\n print(len(urls) < maximum)\n except ElementNotInteractableException as e: # There is no next page\n print(e)\n break\n\n\n\n if 
not os.path.exists(outputdir):\n os.makedirs(outputdir)\n\n write_urls(json_path, parent_key, key, urls)\n\n # download_urls(urls, outputdir)\n browser.close()", "def download(parser, args):\n\n successful_count = 0\n unsuccessful_count = 0\n big_errors = []\n small_errors = []\n total_download_count = 0\n validate_args(parser, args)\n\n # sets do not allow duplicates in a list\n ids = set(args.file_ids)\n for i in args.manifest:\n if not i.get('id'):\n log.error('Invalid manifest')\n break\n ids.add(i['id'])\n\n index_client = GDCIndexClient(args.server)\n client = get_client(args, index_client)\n\n # separate the smaller files from the larger files\n bigs, smalls = index_client.separate_small_files(ids, args.http_chunk_size)\n\n # the big files will be normal downloads\n # the small files will be joined together and tarfiled\n if smalls:\n log.debug('Downloading smaller files...')\n\n # download small file grouped in an uncompressed tarfile\n small_errors, count = client.download_small_groups(smalls)\n successful_count += count\n\n i = 0\n while i < args.retry_amount and small_errors:\n time.sleep(args.wait_time)\n log.debug('Retrying failed grouped downloads')\n small_errors, count = client.download_small_groups(small_errors)\n successful_count += count\n i += 1\n\n # client.download_files is located in parcel which calls\n # self.parallel_download, which goes back to to gdc-client's parallel_download\n if bigs:\n log.debug('Downloading big files...')\n\n # create URLs to send to parcel for download\n bigs = [ urlparse.urljoin(client.data_uri, b) for b in bigs ]\n downloaded_files, big_error_dict = client.download_files(bigs)\n not_downloaded_url = ''\n big_errors_count = 0\n\n if args.retry_amount > 0:\n for url, reason in big_error_dict.iteritems():\n # only retry the download if it wasn't a controlled access error\n if '403' not in reason:\n not_downloaded_url = retry_download(\n client,\n url,\n args.retry_amount,\n args.no_auto_retry,\n args.wait_time)\n else:\n big_errors.append(url)\n not_downloaded_url = ''\n\n if not_downloaded_url:\n for b in big_error_dict:\n big_errors.append(url)\n\n if big_errors:\n log.debug('Big files not downloaded: {0}'\n .format(', '.join([ b.split('/')[-1] for b in big_errors ])))\n\n successful_count += len(bigs) - len(big_errors)\n\n unsuccessful_count = len(ids) - successful_count\n\n msg = 'Successfully downloaded'\n log.info('{0}: {1}'.format(\n colored(msg, 'green') if not args.color_off else msg,\n successful_count))\n\n if unsuccessful_count > 0:\n msg = 'Failed downloads'\n log.info('{0}: {1}'.format(\n colored(msg, 'red') if not args.color_off else msg,\n unsuccessful_count))\n\n return small_errors or big_errors", "def download_train_images(force=False):\n if not os.path.exists(train_images_dir_path) or force:\n train_images_url = '{}/{}.zip'.format(data_url, train_images_name)\n os.makedirs(data_dir, exist_ok=True)\n zip_file_path = wget.download(train_images_url, out=data_dir)\n with zipfile.ZipFile(zip_file_path, 'r') as zip_file:\n zip_file.extractall(data_dir)", "def runDataExtraction():\r\n config = CONFIG['steps']['DataExtraction']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n columns = ci['columns']\r\n nrows = ci['nrows']\r\n input_bucket = ci['bucket']\r\n no_of_files = ci['no_of_files']\r\n\r\n output_bucket = co['bucket']\r\n csv_name_prefix = co['csv_name_prefix']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n 
access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n zip_files = get_files(input_bucket, boto_client, file_type='zip')\r\n\r\n no_of_files_to_process = no_of_files if no_of_files is not None else len(\r\n zip_files)\r\n for zip_file in tqdm(zip_files[:no_of_files_to_process], total=no_of_files_to_process):\r\n process_file(zip_file, input_bucket, output_bucket, minioClient, columns,\r\n nrows=nrows, output_csv_name_prefix=csv_name_prefix)", "def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")", "def collect_images_from_urls(url_filepath, target_folder, image_class_name):\n\n def get_img_from_url(index, url):\n \"\"\"Closure function invoked by each running downloading Thread\"\"\"\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass\n\n image_filename_prefix = '{name}_{counter}.{ext}'\n list_of_urls = list()\n with open(url_filepath) as url_file:\n for url in url_file:\n url = url.strip()\n list_of_urls.append(url)\n\n print('Collected {} total URLS'.format(len(list_of_urls)))\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as thread_pool:\n for idx, url in enumerate(list_of_urls):\n thread_pool.submit(get_img_from_url, idx, url)", "def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)", "def update_downloads(self, download_items):\n \n for item in download_items:\n rec_id = item.get('recordId')\n img = self.get_image(rec_id)\n item_id = item.get('itemId')\n if item_id is None:\n item_id = item.get('ParentItemId')\n img.set_metadata(item_id, 'itemId')\n img.set_metadata(item.get('dateSubmitted'), 'dateSubmitted')\n img.set_metadata(item.get('userDisplayName'), 'userDisplayName')\n img.set_metadata(item.get('status'), 'status')\n img.set_metadata(item.get('orderStatus'), 'orderStatus')\n img.set_metadata(item.get('orderMessage'), 'orderMessage')\n img.set_metadata(item.get('downloaded'), 'downloaded')\n img.set_metadata(item.get('downloadPaths'), 
'downloadPaths')\n img.set_metadata(item.get('priority'), 'priority')\n params = item.get('parameters')\n if params is not None:\n for k, v in params.items():\n img.set_metadata(v, k)", "def download_all_files(self):\n self.server_handler.get_sensor_data_from_server()", "def download_many(\n package: Package,\n versions: Sequence[Union[LooseVersion, str]],\n dest: Path,\n cache: Cache,\n verbose: bool = False,\n) -> int:\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(async_download_many(package, versions, dest, cache))", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def download_files(service, file_list, out_path):\n total = len(file_list)\n for i, file_id in enumerate(file_list, 1):\n name = get_file(service, file_id)['title']\n print('Downloading {}... ({}/{}) [{}%]'.format(name, i, total,\n round(i / total * 100)))\n path = os.path.join(out_path, name)\n try:\n download_file(service, file_id, path)\n except errors.HttpError as error:\n os.remove(path) # Remove broken file\n print('Could not download file: {}'.format(error), file=sys.stderr)", "def download_data(files: page_iterator.HTTPIterator, folder: str) -> None:\n logging.info('File download Started... Wait for the job to complete.')\n\n # create folder locally if not exists\n if not os.path.exists(folder): os.makedirs(folder)\n\n for file in files:\n logging.info('GCS File: {}'.format(file.name))\n destination_uri = '{}/{}'.format(folder, file.name.split('/')[-1])\n file.download_to_filename(destination_uri if destination_uri.endswith('.csv') else destination_uri + '.csv')\n logging.info('Exported {} to {}'.format(file.name, destination_uri))\n\n return None", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. 
If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def download_checkpoint(self, exec_count):\n threads = []\n #dirs = range(1,exec_count+1)\n dirs = [exec_count]\n if self.use_active_set == \"True\":\n dirs += ['as']\n for i in range(1,exec_count):\n os.makedirs(\"/tmp/pico_cache/{0}/{1}\".format(self.pico_id, i))\n for i in dirs:\n logger.info(\"Starting thread for \" + \"{0}\".format(i))\n threads.append(self._run_in_new_thread(self.send_cmd, [\"download {0}\".format(i)]))\n logger.info(\"Done starting thread for \" + \"{0}\".format(i))\n # self.send_cmd(\"download {0}\".format(i))\n\n logger.info(\"About to join on threads\")\n map(lambda x: x.join(), threads)\n logger.info(\"Done join on threads\")", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()" ]
[ "0.76530296", "0.72565246", "0.68648124", "0.673801", "0.6725581", "0.67049855", "0.67010486", "0.6687771", "0.6672859", "0.6647223", "0.6634884", "0.66337126", "0.66298825", "0.66295946", "0.65946233", "0.65826285", "0.6576", "0.65477145", "0.6529526", "0.6509803", "0.6500442", "0.64792365", "0.64549303", "0.64405966", "0.64404327", "0.64315933", "0.6425823", "0.64142114", "0.6404702", "0.6399654", "0.6392682", "0.63784844", "0.6356528", "0.635374", "0.634743", "0.63056207", "0.6294939", "0.6292156", "0.628484", "0.6280611", "0.6261579", "0.6242335", "0.62256944", "0.6221745", "0.6210409", "0.62070936", "0.6203799", "0.6197441", "0.616705", "0.61469716", "0.6127604", "0.612749", "0.6100252", "0.60993606", "0.6090094", "0.60710883", "0.6062478", "0.60618204", "0.60172474", "0.60110873", "0.6003334", "0.5996099", "0.59876436", "0.5985444", "0.59843636", "0.59837455", "0.59788465", "0.59779793", "0.5973548", "0.5969114", "0.5952853", "0.59404147", "0.593383", "0.592975", "0.589964", "0.5893015", "0.58872426", "0.5885396", "0.5883965", "0.58825386", "0.5872604", "0.58705586", "0.5862654", "0.58615416", "0.58527005", "0.5849587", "0.5843771", "0.5840475", "0.5825196", "0.5820228", "0.5806048", "0.5795566", "0.5786962", "0.5784216", "0.5783278", "0.5770344", "0.5746284", "0.57390475", "0.57320094", "0.57285374" ]
0.7829335
0
This method stores a MistAssociation object in MongoDB, creating it with the same values as the Association provided. The secret is hex-encoded because the raw value repeatedly caused encoding errors.
def storeAssociation(self, server_url, association):
    mist_association = MistAssociation()
    mist_association.assoc_type = association.assoc_type
    mist_association.handle = association.handle.hex()
    mist_association.secret = association.secret.hex()
    mist_association.lifetime = association.lifetime
    mist_association.issued = association.issued
    mist_association.server_url = server_url
    mist_association.save()
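For orientation, the inverse lookup would decode those hex strings back to bytes when rebuilding the OpenID Association. The sketch below is illustrative only: the MistAssociation query interface (objects(...).first()) and the Association(handle, secret, issued, lifetime, assoc_type) constructor are assumptions modelled on MongoEngine-style documents and python-openid, not taken from this record.

from openid.association import Association

def getAssociation(self, server_url, handle=None):
    # Look up the stored record; the query syntax is an assumed MongoEngine-style API.
    query = {"server_url": server_url}
    if handle is not None:
        query["handle"] = handle.hex()  # handles were stored hex-encoded, as bytes in this codebase
    mist_association = MistAssociation.objects(**query).first()
    if mist_association is None:
        return None
    # Decode the hex strings written by storeAssociation back to bytes.
    return Association(
        bytes.fromhex(mist_association.handle),
        bytes.fromhex(mist_association.secret),
        mist_association.issued,
        mist_association.lifetime,
        mist_association.assoc_type,
    )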
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storeAssociation(self, server_url, assoc):\n assoc = models.Association(url=server_url,\n handle=assoc.handle,\n association=assoc.serialize())\n assoc.put()", "def storeAssociation(self, server_url, association):\r\n entity = datastore.Entity('Association')\r\n entity['url'] = server_url\r\n entity['handle'] = association.handle\r\n entity['association'] = association.serialize()\r\n datastore.Put(entity)", "def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):\n secret = cryptutil.getBytes(getSecretSize(assoc_type))\n uniq = oidutil.toBase64(cryptutil.getBytes(4))\n handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)\n\n assoc = Association.fromExpiresIn(self.SECRET_LIFETIME, handle, secret,\n assoc_type)\n\n if dumb:\n key = self._dumb_key\n else:\n key = self._normal_key\n self.store.storeAssociation(key, assoc)\n return assoc", "def store_metadata(self, cfg, dispatch_seq):\n\n j_str = serialize_dispatch_seq(dispatch_seq)\n # Put the channel serialization in the corresponding key\n j_str = '{\"channel_serialization\": ' + j_str + '}'\n j = json.loads(j_str)\n # Adds the channel_serialization key to cfg\n cfg.update(j)\n cfg.update({\"timestamp\": datetime.datetime.utcnow().strftime(\"%Y-%m-%d %X UTC\")})\n cfg.update({\"description\": \"metadata\"})\n\n with mongo_connection(self.cfg_mongo) as mongo:\n client, coll = mongo\n try:\n result = coll.insert_one(cfg)\n except pymongo.errors.PyMongoError as e:\n self.logger.error(f\"An error has occurred in store_metadata:: {e}\")\n\n return result.inserted_id", "async def _save(self, document, alias=None):\n doc = document.to_son()\n\n if document._id is not None:\n try:\n await self.coll(alias).update({\"_id\": document._id}, doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n else:\n try:\n doc_id = await self.coll(alias).insert(doc)\n except DuplicateKeyError as e:\n raise UniqueKeyViolationError.from_pymongo(str(e), self.__klass__)\n document._id = doc_id\n\n return document", "def to_db(self):\n bulk = conn_db().initialize_ordered_bulk_op()\n for fiction in self.fictions:\n bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})\n bulk.execute()", "def init_doc(self, obj, update_dict=True):\n try:\n obj.essentials\n except AttributeError:\n raise AttributeError(\"An object to be saved in db is supposed to have the essentials attribute\")\n\n if obj.essentials is None:\n raise AttributeError(\"An object to be saved in db should not have NoneType as its essentials\")\n\n print(\"Saving this object into db: {}\".format(type(obj)))\n\n start = datetime.now()\n essen = self.mongo_doc_generator(obj.essentials)\n document = {\"essentials\": essen, 'datetime': start, 'filepaths': obj.filepaths}\n\n db_location = obj.db\n element = obj.decide_element()\n host = db_location[\"host\"]\n project = db_location[\"project\"]\n\n target_db = connect_collection(host, project, element)\n doc_created = target_db.insert_one(document)\n inserted_id = doc_created.inserted_id\n\n return inserted_id", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, 
val)\n self.keyvals = {}\n self.newobj = False", "def save_to_db(self, data, db_operations):\n self.from_dict(data)\n self._id = str(db_operations.insert_one(self.to_dict()).inserted_id)", "def save(image):\n keypoints, description = describe(image)\n artwork = {\n \"keypoints\": keypoints,\n \"description\": description,\n \"path\": image,\n \"date\": datetime.datetime.utcnow()\n }\n artwork_id = db.insert(artwork)\n print(artwork_id)", "def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()", "def save_cards(cards):\n connection = pymongo.MongoClient(MONGO_URL)\n db = connection[DB]\n\n for card in cards:\n card_selector = {'name': card.name, 'redaction': card.redaction}\n dbcard = db.cards.find_one(card_selector)\n if dbcard is not None:\n for k, v in tocard(dbcard).shops.items():\n if k not in card.shops:\n card.shops[k] = v\n\n db.cards.update(card_selector, {'$set': {'shops': todict(card.shops)}})\n else:\n db.cards.insert(todict(card))", "def save_to_mongo(self, collection='pending_trailers'):\n Database.insert(collection=collection, data=self.json())", "def 
createAssociation(self):\n return _libsbml.GeneAssociation_createAssociation(self)", "def _store(self):\n database.mongo_store_object_by_label(self, self.label)", "def save(self, obj):\n self.uow.save(obj)\n self.imap.save(obj)\n state(obj).session = self", "def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc", "def setAssociation(self, *args):\n return _libsbml.GeneAssociation_setAssociation(self, *args)", "def create_association (self):\n return self.tm.create_association(self.create_topic())", "def save_course(self, course: Course) -> None:\n self.collection.insert_one(course.dict())", "def addAssociation(self, *args):\n return _libsbml.FbcOr_addAssociation(self, *args)", "def _store_document(self, document: dict) -> None:\n\n for item in document.items():\n if not is_bson_valid(item):\n raise InvalidTypeException(item)\n\n self._db[\"documents\"].append(document)", "def saveimage(item, db, collection):\n db[collection].insert(item)", "def __store(self):\n # connection strings are accessed directly by dbo\n dbo = dbo.connect()\n dbo.save(self.__to_dict())\n # not supre important to call but a nice idea\n dbo.destroy()", "def store_one(self, item):\n with mongo_connection(self.cfg_mongo) as mongo:\n client, coll = mongo\n coll.insert_one(item)\n\n return None", "def save(self, a_s):\n\n # General action data.\n a_s = {\n 'description': a_s.description,\n 'num_neurons': a_s.N,\n 'tau_m': (a_s.taum/ms).tolist(),\n 'tau_pre': (a_s.taum/ms).tolist(),\n 'tau_post': (a_s.taupost/ms).tolist(),\n 'tau_c': (a_s.tauc/ms).tolist(),\n 'tau_dop': (a_s.tauDop/ms).tolist(),\n 'tau_e': (a_s.taue/ms).tolist(),\n 'Ee': (a_s.Ee/mV).tolist(),\n 'vt': (a_s.vt/mV).tolist(),\n 'vr': (a_s.vr/mV).tolist(),\n 'El': (a_s.El/mV).tolist(),\n 'F': (a_s.F/Hz).tolist(),\n 'gmax': a_s.gmax,\n 'dA_pre': a_s.dApre,\n 'dA_post': a_s.dApost,\n 'duration': (a_s.sim_time/ms).tolist(),\n 'frame_length': (a_s.frame_length/ms).tolist(),\n 'dop_boost': a_s.dopBoost,\n 'reward_distance': a_s.reward_distance,\n 'speed_factor': (a_s.SPEED_FACTOR/second).tolist(),\n 'dragonfly_start': a_s.dragonfly_start,\n 'animation_id': a_s.animation_id,\n 'pattern_recognition_id': a_s.pattern_recognition_id,\n 'weights': (a_s.saved_weights).tolist(),\n 'training': a_s.training\n }\n\n # Save general data.\n _id = self.collection.insert(a_s)\n\n return _id", "def MongoSave(message):\n client = pymongo.MongoClient(\"localhost\",27017)\n db = client.PortfolioTracker\n db.AllPortfolios.save(message)#this must be a dictionary for proper insertion http://docs.python.org/2/tutorial/datastructures.html#dictionaries", "def store_individual(self, hash, individual):\n if self.storage: \n self.storage.write_individual(hash, self.generation, individual )", "def save_to_mongodb(lst):\n # deaulft using host='localhost' and port=27107\n db_object = connect_mongodb.connect_mongodb()\n # mongodb'connect\n connect = db_object.get_connect()\n # whether or not db is None\n if connect is None:\n print 'connect to mongodb database error'\n return None\n # db.python of mongodb'database\n database = connect['python']\n # batch insert\n index = 0\n lt = []\n for item in 
lst:\n # index must to convert string\n dt = {str(index): item}\n lt.append(dt)\n index += 1\n database.activation_code.insert(lt)", "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def addAssociation(self, *args):\n return _libsbml.Association_addAssociation(self, *args)", "def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )", "def addAssociation(self, *args):\n return _libsbml.FbcAnd_addAssociation(self, *args)", "def save_articles_mongo(articles):\n myclient = pymongo.MongoClient(\"mongodb://{host}:{port}/\".format(host=os.environ['MONGO_HOST'],\n port=os.environ['MONGO_PORT']))\n\n mongo_db = myclient[os.environ['MONGO_DATABASE']]\n col = mongo_db['articles']\n try:\n col.insert_many(articles)\n except (pymongo.errors.ConnectionFailure, pymongo.errors.ServerSelectionTimeoutError) as err:\n logging.exception(err)\n return", "def persistToStore(self, items, requestInstance):\n self._dbConnection = self.mongoConnection()\n imgStored = 0\n\n if (self.mongoConnection() and self.cumulusConnection()):\n\n try:\n contain = self._cumulusConnection.get_bucket(self._containerName)\n except boto.exception.S3ResponseError as detail:\n if(detail.reason.strip() == \"Not Found\"):\n self._log.warning(\"Creating bucket\")\n self._cumulusConnection.create_bucket(self._containerName)\n contain = self._cumulusConnection.get_bucket(self._containerName)\n else:\n self._log.error(\"Code and reason \" + detail.code + \" \" + detail.reason)\n self._log.error(\"Error in ImgStorecumulusMongo - queryToStore. full error \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStorecumulusMongo - persistToStore. \" + str(sys.exc_info()))\n\n try:\n dbLink = self._dbConnection[self._dbName]\n collection = dbLink[self._datacollection]\n collectionMeta = dbLink[self._metacollection]\n\n k = Key(contain)\n\n for item in items:\n\n k.key = item._imgId\n if requestInstance == None:\n k.set_contents_from_filename(item._imgURI)\n else:\n requestInstance.file.seek(0)\n k.set_contents_from_file(requestInstance.file)\n\n tags = item._imgMeta._tag.split(\",\")\n tags_list = [x.strip() for x in tags]\n meta = {\"_id\": item._imgId,\n \"os\" : item._imgMeta._os,\n \"arch\" : item._imgMeta._arch,\n \"owner\" : item._imgMeta._owner,\n \"description\" : item._imgMeta._description,\n \"tag\" : tags_list,\n \"vmType\" : item._imgMeta._vmType,\n \"imgType\" : item._imgMeta._imgType,\n \"permission\" : item._imgMeta._permission,\n \"imgStatus\" : item._imgMeta._imgStatus,\n }\n data = {\"_id\": item._imgId,\n \"createdDate\" : datetime.utcnow(),\n \"lastAccess\" : datetime.utcnow(),\n \"accessCount\" : 0,\n \"size\" : item._size,\n \"extension\" : item._extension,\n }\n\n collectionMeta.insert(meta, safe=True)\n collection.insert(data, safe=True)\n\n imgStored += 1\n\n except pymongo.errors.AutoReconnect:\n self._log.warning(\"Autoreconnected.\")\n except pymongo.errors.ConnectionFailure:\n self._log.error(\"Connection failure. The file has not been stored. Image details: \" + item.__str__() + \"\\n\")\n except IOError:\n self._log.error(\"Error in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n self._log.error(\"No such file or directory. 
Image details: \" + item.__str__())\n except TypeError:\n self._log.error(\"TypeError in ImgStorecumulusMongo - persistenToStore \" + str(sys.exc_info()))\n except pymongo.errors.OperationFailure:\n self._log.error(\"Operation Failure in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStoreCumulusMongo - persistToStore. \" + str(sys.exc_info()))\n finally:\n self._dbConnection.disconnect()\n else:\n self._log.error(\"Could not get access to the database. The file has not been stored\")\n\n for item in items: \n cmd = \"rm -f \" + item._imgURI\n os.system(cmd)\n\n if (imgStored == len(items)):\n return True\n else:\n return False", "def write_data_to_mongo(self, db_name, collection_name, list_of_dicts):\n self.db_client.db_name = db_name\n self.db = self.db_client.affirm_client()\n collection = self.__write(collection_name, list_of_dicts)\n return collection", "def commit(self):\r\n # print(\"Connection to Mongo...\")\r\n client = MongoClient(DatabaseConfig.host, DatabaseConfig.port)\r\n # print(\"mongo-client: {}\".format(client))\r\n db = client[DatabaseConfig.database]\r\n records = db[self.collection]\r\n # print(kmodels)\r\n records.save(self.to_dict())\r\n client.close()", "def insert_school(mongo_collection, **kwargs):\n return mongo_collection.insert(kwargs)", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def save_model(self, request, obj, form, change):\n obj.save()\n casestudy_id = request.session.get('casestudy', None)\n if casestudy_id is not None:\n casestudy = CaseStudy.objects.get(pk=casestudy_id)\n pic = PublicationInCasestudy.objects.get_or_create(\n casestudy=casestudy,\n publication=obj)", "def put(self, _id=None, **data):\n new_data, numpy_array = Database._put_prep(data)\n doc = self._put_assume_new(_id, **new_data)\n if numpy_array is not None:\n temp_file = TemporaryFile()\n np.save(temp_file, numpy_array)\n temp_file.seek(0)\n # TODO: check attachment success\n doc.put_attachment(\n attachment='value', content_type='application/octet-stream',\n data=temp_file)\n return doc", "def _create_association(\n context, namespace_name, resource_type_name, values, session):\n\n namespace_resource_type_rec = models.MetadefNamespaceResourceType()\n metadef_utils.drop_protected_attrs(\n models.MetadefNamespaceResourceType, values)\n # values['updated_at'] = timeutils.utcnow() # TS mixin should do this\n namespace_resource_type_rec.update(values.copy())\n try:\n namespace_resource_type_rec.save(session=session)\n except db_exc.DBDuplicateEntry:\n LOG.debug(\"The metadata definition resource-type association of\"\n \" resource_type=%(resource_type_name)s to\"\n \" namespace=%(namespace_name)s, already exists.\",\n {'resource_type_name': resource_type_name,\n 'namespace_name': namespace_name})\n raise exc.MetadefDuplicateResourceTypeAssociation(\n resource_type_name=resource_type_name,\n namespace_name=namespace_name)\n\n return namespace_resource_type_rec.to_dict()", "def openid_associate(self, request):\n # XXX: TESTME\n assoc_type = request.assoc_type\n session_type = request.session.session_type\n if self.negotiator.isAllowed(assoc_type, session_type):\n assoc = self.signatory.createAssociation(\n dumb=False, assoc_type=assoc_type)\n return request.answer(assoc)\n else:\n message = ('Association type %r is not supported with '\n 'session type %r' % (assoc_type, session_type))\n (preferred_assoc_type, preferred_session_type) = \\\n 
self.negotiator.getAllowedType()\n return request.answerUnsupported(message, preferred_assoc_type,\n preferred_session_type)", "def setAssociation(self, *args):\n return _libsbml.GeneProductAssociation_setAssociation(self, *args)", "def _save(self):\n yield self.validate()\n\n db = self.db_client()\n saved = yield db.save_doc(self._resource)\n\n # Allow couch to create Document IDs\n if '_id' not in self._resource:\n self._resource['_id'] = saved['id']", "def store(self, key, obj):\n attrs = self.load_attrs()\n attrs[key] = obj\n self.store_attrs(attrs)", "def set_many(self, mapping, timeout=None):\n values = [self._get_doc(key, value, timeout) for key, value in mapping.iteritems()]\n self.collection.insert_many(values)\n return True", "def upload_album(self, hash, girl=True):\n images = self._imgur.album_images(hash)['data']\n for image in images:\n doc = {\n '_id': image['id'],\n 'rating': 0.0,\n 'count': 0,\n 'link': image['link']\n }\n\n try:\n if girl:\n self._db.girls.insert_one(doc)\n else:\n self._db.boys.insert_one(doc)\n except pymongo.errors.DuplicateKeyError:\n continue", "def save_session(self, session, response):\n if not isinstance(session, PyMongoSession):\n raise TypeError('session (%r) is not a PyMongoSession' % session)\n\n try:\n cx, db = app.extensions['pymongo'][self.config_prefix]\n except KeyError:\n raise Exception('could not find PyMongo with config prefix %r in app' %\n self.config_prefix)\n\n db[self.collection].save(session)", "def save(self, db):\n db.googleResults.insert_one(\n {\n \"searchQuery\": self.search_query,\n \"title\": self.title,\n \"link\": self.link,\n \"subtext\": self.subtext,\n \"searchterms\" : self.searchterms, # array\n \"queryTime\": datetime.datetime.now(),\n \"details\": self.link_scripts\n }\n )", "def register_group_associations(group_id, requester: User, associate: User, associate_nickname, amount):\n\n assoc = GroupPaymentAssociation.query.filter_by(\n group_id=group_id,\n user_id=requester.id,\n associate_id=associate.id\n ).first()\n\n if assoc: # Check if associations already exist, and update balance if so.\n # Update balance for user association to associate\n assoc.balance += int(amount)\n\n assoc.associate_nickname=associate_nickname # Update associate_nickname\n\n # Update balance for the associate association to user\n associate_assoc = GroupPaymentAssociation.query.filter_by(\n group_id=group_id,\n user_id=associate.id,\n associate_id=requester.id\n ).first()\n\n associate_assoc.balance += -int(amount)\n else:\n # Create associations for both user and associate(to associate and to user, respectively)\n user_association = GroupPaymentAssociation(\n group_id=group_id,\n user_id=requester.id,\n associate_id=associate.id,\n associate_nickname=associate_nickname,\n balance=int(amount)\n )\n\n associate_association = GroupPaymentAssociation(\n group_id=group_id,\n user_id=associate.id,\n associate_id=requester.id,\n associate_nickname=requester.nickname,\n balance=-int(amount)\n )\n\n db.session.add_all([user_association, associate_association])", "def store_eeg_in_mongodb(eeg_data):\n con, eeg = connect_to_eeg_db()\n for eeg_record in eeg_data:\n eeg.insert(eeg_record)\n con.close()", "def store_user(profile, attrs={}):\n db = connection()\n doc_ref = db.document(f'users/{profile.username}')\n profile_dict = {\n 'userid': profile.userid,\n 'full_name': profile.full_name,\n 'is_verified': profile.is_verified,\n 'biography': profile.biography,\n 'followees': profile.followees,\n 'followers': profile.followers,\n 'mediacount': 
profile.mediacount,\n 'stored_at': datetime.now()\n }\n\n valid_attr_keys = ['is_artist', 'is_gallery',\n 'price_max', 'price_min', 'price_avg']\n valid_attrs = {k: attrs.get(k) for k in valid_attr_keys}\n\n return doc_ref.set({**profile_dict, **valid_attrs})", "def make_from_mongo_dict(household_as_dict):\n # First do the id shuffle\n mongo_id = household_as_dict['_id']\n del household_as_dict['_id']\n household_as_dict['_Household__id'] = str(mongo_id)\n # now unpickle\n return jsonpickle.decode(json.dumps(household_as_dict))", "def save(self):\n cur = get_cursor()\n args = (self.associated_page, self.associated_name, self.title,\n self.author, self.author_mail, self.comment_body, self.pub_date)\n if self.comment_id is None:\n cur.execute('''insert into comments (associated_page, associated_name,\n title,\n author, author_mail,\n comment_body, pub_date)\n values (?, ?, ?, ?, ?, ?, ?)''', args)\n self.comment_id = cur.lastrowid\n else:\n args += (self.comment_id,)\n cur.execute('''update comments set associated_page=?,\n associated_name=?,\n title=?, author=?,\n author_mail=?, comment_body=?,\n pub_date=? where comment_id = ?''', args)\n cur.close()", "def save(self):\n db = DBStorage()\n p = self.createPatient()\n db.add_prescription(p)", "def persist_standardizer(self, std_object):\n object_path = 'model_objects/'\n file_name = f'market_maker_standardizer_{self.target_coin}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(std_object, pickle.HIGHEST_PROTOCOL)\n )\n return", "def store_barbican_secret_for_coriolis(\n barbican, secret_info, name='Coriolis Secret'):\n payload = json.dumps(secret_info)\n\n secret = barbican.secrets.create(\n name=name, payload=payload,\n payload_content_type='application/json')\n secret_ref = secret.store()\n\n return secret_ref", "def add_to_db(ark_obj):\n session = Session()\n session.add(ark_obj)\n session.commit()\n session.close()", "def test_track_with_association(self):\n album_pk = self.add_album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=2)\n track = Track(artist='Artist', album='Album')\n pk = track.insert(self.app.db, self.app.curs,\n 'xmms',\n datetime.datetime.now())\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['album_id'], 0)\n\n for line in self.app.associate_albums():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['album_id'], album_pk)", "def store(self, graphs, start_date, end_date):\n documents = [{'topic_id': key,\n 'graph': graph,\n 'start_date': start_date,\n 'end_date': end_date}\n for key, graph in graphs.items()]\n self.collection.insert_many(documents)", "def save(self):\n if self.iid is not None:\n self.db().update(self.iid, self._attributes)\n else:\n self.iid = self.db().add(self._attributes)", "def save(self, record):\n self.collection.insert(record)\n self.record = record\n\n return self", "def save_object(self, obj, expected_value=None):\n doc = self.marshal_object(obj)\n if obj.id:\n url = \"/%s/%s\" % (self.db_name, obj.id)\n else:\n url = \"/%s\" % (self.db_name)\n resp = self._make_request(\"PUT\", url, body=doc.toxml())\n new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))\n obj.id = new_obj.id\n for prop in obj.properties():\n try:\n propname = prop.name\n except AttributeError:\n propname = None\n if propname:\n value = getattr(new_obj, prop.name)\n if value:\n setattr(obj, prop.name, value)\n return obj", "def save(self, *args, **kwargs):\n if not self.id:\n self.api_key = 
self.__generate_key(self.__api_key_length)\n self.api_secret = self.__generate_key(self.__api_secret_length)\n super(Token, self).save(*args, **kwargs)", "def store_new_item(self, doc):\n self._collection.save(doc.document)", "def save_article(title,image,description,content,pub_date,news_url,note,user):\n article = Article(\n title=title,\n image=image,\n description=description,\n content = content,\n pub_date=datetime.strptime(pub_date, \"%Y-%m-%dT%H:%M:%SZ\"),\n news_url=news_url\n )\n \n db.session.add(article)\n \n #testing\n print(article)\n \n # 2.0 add rating and notes during save event\n #creating the relationship between user and the saved article\n saved_article = Saved(\n user=user,\n article=article,\n notes=note\n )\n # notes=notes,\n # rating=rating)\n \n db.session.add(saved_article)\n db.session.commit()\n\n #testing\n print(saved_article)", "def write_to_db(self, doc):\n self.db_connection[self.db_name][self.db_collection].insert_one(doc)", "def save_embeddings(self, str_file):\n\n with open(str_file, 'wb') as f_write:\n pickle.dump(self.embeddings_entity, f_write, True)\n pickle.dump(self.embeddings_relation, f_write, True)\n pickle.dump(self.dict_paras, f_write, True)", "def save_object(self, obj, expected_value=None):\r\n doc = self.marshal_object(obj)\r\n if obj.id:\r\n url = \"/%s/%s\" % (self.db_name, obj.id)\r\n else:\r\n url = \"/%s\" % (self.db_name)\r\n resp = self._make_request(\"PUT\", url, body=doc.toxml())\r\n new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))\r\n obj.id = new_obj.id\r\n for prop in obj.properties():\r\n try:\r\n propname = prop.name\r\n except AttributeError:\r\n propname = None\r\n if propname:\r\n value = getattr(new_obj, prop.name)\r\n if value:\r\n setattr(obj, prop.name, value)\r\n return obj", "def add_embedded_campaign(self, id, collection, campaign, confidence,\n analyst, date, description):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n # TODO: Make sure the object does not already have the campaign\n # Return if it does. 
Add it if it doesn't\n obj = getattr(self.db, collection)\n result = obj.find({'_id': id, 'campaign.name': campaign})\n if result.count() > 0:\n return\n else:\n log.debug('Adding campaign to set: {}'.format(campaign))\n campaign_obj = {\n 'analyst': analyst,\n 'confidence': confidence,\n 'date': date,\n 'description': description,\n 'name': campaign\n }\n result = obj.update(\n {'_id': id},\n {'$push': {'campaign': campaign_obj}}\n )\n return result", "def save_google_analytics_credentials(self,credentials_dict):\n\t\tprint 'saving credentials'\n\t\t# store information necessary for building client\n\t\tcredentials_dict['token_expiry'] = datetime.now() + timedelta(hours=1)\n\t\tGAUM = GoogleAnalyticsUserModel(credentials_dict)\n\t\tdb.session.add(GAUM)\n\t\tdb.session.commit()\n\t\tdb.session.close()", "def save_meta(self):\n meta = self.serializer.dumps(self.meta)\n self.connection.hset(self.key, 'meta', meta)", "def _put(self, key: bytes, value: bytes, expire_time_ms: int=None):\n self.db.put(key, pickle.dumps((value, expire_time_ms)))", "def save_comment(newComment):\n conn = pymongo.Connection(\"localhost\",27017)\n db = conn[\"paperDB\"]\n infoDB = db.infoDB\n record = infoDB.find_one()\n\n record['comment'].append(newComment)\n infoDB.save(record)", "def create(path, crypt_options):\n\n content = Database.DEFAULT_CONTENT\n\n aes_key, hmac_key, salt, iterations = crypt.make_keys(\n password=crypt_options.password,\n salt=crypt_options.salt,\n iterations=crypt_options.iterations\n )\n ciphertext, iv = crypt.encrypt(content, aes_key)\n hmac = crypt.make_hmac(ciphertext, hmac_key)\n\n output = {\n \"hmac\": hmac,\n \"iterations\": crypt_options.iterations\n }\n\n for key, value in ((\"ciphertext\", ciphertext),\n (\"iv\", iv),\n (\"salt\", salt)):\n output[key] = base64.b64encode(value).decode(\"utf-8\")\n output_data = json.dumps(output).encode(\"utf-8\")\n\n with open(path, \"wb\") as f:\n f.write(output_data)\n\n database = Database(\n path=path,\n credentials=[], # empty credentials\n crypt_options=crypt_options\n )\n return database", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def convert_to_assoc(input_filename, output_filename):\n out_stream = codecs.open(output_filename, 'w', encoding='utf-8')\n \n for info in read_json_stream(input_filename):\n startc = reduce_concept(info['start'])\n endc = reduce_concept(info['end'])\n rel = info['rel']\n weight = info['weight']\n\n if 'dbpedia' in info['sources'] and '/or/' not in info['sources']:\n # DBPedia associations are still too numerous and too weird to\n # associate.\n continue\n\n pairs = []\n if startc == '/c/en/person':\n if rel == '/r/Desires':\n pairs = [('/c/en/good', endc), ('/c/en/bad/neg', endc)]\n elif rel == '/r/NotDesires':\n pairs = [('/c/en/bad', endc), ('/c/en/good/neg', endc)]\n else:\n pairs = [(startc, endc)]\n elif startc == '/c/zh/人':\n if rel == '/r/Desires':\n pairs = [('/c/zh/良好', endc), ('/c/zh/不良/neg', endc)]\n elif rel == '/r/NotDesires':\n pairs = [('/c/zh/良好/neg', endc), ('/c/zh/不良', endc)]\n else:\n pairs = [(startc, endc)]\n else:\n negated = (rel.startswith('/r/Not') or rel.startswith('/r/Antonym'))\n if not negated:\n pairs = [(startc, endc)]\n else:\n pairs = [(startc, endc + '/neg'), (startc + '/neg', endc)]\n\n for (start, end) in pairs:\n line = \"%(start)s\\t%(end)s\\t%(weight)s\" % {\n 'start': start,\n 'end': end,\n 'weight': weight,\n }\n print(line, file=out_stream)", "def write_mesquite_association_block(self, out, domain_taxon_set_title=None, 
range_taxon_set_title=None):\n out.write(\"BEGIN TaxaAssociation;\\n\")\n if self.label:\n title = self.label\n else:\n title = self.oid\n out.write(\" TITLE %s;\\n\" % textutils.escape_nexus_token(title))\n if domain_taxon_set_title is None:\n if self.domain_taxon_set.label:\n domain_taxon_set_title = self.domain_taxon_set.label\n else:\n domain_taxon_set_title = self.domain_taxon_set.oid\n if range_taxon_set_title is None:\n if self.range_taxon_set.label:\n range_taxon_set_title = self.range_taxon_set.label\n else:\n range_taxon_set_title = self.range_taxon_set.oid\n out.write(\" TAXA %s, %s;\\n\" % (\n textutils.escape_nexus_token(range_taxon_set_title),\n textutils.escape_nexus_token(domain_taxon_set_title)\n ))\n out.write(\" ASSOCIATES\\n\")\n out.write(self.mesquite_association_rows() + \"\\n\")\n out.write(\" ;\\n\")\n out.write(\"END;\\n\")", "def save_db(self) -> None:", "def create_qualification() -> Dict[str, Any]:\n qualification_schema = QualificationSchema()\n qualification = qualification_schema.load(request.json, partial=(\"id\",))\n\n if \"id\" in qualification:\n raise APIError(reason=\"id_specified\", status_code=400)\n\n qualification_obj = Qualification(**qualification)\n\n try:\n db.session.add(qualification_obj)\n db.session.commit()\n except IntegrityError as exc:\n db.session.rollback()\n raise APIError(reason=\"object_exists\", status_code=400) from exc\n\n return cast(Dict[str, Any], qualification_schema.dump(qualification_obj))", "def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'", "def store_recommendations(recommendations, model_version, db):\n if recommendations:\n # Insert the recommendations into the mongoDB\n db.recommendations.insert_many(recommendations)\n\n # Update the # of recommendations made by the model version today\n db.ctr_per_model.update_one(\n {\"model_version\": model_version, \"date\": str(date.today())},\n {\"$inc\": {\"num_recommends\": len(recommendations)}},\n upsert=True,\n )\n\n # Update the # of recommendations made today for the global CTR\n db.ctr_global.update_one(\n {\"date\": str(date.today())},\n {\"$inc\": {\"num_recommends\": len(recommendations)}},\n upsert=True,\n )\n\n recommendations.clear() # Clear the recommendations so that they are not added twice", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def save(self, obj):", "async def save(\n self, item: T, *,\n _id=None,\n mode: Literal['save', 'insert', 'update'] = 'save',\n inject_default_id: bool = False,\n inject_created: bool = False\n\n ) -> T:\n document = item.dict(by_alias=True)\n\n if self.identity != '_id':\n assert '_id' not in document, \"Should not have _id in the instance if collection's identity is non default.\"\n\n if _id is not None:\n assert self.identity == '_id', \"_id parameter can be provided only if collection's identity is default.\"\n assert '_id' not in document, \"_id should be provided ether in instance or by _id param.\"\n document['_id'] = ObjectId(_id)\n\n if mode == 'save':\n if document.get(self.identity) is None: # === New document.\n if self.identity == '_id':\n if '_id' in document: del document['_id']\n else:\n document[self.identity] = self.generate_id()\n\n result: InsertOneResult = await 
self.collection.insert_one(document) # will fail if exists due to index=unique violation for identity\n document['_id'] = result.inserted_id # will be removed while back-parsing if not necessary\n document['__created__'] = True\n\n else: # == Possibly an existing document that needs to be updated.\n result: UpdateResult = await self.collection.update_one(\n {self.identity: document[self.identity]},\n {'$set': document}, upsert=True\n )\n if result.upserted_id is not None:\n document['_id'] = result.upserted_id\n document['__created__'] = True\n\n elif mode == 'insert':\n assert document.get(self.identity) is not None, f\"Need identity ({self.identity}) for insert mode, use save mode to insert document without identity.\"\n _result: InsertOneResult = await self.collection.insert_one(document)\n document['__created__'] = True\n\n elif mode == 'update':\n assert document.get(self.identity) is not None, f\"Need identity ({self.identity}) for update mode.\"\n mongo_query = {self.identity: document[self.identity]}\n result: UpdateResult = await self.collection.update_one(\n mongo_query,\n {'$set': document}\n )\n if not result.matched_count: raise NotFound(mongo_query)\n\n else:\n assert False, f\"Mode {mode} is not supported.\"\n\n return self.parse_document(document, inject_default_id=inject_default_id, inject_created=inject_created)", "def insert(self, media):\n insert_query = \"\"\"INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(insert_query, media.totuple())\n self.connection.commit()", "async def handle(self, item, **kwargs):\n collection_name = \"default\"\n if isinstance(item, Video):\n collection_name = \"videos\"\n elif isinstance(item, Music):\n collection_name = \"musics\"\n\n collection = self.db[collection_name]\n print(\"Saving\", item, 'to mongodb...')\n if await collection.update_one({'id': item.id}, {'$set': item.json()}, upsert=True):\n print(\"Save success\", item, 'to mongodb...')\n else:\n print(\"error save\", item, 'to mongodb...')", "def set_album(audio: EasyID3, album):\r\n audio['album'] = album\r\n audio.save()", "def test_associations(self):\n\n oj = {}\n\n if self.DEBUG:\n print \"Create A\"\n\n # create a\n a = self.save_base_metadata()\n\n oj[a.uuid] = 'A'\n\n if self.DEBUG:\n print \"Create B, point B to A\"\n\n # create b, point b to a\n b = self.save_base_metadata()\n\n oj[b.uuid] = 'B'\n\n b.add_association_to(a)\n\n # save a, b\n b.save()\n a.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, None, None, None)\n\n # test b pointing to a\n self.assertIn(a, b.my_associations)\n # test to see if a is aware of association from b\n # self.assertIn(b, a.associations_to_me)\n\n if self.DEBUG:\n print \"Create C, point C to B\"\n\n # create c, point c to b\n c = self.save_base_metadata()\n\n oj[c.uuid] = 'C'\n\n c.add_association_to(b)\n\n # a.save()\n\n # save b, c\n c.save()\n b.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, None, None)\n\n # test c pointing to a\n self.assertIn(a, c.my_associations)\n # test c aware of association from a\n self.assertIn(c, a.associations_to_me)\n\n # test c pointing to b\n self.assertIn(b, c.my_associations)\n # test b aware of association from c\n self.assertIn(c, b.associations_to_me)\n\n if self.DEBUG:\n print \"create D, point to B\"\n\n # create d, point to b\n d = self.save_base_metadata()\n\n oj[d.uuid] = 'D'\n\n d.add_association_to(b)\n\n # a.save()\n\n # save d, b\n d.save()\n b.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, d, None)\n\n # 
test d pointing to a\n self.assertIn(a, d.my_associations)\n # test a aware of assoication from d\n self.assertIn(d, a.associations_to_me)\n\n # test d pointing to b\n self.assertIn(b, d.my_associations)\n # test b aware of association from d\n self.assertIn(d, b.associations_to_me)\n\n if self.DEBUG:\n print \"Create E, point to A\"\n\n # create e, point to a\n e = self.save_base_metadata()\n\n oj[e.uuid] = 'E'\n\n e.add_association_to(a)\n\n # save e, a\n e.save()\n a.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, d, e)\n\n # test e pointing to a\n self.assertIn(a, e.my_associations)\n # test a aware of association from e\n self.assertIn(b, a.associations_to_me)", "def test_bson_serialization(self, molecule):\n serialized = molecule.to_bson()\n molecule_copy = Molecule.from_bson(serialized)\n assert molecule == molecule_copy", "def add_institute(self, institute_obj):\n internal_id = institute_obj[\"internal_id\"]\n display_name = institute_obj[\"display_name\"]\n\n # Check if institute already exists\n if self.institute(institute_id=internal_id):\n raise IntegrityError(\"Institute {0} already exists in database\".format(display_name))\n\n LOG.info(\n \"Adding institute with internal_id: {0} and \"\n \"display_name: {1}\".format(internal_id, display_name)\n )\n\n insert_info = self.institute_collection.insert_one(institute_obj)\n ##TODO check if insert info was ok\n LOG.info(\"Institute saved\")", "def save_tournament(self, serialized_tournament):\n self.tournament_table.insert(serialized_tournament)", "def _post(self, data):\n cust_id = data['customer_id']\n user_id = data['user_id']\n\n # Now check if we have this association already\n assoc_q = Query()\n assoc_data = DB_USER_CUSTOMER_RELS_TABLE.search((assoc_q.customer_id == cust_id) &\n (assoc_q.user_id == user_id))\n if assoc_data:\n flask_restful.abort(400, message=\"Bad Request - association between customer \"\n \"and user exists already\")\n\n new_association_id = DB_USER_CUSTOMER_RELS_TABLE.insert(data)\n return new_association_id", "def save_record(record, cover_metadata):\n record[\"cover_metadata\"] = cover_metadata\n record.commit()\n db.session.commit()", "def save(self, archive_writer):\n\n self.meta.id = str(uuid.uuid4())\n self.meta.based_on = []\n self.meta.enviroment = Enviroment().__dict__\n for val in self.user.__dict__.values():\n if isinstance(val, Annotation):\n val.toJsonStructRec(self.meta.id, archive_writer)\n json_struct = self.replace_natives(\n archive_writer=archive_writer, uuid=self.meta.id\n )\n elem_name = f\"{self.meta.id}/AUTOACTIVE_SESSION.json\"\n archive_writer.write_metadata(elem_name, json_struct)", "def upload_annotation_to_dsa(annotation_filepath, image_filename, collection_name, uri, token):\n with open(annotation_filepath) as annotation_json:\n dsa_annotation = json.load(annotation_json)\n\n dsa_uuid = get_item_uuid(image_filename, uri, token, collection_name)\n if dsa_uuid:\n push_annotation_to_dsa_image(dsa_uuid, dsa_annotation, uri, token)", "def insert_school(mongo_collection, **kwargs):\n result = mongo_collection.insert_one(kwargs)\n return result.inserted_id", "def put(self):\n if not self.other_raw_names:\n self.other_raw_names = []\n if self.raw_movie_name not in self.other_raw_names:\n self.other_raw_names.append(self.raw_movie_name)\n \n if self.movie:\n self.genres = self.movie.genres\n \n return db.Model.put(self)" ]
[ "0.702098", "0.6713939", "0.56481075", "0.53882486", "0.5067821", "0.50430053", "0.5034432", "0.493951", "0.49269888", "0.4900855", "0.48810253", "0.48749557", "0.4837358", "0.48064864", "0.48010197", "0.4800175", "0.47752208", "0.4773829", "0.47659233", "0.47503135", "0.47456935", "0.47419325", "0.47312295", "0.47281495", "0.47240102", "0.47158647", "0.47130147", "0.46920022", "0.4690735", "0.46792376", "0.46773526", "0.46772072", "0.46695945", "0.46551722", "0.46468565", "0.4640654", "0.46295545", "0.46239233", "0.4621464", "0.461996", "0.4612234", "0.46064353", "0.46047768", "0.4597779", "0.45919853", "0.45733923", "0.45730564", "0.45644805", "0.4555337", "0.45515504", "0.45480904", "0.45476425", "0.45434842", "0.4530851", "0.45269293", "0.45265502", "0.45265085", "0.45030332", "0.45029405", "0.44954333", "0.4485397", "0.44820148", "0.44774753", "0.44773522", "0.4459872", "0.44586354", "0.4456384", "0.44482362", "0.44472608", "0.4434073", "0.44264916", "0.44260418", "0.44239876", "0.44220674", "0.44191357", "0.44190812", "0.44115102", "0.44069397", "0.4400136", "0.4398208", "0.43887964", "0.4377732", "0.43766516", "0.43760303", "0.43758506", "0.43726498", "0.43719956", "0.4356263", "0.43439785", "0.4343119", "0.43424863", "0.43367806", "0.4326061", "0.43224597", "0.43125015", "0.43088558", "0.43046877", "0.4300336", "0.43003204", "0.42991307" ]
0.705629
0
Gets a server URL and the handle, and finds a matching association that has not expired. Expired associations are deleted. The association returned is the one that was issued most recently.
def getAssociation(self, server_url, handle=None):
    query = {'server_url': server_url}
    if handle:
        query.update({'handle': handle.hex()})
    try:
        mist_associations = MistAssociation.objects(**query)
    except me.DoesNotExist:
        mist_associations = []

    filtered_mist_assocs = []
    for assoc in mist_associations:
        if assoc.is_expired():
            assoc.delete()
        else:
            filtered_mist_assocs.append(assoc)

    filtered_mist_assocs = sorted(filtered_mist_assocs,
                                  key=lambda assoc: assoc.issued,
                                  reverse=True)

    if len(filtered_mist_assocs) > 0:
        mist_assoc = filtered_mist_assocs[0]
        association = Association(handle=mist_assoc.handle.decode('hex'),
                                  secret=mist_assoc.secret.decode('hex'),
                                  issued=mist_assoc.issued,
                                  lifetime=mist_assoc.lifetime,
                                  assoc_type=mist_assoc.assoc_type)
        return association

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAssociation(self, server_url, handle=None):\n query = models.Association.all().filter('url', server_url)\n if handle:\n query.filter('handle', handle)\n\n results = query.fetch(1)\n if len(results) > 0:\n assoc = xAssociation.deserialize(results[0].association)\n if assoc.getExpiresIn() > 0:\n # hasn't expired yet\n return assoc\n\n return None", "def getAssociation(self, server_url, handle=None):\r\n query = datastore.Query('Association', {'url =': server_url})\r\n if handle:\r\n query['handle ='] = handle\r\n\r\n results = query.Get(1)\r\n if results:\r\n association = Association.deserialize(results[0]['association'])\r\n if association.getExpiresIn() > 0:\r\n # hasn't expired yet\r\n return association\r\n\r\n return None", "def removeAssociation(self, server_url, handle):\n query = models.Association.gql('WHERE url = :1 AND handle = :2',\n server_url, handle)\n return self._delete_first(query)", "def removeAssociation(self, server_url, handle):\r\n query = datastore.Query('Association',\r\n {'url =': server_url, 'handle =': handle})\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())", "def getAssociation(self, assoc_handle, dumb, checkExpiration=True):\n # Hmm. We've created an interface that deals almost entirely with\n # assoc_handles. The only place outside the Signatory that uses this\n # (and thus the only place that ever sees Association objects) is\n # when creating a response to an association request, as it must have\n # the association's secret.\n\n if assoc_handle is None:\n raise ValueError(\"assoc_handle must not be None\")\n\n if dumb:\n key = self._dumb_key\n else:\n key = self._normal_key\n assoc = self.store.getAssociation(key, assoc_handle)\n if assoc is not None and assoc.expiresIn <= 0:\n logger.info(\"requested %sdumb key %r is expired (by %s seconds)\" %\n ((not dumb) and 'not-' or '', assoc_handle,\n assoc.expiresIn))\n if checkExpiration:\n self.store.removeAssociation(key, assoc_handle)\n assoc = None\n return assoc", "def removeAssociation(self, server_url, handle):\n\n try:\n mist_associations = MistAssociation.objects(\n server_url=server_url, handle=handle.hex())\n except me.DoesNotExist:\n return False\n\n for assoc in mist_associations:\n assoc.delete()\n\n return len(mist_associations) > 0", "def storeAssociation(self, server_url, association):\r\n entity = datastore.Entity('Association')\r\n entity['url'] = server_url\r\n entity['handle'] = association.handle\r\n entity['association'] = association.serialize()\r\n datastore.Put(entity)", "def storeAssociation(self, server_url, assoc):\n assoc = models.Association(url=server_url,\n handle=assoc.handle,\n association=assoc.serialize())\n assoc.put()", "def popUrl(self):\n url = None\n#self.lock.acquire()\n try:\n url = self.__unvistedUrls.get(timeout=2) #2s\n except:\n url = None\n#self.lock.release()\n return url", "def get_expired_nscache():\n now = int(time())\n keys_to_del = []\n for key, odict in nscache.iteritems():\n for dn, ce in odict.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append((key, dn))\n return (keys_to_del, nscache)", "def storeAssociation(self, server_url, association):\n\n mist_association = MistAssociation()\n mist_association.assoc_type = association.assoc_type\n mist_association.handle = association.handle.hex()\n mist_association.secret = association.secret.hex()\n mist_association.lifetime = association.lifetime\n mist_association.issued = association.issued\n mist_association.server_url = server_url\n\n 
mist_association.save()", "def test_access_token_refreshed_for_token_expired_with_get_method(self):\n with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):\n MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]\n proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)\n session = proxy._session\n first_token = session.token['access_token']\n # m.token['expires_at'] = m.token['expires_at'] - 36001\n proxy.get(\"/fake_url/1/\")\n second_token = session.token['access_token']\n self.assertEqual(len(session.get.call_args_list), 2) # Number of calls\n self.assertEqual(len(session.fetch_token.call_args_list), 2) # Number of calls\n session.get.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])\n self.assertEqual(AccessToken.objects.count(), 1)\n self.assertNotEquals(first_token, second_token)", "def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()", "def purgeExpiredRequests( self ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()\"\n return self._update( cmd )", "def get_polling(self, obj):\n try:\n del self._cache[obj.pk]\n except KeyError:\n pass\n return self.get_polling_many((obj,))[0]", "def reverse_ns(self, nameserver, limit=None):\n params = {}\n if limit:\n params = {'limit':limit}\n return self.apiquery('/v1/{}/name-server-domains/'.format(nameserver), params)", "def get_client_by_handle(self, handle):\n candidate_client_objects = [client for client in self if client.handle == handle]\n assert len( candidate_client_objects) < 2, \"?? 
socket %s appears in list of client objects multiple times\" % handle\n if candidate_client_objects:\n return candidate_client_objects[0]\n return None", "def get_expired_cnamecache():\n now = int(time())\n keys_to_del = []\n for key, ce in cnamecache.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append(key)\n return (keys_to_del, cnamecache)", "def test_refresh_associations_not_stale(self, mock_stale):\n mock_stale.return_value = False\n uuid = uuids.compute_node\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_not_called()", "def last_access_time_tracking_policy(self) -> Optional[pulumi.Input['LastAccessTimeTrackingPolicyArgs']]:\n return pulumi.get(self, \"last_access_time_tracking_policy\")", "def _do_expire(self):\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n memory = self.memory.copy()\n self.memory.clear()\n for key, val in memory.items():\n ip = key[0]\n if ip in self.live_servers and val.is_expired:\n # Decrease total connection for that server\n self.total_connection[ip] -= 1\n if not val.is_expired:\n self.memory[key] = val", "def _get_foreign_object(self, instance: models.Model\n ) -> Optional[models.Model]:\n try:\n return getattr(instance, self.foreign_key)\n except models.ObjectDoesNotExist:\n # this may raise DNE while cascade deleting with Collector\n return None", "def fetch_url_record(short_url = None):\r\n\r\n if short_url:\r\n url_record = db_collection.find_one({'name': short_url, 'deleted': False})\r\n\r\n if url_record:\r\n return url_record\r\n\r\n return None", "def get_thing_event(server, request):\n\n query = parse_request_opt_query(request)\n url_name_thing = query.get(\"thing\")\n url_name_event = query.get(\"name\")\n\n if not url_name_thing or not url_name_event:\n raise aiocoap.error.BadRequest(\"Missing query arguments\")\n\n exposed_thing = server.exposed_thing_set.find_by_thing_id(url_name_thing)\n\n if not exposed_thing:\n raise aiocoap.error.NotFound(\"Thing not found\")\n\n try:\n return next(\n exposed_thing.events[key] for key in exposed_thing.events\n if exposed_thing.events[key].url_name == url_name_event)\n except StopIteration:\n raise aiocoap.error.NotFound(\"Event not found\")", "def get_last_exposure(self):\n\n try:\n exposure = Exposure.objects.latest('pk')\n except Exposure.DoesNotExist:\n exposure = None\n\n return exposure", "def raise_timeout_error_get_relationship(api_url, headers, params, timeout, proxies):\n raise requests.exceptions.Timeout", "def get_closed_order():\n try:\n result = EXCHANGE.fetch_closed_orders(CONF.pair, since=None, limit=3, params={'reverse': True})\n if result is not None and len(result) > 0:\n orders = sorted(result, key=lambda order: order['datetime'])\n last_order = Order(orders[-1])\n LOG.info('Last %s', str(last_order))\n return last_order\n return None\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_closed_order()", "def getComic(handle):\n result = self.db.query('select from comics where handle = %s limit 1', handle)\n if len(result) < 1:\n return None\n else:\n return result[0]", "def get_external_oidc():\n\n unexpired_only = flask.request.args.get(\"unexpired\", \"false\").lower() == \"true\"\n\n global external_oidc_cache\n if not 
external_oidc_cache:\n data = {\n \"providers\": [\n {\n # name to display on the login button\n \"name\": idp_conf[\"name\"],\n # unique ID of the configured identity provider\n \"idp\": idp,\n # hostname URL - gen3fuse uses it to get the manifests\n \"base_url\": oidc_conf[\"base_url\"],\n # authorization URL to use for logging in\n \"urls\": [\n {\n \"name\": idp_conf[\"name\"],\n \"url\": generate_authorization_url(idp),\n }\n ],\n }\n for oidc_conf in get_config_var(\"EXTERNAL_OIDC\", [])\n for idp, idp_conf in oidc_conf.get(\"login_options\", {}).items()\n ]\n }\n external_oidc_cache = data\n\n # get the username of the current logged in user.\n # `current_user` validates the token and relies on `OIDC_ISSUER`\n # to know the issuer\n client = get_oauth_client(idp=\"default\")\n flask.current_app.config[\"OIDC_ISSUER\"] = client.metadata[\"api_base_url\"].strip(\"/\")\n username = None\n try:\n user = current_user\n username = user.username\n except Exception:\n flask.current_app.logger.info(\n \"no logged in user: will return refresh_token_expiration=None for all IdPs\"\n )\n\n # get all expirations at once (1 DB query)\n idp_to_token_exp = get_refresh_token_expirations(\n username, [p[\"idp\"] for p in external_oidc_cache[\"providers\"]]\n )\n\n result = {\"providers\": []}\n for p in external_oidc_cache[\"providers\"]:\n # expiration of the current user's refresh token\n exp = idp_to_token_exp[p[\"idp\"]]\n if exp or not unexpired_only:\n p[\"refresh_token_expiration\"] = exp\n result[\"providers\"].append(p)\n\n return flask.jsonify(result), 200", "def organization_get_expired_token(self, client, id, expired_token):\n assert client.get('/organizations/' + id, headers={\n 'Authorization': 'Bearer ' + expired_token})\\\n .status == '401 UNAUTHORIZED'", "def association_id(self) -> Optional[str]:\n return pulumi.get(self, \"association_id\")", "def get_last_refreshed_on_time():\n last_checked_on = Feed.select().aggregate(fn.Max(Feed.last_checked_on))\n if last_checked_on: \n return datetime_as_epoch(last_checked_on)\n \n # Return a fallback value\n return datetime_as_epoch(datetime.utcnow())", "def get_expiration_date(self, response: ClientResponse) -> Optional[datetime]:\n try:\n expire_after = self._get_expiration_for_url(response)\n except Exception:\n expire_after = self.expire_after\n return None if expire_after is None else datetime.utcnow() + expire_after", "def get_dead_dns_connection(self):\n return self.m_connection.dead_dns", "def get_expired_campaign(self):\n kwargs = {}\n kwargs['expirationdate__lte'] = datetime.utcnow().replace(tzinfo=utc)\n return Campaign.objects.filter(**kwargs).exclude(status=CAMPAIGN_STATUS.END)", "def last_access_time_tracking_policy(self) -> pulumi.Output[Optional['outputs.LastAccessTimeTrackingPolicyResponse']]:\n return pulumi.get(self, \"last_access_time_tracking_policy\")", "def test_refresh_associations_no_last(self):\n uuid = uuids.compute_node\n # Seed the provider tree so _refresh_associations finds the provider\n self.client._provider_tree.new_root('compute', uuid, generation=1)\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_were_called(uuid)", "def cleanupAssociations(self):\n try:\n mist_associations = MistAssociation.objects()\n except me.DoesNotExist:\n mist_associations = []\n\n counter = 0\n for assoc in mist_associations:\n if assoc.is_expired():\n assoc.delete()\n counter += 1\n\n return counter", "def getOfferExpires(did, lastOnly=True, dbn='did2offer', env=None):\n global gDbEnv\n\n if env is 
None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if cursor.set_key(did.encode(\"utf-8\")):\n if lastOnly:\n cursor.last_dup()\n entries.append(json.loads(cursor.value().decode(\"utf-8\"),\n object_pairs_hook=ODict))\n else:\n entries = [json.loads(value.decode(\"utf-8\"), object_pairs_hook=ODict)\n for value in cursor.iternext_dup()]\n\n return entries", "def get_id(self, fqdn):\n res = self.db.execute(sqlalchemy.select([ model.imaging_servers.c.id ],\n whereclause=(model.imaging_servers.c.fqdn==fqdn)))\n return self.singleton(res)", "def getEvent(self, timeout=None):\n socks = self.poller.poll(timeout)\n if not socks:\n return\n msg = socks[0][0].recv()\n d = self.mh.unserialize(msg)\n e = Event.fromDict(d)\n if self.store:\n _id = self.store.addEvent(e)\n e.id = _id\n return e", "def get_server_queue(self, server_name, timeout=5):\n start_time = time()\n while time() < start_time + timeout:\n sq = self.server_queues[server_name]\n if sq.job_queue_size() > 0:\n returnable_sq = copy.deepcopy(sq)\n del sq.job_queue[0]\n return returnable_sq\n raise ValueError(\"Server queue has no jobs in it\")", "def purgeExpiredProxies( self ):\n cmd = \"DELETE FROM `ProxyDB_Proxies` WHERE ExpirationTime < UTC_TIMESTAMP() and PersistentFlag = 'False'\"\n return self._update( cmd )", "def getServerAdaptation(self):\n for adaptation in self.serveradaptations.values():\n if adaptation.hasActualServers():\n return adaptation\n return None", "def read_last_link():\n\ttry:\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(CONFIG_FILE_NAME)\n\t\treturn unicode(config.get(\"DEFAULT\", \"last_link\"))\n\texcept Exception, e:\n\t\tprint e\n\t\treturn None", "def remove_if_expired(self, key, now):\n with self.GLOB_LOCK:\n inst = self._request_sessions.get(key, None)\n if inst is not None and (inst.last_access + self.TIMEOUT < now):\n self._request_sessions.pop(key, None)\n return True\n\n return False", "def _update_domains_on_server_delete(self, server):\n\n # find a replacement server\n replacement_server_name = None\n servers = self.central_service.find_servers(self.admin_context)\n\n for replacement in servers:\n if replacement['id'] != server['id']:\n replacement_server_name = replacement['name']\n break\n\n LOG.debug(\"This existing server name will be used to update existing\"\n \" SOA records upon server delete: %s \"\n % replacement_server_name)\n\n # NOTE: because replacement_server_name came from central storage\n # it has the trailing period\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n try:\n self.session.begin()\n # first delete affected NS records\n self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .delete()\n\n # then update all SOA records as necessary\n # Do the SOA last, ensuring we don't trigger a\n # NOTIFY before the NS records are in place.\n #\n # Update the content field of every SOA record that\n # has the deleted server name as part of its\n # 'content' field to reflect the name of another\n # server that exists\n # both server['name'] and replacement_server_name\n # have trailing period so we are fine just doing the\n # substitution without striping trailing period\n self.session.execute(models.Record.__table__\n .update()\n 
.where(and_(models.Record.__table__.c.type == \"SOA\",\n models.Record.__table__.c.content.like\n (\"%s%%\" % server['name'])))\n .values(content=func.replace(\n models.Record.__table__.c.content,\n server['name'],\n replacement_server_name)))\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n else:\n self.session.commit()", "def expire_promoted():\r\n with g.make_lock(promoted_lock_key):\r\n link_names = set(get_promoted_direct())\r\n links = Link._by_fullname(link_names, data=True, return_dict = False)\r\n\r\n link_names = []\r\n expired_names = []\r\n\r\n for x in links:\r\n if (not x.promoted\r\n or x.promote_until and x.promote_until < datetime.now(g.tz)):\r\n g.log.info('Unpromoting %s' % x._fullname)\r\n unpromote(x)\r\n expired_names.append(x._fullname)\r\n else:\r\n link_names.append(x._fullname)\r\n\r\n set_promoted(link_names)\r\n\r\n return expired_names", "def get_server(self, server):\n return self._get(_server.Server, server)", "def test_refresh_associations_disabled(self, mock_time):\n self.flags(resource_provider_association_refresh=0, group='compute')\n uuid = uuids.compute_node\n # Seed the provider tree so _refresh_associations finds the provider\n self.client._provider_tree.new_root('compute', uuid, generation=1)\n\n # Called a first time because association_refresh_time is empty.\n now = mock_time.return_value\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_were_called(uuid)\n\n # Clear call count.\n self.reset_getter_mocks()\n\n # A lot of time passes\n mock_time.return_value = now + 10000000000000\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_not_called(timer_entry=uuid)\n\n self.reset_getter_mocks()\n\n # Forever passes\n mock_time.return_value = float('inf')\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_not_called(timer_entry=uuid)\n\n self.reset_getter_mocks()\n\n # Even if no time passes, clearing the counter triggers refresh\n mock_time.return_value = now\n del self.client._association_refresh_time[uuid]\n self.client._refresh_associations(self.context, uuid)\n self.assert_getters_were_called(uuid)", "def cache_expiration(self):\n\n\t\t# Iterate through servers\n\t\tfor serv in self.servers:\n\t\t\tserv.cache.hash_table.clear() # Erase the cache\n\t\t\tserv.cache.cur_size = 0 # Resets the number of items in the cache to 0", "def _check_endpoints_last_communication(self):\n while self._thread_running:\n endpoints = EndpointService.get_all_endpoints()\n for endpoint in endpoints:\n if datetime.now() - endpoint.lastCommunication > timedelta(minutes=TOKEN_EXPIRE_TIME):\n EventService.add_event(Event(\"0\", \"Lost Endpoint At \" +\n str(endpoint[EndpointKeys.LAST_COMMUNICATION_KEY]),\n \"Report\", \"IDLE\", endpoint[EndpointKeys.HOSTNAME_KEY],\n endpoint[EndpointKeys.IP_ADDRESS_KEY]))\n EndpointService.delete_endpoint(endpoint.id)\n sleep(SLEEP_TIME)", "def get_key(self, keyId):\n diff = datetime.now() - OpenIDConnectConfiguration.lastUpdated\n\n # Refresh the cache if it's more than 5 days old.\n if diff.total_seconds() > 5 * 24 * 60 * 60:\n refresh_cache()\n\n keys = OpenIDConnectConfiguration.signing_keys.get(self._url)\n if keys is not None:\n return keys.get(keyId)\n \n return None", "def getLatestValidCertification(self):\n cert = None\n lastfrom = None\n lastto = None\n for c in self.getCertifications():\n validfrom = c.getValidFrom() if c else None\n validto = c.getValidTo() if validfrom else None\n if not validfrom or 
not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if not cert \\\n or validto > lastto \\\n or (validto == lastto and validfrom > lastfrom):\n cert = c\n lastfrom = validfrom\n lastto = validto\n return cert", "def id(self):\n if self.cloudserver:\n return self.cloudserver.id\n else:\n return None", "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache", "def getServer(request):\n return Server(getOpenIDStore(), getViewURL(request, endpoint))", "def get_valid_expiration_from_request(\n expiry_param=\"expires_in\", max_limit=None, default=None\n):\n return get_valid_expiration(\n flask.request.args.get(expiry_param), max_limit=max_limit, default=default\n )", "def determine_ocsp_server(self, cert_path):\n try:\n url, _err = util.run_script(\n [\"openssl\", \"x509\", \"-in\", cert_path, \"-noout\", \"-ocsp_uri\"],\n log=logger.debug)\n except errors.SubprocessError:\n logger.info(\"Cannot extract OCSP URI from %s\", cert_path)\n return None, None\n\n url = url.rstrip()\n host = url.partition(\"://\")[2].rstrip(\"/\")\n if host:\n return url, host\n else:\n logger.info(\"Cannot process OCSP host from URL (%s) in cert at %s\", url, cert_path)\n return None, None", "def test_must_be_associated(self):\n\n def handle(event):\n return 0x0000\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicFilmSession)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_DELETE, handle)]\n )\n\n ae.add_requested_context(BasicFilmSession)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n assoc.release()\n assert assoc.is_released\n assert not assoc.is_established\n with pytest.raises(RuntimeError):\n assoc.send_n_delete(None, None)\n\n scp.shutdown()", "def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})", "def dequeue(self, server_id):\n srv = self.get_server_dict(server_id)\n if len(srv['queue']) <= 0:\n return None\n return srv['queue'].popleft()", "def _get(context, namespace_name, resource_type_name,\n namespace_id, resource_type_id, session):\n\n # visibility check assumed done in calling routine via namespace_get\n try:\n query = session.query(models.MetadefNamespaceResourceType).filter_by(\n namespace_id=namespace_id, resource_type_id=resource_type_id)\n db_rec = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"The metadata definition resource-type association of\"\n \" resource_type=%(resource_type_name)s to\"\n \" namespace_name=%(namespace_name)s was not found.\",\n {'resource_type_name': resource_type_name,\n 'namespace_name': namespace_name})\n raise exc.MetadefResourceTypeAssociationNotFound(\n resource_type_name=resource_type_name,\n namespace_name=namespace_name)\n\n return db_rec", "def download_fetch_ical(self, uri, force=False):\n if self.calendar is None or force:\n tmpfile = os.path.join(os.path.dirname(self.ourpath), 'calcache.ics')\n 
fetchit = lambda: urllib.urlretrieve(uri, tmpfile)\n\n # Variables to check when the .ics file was last modified\n now = datetime.datetime.now(self.localtime)\n delta = datetime.timedelta(days=1)\n\n # If a cache file exists, lets check to see if it's younger than a day\n if os.path.exists(tmpfile):\n modtime = datetime.datetime.fromtimestamp(os.path.getmtime(tmpfile), now.tzinfo)\n one_day = datetime.timedelta(days=1)\n delttt=now-one_day\n\n # If we're older than one day, delete it and get a fresh one.\n if modtime < delttt:\n os.remove(tmpfile)\n fetchit()\n\n else:\n fetchit()\n\n # Finally load in the .ics file\n self.calendar = vobject.readOne(open(tmpfile).read())", "def get_track(session_, uri) -> Track:\n return session_.query(Track).filter_by(uri=uri).first()", "def get(self, key):\n if self.dexists('ttl', key) and int(dt.now().strftime('%s')) >= self.dget('ttl', key):\n self.rem(key)\n return None\n return super(MyCache, self).get(key)", "def get_internal_result_from_server(self, server_name, timeout=4):\n start_time = time()\n while time() < start_time + timeout:\n for i in range(len(self.internal_result_queue)):\n if self.internal_result_queue[i].processed_by == server_name:\n return_result = copy.deepcopy(self.internal_result_queue[i])\n del self.internal_result_queue[i]\n return return_result", "def acquire(self, expires=10):\n ret = self._communicate('get %d' % expires).split(' ')\n if ret[0] == 'okay':\n return ret[1]", "def testApprovalsAreCachedForLimitedTime(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n\n with test_lib.ConfigOverrider({\"ACL.token_expiry\": 30}):\n self.RequestAndGrantClientApproval(\n client_id, requestor=self.test_username)\n\n f = self.api.Client(client_id).CreateFlow(\n name=flow_test_lib.SendingFlow.__name__)\n\n # Move the clocks past approval expiry time but before cache expiry time.\n with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +\n rdfvalue.DurationSeconds(\"30s\")):\n # If this doesn't raise now, all answers were cached.\n self.api.Client(client_id).Flow(f.flow_id).Get()\n\n with test_lib.FakeTime(\n rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(\n api_router.AccessChecker.APPROVAL_CACHE_TIME, rdfvalue.SECONDS)):\n # This must raise now.\n self.assertRaises(grr_api_errors.AccessForbiddenError,\n self.api.Client(client_id).Flow(f.flow_id).Get)", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def getAssociation(self, *args):\n return _libsbml.GeneAssociation_getAssociation(self, *args)", "def pop(self):\n document_metadata = DocumentMetadata()\n item = self.priority_store.pop(int(time.time()))\n if item:\n logging.debug(\"Get priority:\" + str(item[1]))\n document_metadata.url = item[1]\n document_metadata.depth = item[3]\n document_metadata.delay = item[2]\n document_metadata.source = Source.priority\n else:\n while not item:\n item = self.normal_store.pop()\n if not item:\n break\n # the following check is needed because urls are stored in seen\n # after 
seeing them\n # so we can have multiple identical url in normal list.\n # and we do not want to have multiple same urls in refetching list\n if not self.seen.is_new(item[1]):\n item = None\n if item:\n # In case of network error I repush url on normal queue\n # just to not loose them. So it is possible we have\n # something already seen here.\n # It is not a problem to refetch this cases\n logging.debug(\"Get normal:\" + str(item[1]))\n document_metadata.url = item[1]\n document_metadata.depth = item[0]\n document_metadata.delay = 0\n document_metadata.source = Source.normal\n else:\n item = self.refetch_store.pop(int(time.time()))\n if item:\n logging.debug(\"Get Refetch:\" + str(item[1]))\n document_metadata.url = item[1]\n document_metadata.depth = item[3]\n document_metadata.delay = item[2]\n document_metadata.source = Source.refetch\n\n return document_metadata", "def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]", "def test_mail_client_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n response = self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def testExpiredClientApprovalIsNoLongerValid(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n gui_test_lib.CreateFileVersion(client_id, \"fs/os/foo\")\n\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n with test_lib.FakeTime(100.0, increment=1e-3):\n self.RequestAndGrantClientApproval(\n client_id, requestor=self.test_username)\n\n # This should work now.\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n token_expiry = config.CONFIG[\"ACL.token_expiry\"]\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # This is close to expiry but should still work.\n with test_lib.FakeTime(100.0 + token_expiry - 100.0):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # Past expiry, should fail.\n with test_lib.FakeTime(100.0 + token_expiry + 100.0):\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()", "def fetch(self, server):\n url = None\n\n opener = urllib.build_opener()\n addheaders = {\n \"User-agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1\"\n }\n\n # Ignore SSL certificate errors\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n try:\n req = urllib.Request(server, None, addheaders)\n url = urllib.urlopen(req, timeout=2, context=ctx)\n content = url.read()\n\n if PY3K:\n try:\n content = content.decode(\"UTF-8\")\n except UnicodeDecodeError:\n content = content.decode(\"ISO-8859-1\")\n\n m = re.search(\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\",\n content,\n )\n myip = m.group(0)\n return myip if len(myip) > 0 else \"\"\n except Exception as e:\n print(\"Exception raised on url {} -- \".format(server), e)\n return \"\"\n finally:\n if url:\n url.close()", "def get(self, timeout=0):\n id = self.redis.brpop(self.feed_ids, timeout)\n if id is None:\n raise Empty\n id 
= id[1]\n\n pipe = self.redis.pipeline()\n pipe.zadd(self.feed_claimed, **{id: int(time.time()*1000)})\n pipe.hget(self.feed_items, id)\n result = pipe.execute()\n \n return id, result[1]", "def exc(qid, qname, question): \n\n def handler(signum, frame):\n print 'Signal handler called with signal', signum\n raise OutOfTimeException(\"Query couldn't be processed in under 1 minute!\")\n try:\n # Set the signal handler and a 60 second alarm\n # as per requirement 2\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(60)\n ##clean out expired records before we handle this query\n update_caches() \n response = construct_response(*resolve(0, qid, question, qname, qname, get_best_ns(nscache, qname), [], [], []))\n signal.alarm(0) \n except (OutOfTimeException) as e:\n # Server Failure\n logger.error(e)\n signal.alarm(0)\n response = construct_response(qid, question, [], [], [], RCODE=Header.RCODE_SRVFAIL)\n return response", "def invalidate(self, assoc_handle, dumb):\n if dumb:\n key = self._dumb_key\n else:\n key = self._normal_key\n self.store.removeAssociation(key, assoc_handle)", "def test_server_timeout():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n one = client.new_peer(expires_in=timedelta(minutes=1))\r\n two = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lease\r\n _ = client.acquire(one, \"A\")\r\n\r\n # Wait for two in a seperate thread so we do not block forever if this test\r\n # fails.\r\n def wait_for_two():\r\n client.acquire(two, \"A\", block_for=timedelta(seconds=0.5))\r\n\r\n t = Thread(target=wait_for_two)\r\n t.start()\r\n\r\n # Three seconds should be ample time for `t` to return\r\n t.join(3)\r\n # If `t` is alive, the join timed out, which should not be the case\r\n assert not t.is_alive()", "def notify_expired(self):\n logging.info('Facebook access token expired! Sending notification to user.')\n params = {\n 'template': \"Brid.gy's access to your account has expired. 
Click here to renew it now!\",\n 'href': 'https://www.brid.gy/facebook/start',\n # this is a synthetic app access token.\n # https://developers.facebook.com/docs/facebook-login/access-tokens/#apptokens\n 'access_token': '%s|%s' % (appengine_config.FACEBOOK_APP_ID,\n appengine_config.FACEBOOK_APP_SECRET),\n }\n url = API_NOTIFICATION_URL % self.key.id()\n resp = urllib2.urlopen(urllib2.Request(url, data=urllib.urlencode(params)),\n timeout=appengine_config.HTTP_TIMEOUT)\n logging.info('Response: %s %s' % (resp.getcode(), resp.read()))", "def getOpenId(self):\n if self.sess is None: return None\n return self.sess.data.get('openid.identity')", "def _get_expiration_for_url(self, response: ClientResponse) -> Optional[timedelta]:\n for pattern, expire_after in self.expire_after_urls.items():\n if glob_match(_base_url(response.url), pattern):\n logger.debug(f'URL {response.url} matched pattern \"{pattern}\": {expire_after}')\n return expire_after\n raise ValueError('No matching URL pattern')", "def get_resolver_rule_association(ResolverRuleAssociationId=None):\n pass", "def lookupLink(cls, session, link, model, recordID):\n checkURL = Link.httpRegexSub(link.get('url', None))\n return session.query(cls)\\\n .join(model.__tablename__)\\\n .filter(model.id == recordID)\\\n .filter(cls.url == checkURL)\\\n .one_or_none()", "def get_queue(self, modified, server_nonce):\n query = \"\"\"SELECT server,\n otp,\n modified,\n info\n FROM queue\n WHERE modified=%s\n AND server_nonce = %s\"\"\"\n self._execute(query, (modified, server_nonce))\n return self._dictfetchall()", "def _async_remove_obsolete_entities(\n hass: HomeAssistant, entry: ConfigEntry, hap: HomematicipHAP\n):\n\n if hap.home.currentAPVersion < \"2.2.12\":\n return\n\n entity_registry = er.async_get(hass)\n er_entries = async_entries_for_config_entry(entity_registry, entry.entry_id)\n for er_entry in er_entries:\n if er_entry.unique_id.startswith(\"HomematicipAccesspointStatus\"):\n entity_registry.async_remove(er_entry.entity_id)\n continue\n\n for hapid in hap.home.accessPointUpdateStates:\n if er_entry.unique_id == f\"HomematicipBatterySensor_{hapid}\":\n entity_registry.async_remove(er_entry.entity_id)", "def get_handle_from_gramps_id(self, gid):\n obj = self.dbstate.db.get_object_from_gramps_id(gid)\n if obj:\n return obj.get_handle()\n else:\n return None", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def get_latest_saved(self):\n doc = (get_latest_released_app_doc(self.domain, self._id)\n or get_latest_build_doc(self.domain, self._id))\n return self.__class__.wrap(doc) if doc else None", "def pop(self):\n cursor = self.conn.cursor()\n now = time.time()\n cursor.execute(\"\"\"\n SELECT * FROM events\n WHERE date < ? 
AND countdown > 0\n ORDER BY date ASC LIMIT 1\"\"\", (now, ))\n raw = cursor.fetchone()\n if raw:\n event = Event(*raw)\n return event", "def test_access_token_refreshed_for_token_expired_with_post_method(self):\n with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):\n MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]\n proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)\n session = proxy._session\n first_token = session.token['access_token']\n # m.token['expires_at'] = m.token['expires_at'] - 36001\n proxy.post(\"/fake_url/1/\")\n second_token = session.token['access_token']\n self.assertEqual(len(session.post.call_args_list), 2) # Number of calls\n self.assertEqual(len(session.fetch_token.call_args_list), 2) # Number of calls\n session.post.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])\n self.assertEqual(AccessToken.objects.count(), 1)\n self.assertNotEquals(first_token, second_token)", "def _get_expire(self):\n return self.__expire", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def get_expiring_tokens(user, soon):\n soon_datetime = datetime.now() + soon\n return AppSpecificAuthToken.select().where(\n AppSpecificAuthToken.user == user,\n AppSpecificAuthToken.expiration <= soon_datetime,\n AppSpecificAuthToken.expiration > datetime.now(),\n )", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def get_monitor_obj(link):\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n monitor = monitors.find_one({'metadata.rss_link': link}, {'_id': 0})\n return monitor", "def dynamo_create_revocation_endpoint(dynamo):\n from authlib.oauth2.rfc7009 import RevocationEndpoint\n\n class _RevocationEndpoint(RevocationEndpoint):\n def query_token(self, token_string, token_type_hint, client):\n return dynamo.get_token(token_string)\n\n def revoke_token(self, token):\n token.revoked = True\n dynamo.save_token(token)\n\n return _RevocationEndpoint", "def expiration(self) -> Optional[str]:\n return pulumi.get(self, \"expiration\")", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)" ]
[ "0.77972186", "0.76248354", "0.61887425", "0.6054781", "0.568007", "0.49513325", "0.4506718", "0.44599456", "0.44545853", "0.4377314", "0.4343994", "0.42867646", "0.42596504", "0.42539644", "0.4183411", "0.41812235", "0.41506642", "0.4147285", "0.41288793", "0.41181672", "0.41144222", "0.41048545", "0.40961102", "0.4091183", "0.40865391", "0.40860868", "0.40826693", "0.40787965", "0.407019", "0.4065627", "0.40531144", "0.4048055", "0.4045517", "0.40398297", "0.40389693", "0.40312114", "0.4030156", "0.40213734", "0.40195227", "0.4019275", "0.3985147", "0.3983398", "0.39819404", "0.39790443", "0.3974963", "0.3972399", "0.3963454", "0.39457968", "0.39434773", "0.394341", "0.39277047", "0.39141935", "0.38923073", "0.38894245", "0.38804442", "0.38764042", "0.38758853", "0.3874174", "0.38667697", "0.3862371", "0.38518232", "0.38507378", "0.3848384", "0.38397026", "0.38356534", "0.38190266", "0.38149217", "0.38148162", "0.38144168", "0.38142377", "0.3813594", "0.38084272", "0.38080025", "0.38072264", "0.37973148", "0.3796611", "0.37877795", "0.37843367", "0.37817082", "0.37790152", "0.3768386", "0.3767431", "0.37660468", "0.3763523", "0.3742037", "0.37420067", "0.374192", "0.3737551", "0.37360358", "0.3728996", "0.37267533", "0.37244275", "0.37235376", "0.37230113", "0.37171617", "0.37148914", "0.37135482", "0.37128782", "0.3708216", "0.37040395" ]
0.7585846
2
This method removes the matching association if it's found, and returns whether the association was removed or not.
def removeAssociation(self, server_url, handle):
    try:
        mist_associations = MistAssociation.objects(
            server_url=server_url, handle=handle.hex())
    except me.DoesNotExist:
        return False

    for assoc in mist_associations:
        assoc.delete()

    return len(mist_associations) > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeAssociation(self, *args):\n return _libsbml.FbcOr_removeAssociation(self, *args)", "def removeAssociation(self, *args):\n return _libsbml.Association_removeAssociation(self, *args)", "def removeAssociation(self, *args):\n return _libsbml.FbcAnd_removeAssociation(self, *args)", "def remove(self) -> object:\n return self._contains.pop()", "def remove(self, el: T) -> bool:\n if el in self:\n del self[el]\n return True\n else:\n return False", "def unsetAssociation(self):\n return _libsbml.GeneProductAssociation_unsetAssociation(self)", "def clearAssociations(self):\n return _libsbml.Association_clearAssociations(self)", "def unsetAssociation(self):\n return _libsbml.GeneAssociation_unsetAssociation(self)", "def remove(self, *args):\n return _libsbml.ListOfFbcAssociations_remove(self, *args)", "def remove(self, *args):\n return _libsbml.ListOfGeneAssociations_remove(self, *args)", "def remove(categoria: Categoria) -> bool:\n return remove_id(categoria.idd)", "def remove(self, elem):\n if self.inicio == None:\n raise ValueError(\"{} nao esta na lista\".format(elem))\n elif self.inicio.dado == elem:\n self.inicio = self.inicio.prox\n self._size = self._size - 1\n return True\n else:\n ancestor = self.inicio\n ponteiro = self.inicio.prox\n while ponteiro:\n if ponteiro.dado == elem:\n ancestor.prox = ponteiro.prox\n ponteiro.prox = None\n ancestor = ponteiro\n ponteiro = ponteiro.prox\n self._size = self._size - 1\n return True\n raise ValueError(\"{} nao esta na lista\".format(elem))", "def _remove(self, key: bytes) -> bool:\n if self._get(key) != None:\n self.db.delete(key)\n return True\n else:\n return False", "def remove(self):\n return self.delete_instance()", "def delete_element_from_store(entry_sequence, is_propagated_call = False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tdel board[int(entry_sequence)]\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def remove(self, *args, **kwargs):\n return False", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def remove(self, criteria):\n return self.connection.remove(criteria)", "def remove(self):\n cursor = yield self._pool.execute(\n self._delete(), [self._get_value_or_default(self.PRIMARY_KEY)])\n count = cursor.rowcount\n result = True if count == 1 else False\n return result", "def removeGeneAssociation(self, *args):\n return _libsbml.FbcModelPlugin_removeGeneAssociation(self, *args)", "def remove_section(self, section):\n existed = section in self._sections\n if existed:\n del self._sections[section]\n return existed", "def cleanupAssociations(self):\n try:\n mist_associations = MistAssociation.objects()\n except me.DoesNotExist:\n mist_associations = []\n\n counter = 0\n for assoc in mist_associations:\n if assoc.is_expired():\n assoc.delete()\n counter += 1\n\n return counter", "def removeAssociation(self, server_url, handle):\r\n query = datastore.Query('Association',\r\n {'url =': server_url, 'handle =': handle})\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())", "def delete(self, item):\n is_found, active, node = self._find(item)\n if is_found and active:\n idx = node.items.index(item)\n node.active[idx] = False\n return True\n else:\n return False", "def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 
'This user is not part of Group %s.' % group.name,\n })", "def _remove_record(self, model, row_id) -> bool:\n try:\n model = self.session.query(model).filter_by(id=row_id).first()\n if model:\n self.session.delete(model)\n self.session.commit()\n\n return True\n else:\n return False\n\n except Exception:\n self.session.rollback()\n\n raise", "def isSetAssociation(self):\n return _libsbml.GeneProductAssociation_isSetAssociation(self)", "def remove(self, val):\n temp = self.table.pop(val, None)\n if temp is None:\n return False\n return True", "def drop(cls):\n objects = cls.get_all()\n if isinstance(objects, dict) is False:\n for i in cls.get_all():\n i.delete()\n return True\n else:\n return True", "def remove(self):\n path = os.path.abspath(path)\n if path in self.files:\n del self.files[path]\n return True\n return False", "def isSetAssociation(self):\n return _libsbml.GeneAssociation_isSetAssociation(self)", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)", "def remove(self, element) -> bool:\n\n target_node = self.__find_node(element)\n\n if target_node is None:\n return False\n\n self.__size -= 1\n\n if target_node.left is None or target_node.right is None:\n self.__remove_node(target_node)\n else:\n successor_node = self.__get_largest_node(target_node.left)\n target_node.data = successor_node.data\n\n self.__remove_node(successor_node)\n\n if AVLTree.__DEBUG and not self.__is_balanced(self.__root):\n raise AssertionError(\"This AVL Tree is not balanced any more.\")\n\n return True", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def remove(self, callback) -> bool:\n for i, tracker in enumerate(self.trackers):\n if tracker.original_callback == callback:\n del self.trackers[i]\n return True\n return False", "def remove(self):\n if self.removed:\n return\n self._remove()\n self.removed = True", "def remove(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Remove called on non many to many model\")\n\n query = RemoveQuery(cls, obj1, obj2)\n yield query.execute()\n\n if obj2 in getattr(obj1, obj2._meta.name):\n getattr(obj1, obj2._meta.name).remove(obj2)\n\n if obj1 in getattr(obj2, obj1._meta.name):\n getattr(obj2, obj1._meta.name).remove(obj1)", "def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n return True\n except SQLAlchemyError:\n db.session.rollback()\n return False", "def remove(self, val):\n i = self.d.get(val)\n if i is None:\n return False\n assert 0 <= i < len(self.l)\n last_val = self.l[-1]\n 
if val != last_val:\n self.d[last_val] = i\n self.l[i] = last_val\n del self.d[val]\n _ = self.l.pop()\n return True", "def undelete(self, item):\n is_found, active, node = self._find(item)\n if is_found and not active:\n idx = node.items.index(item)\n node.active[idx] = True\n return True\n else:\n return False", "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return True", "def remove(self, val):\n in_ds = False\n if val in self.ds:\n self.ds.remove(val)\n in_ds = True\n return in_ds", "def remove(self, spec_or_id=None):\n if isinstance(spec_or_id, ObjectId) or \\\n isinstance(spec_or_id, basestring):\n return self.database.connection.request.delete_document(\n self.database.name, self.name, spec_or_id)\n if not spec_or_id:\n spec_or_id = {}\n return self.database.connection.request.delete_replace_documents(\n self.database.name, self.name, spec_or_id, [])", "def delete(self, key):\n if key in self._datastore:\n del self._datastore[key]\n return True\n else:\n raise KeyError(\n \"Tried to delete a non existing record\"\n )", "def _remove_subsystem(self, subname):\n if subname in self._subs:\n # Remove the subsystem\n self._subs.pop(subname)\n # reset the iterator\n self._iter_subs = sorted(self._subs.keys())\n self._iter_ind = 0\n return True\n return False", "def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n return True\n except SQLAlchemyError as error_message:\n app_logger.error(error_message)\n return False", "def remove(self, val):\n if val in self.dic:\n i = self.dic[val]\n if i<len(self.data)-1:\n self.data[i]=self.data[-1]\n self.dic[self.data[i]]=i\n self.data.pop()\n self.dic.pop(val,0)\n return True\n else:\n return False", "def remove(self):\r\n\t\tself._delete()", "def delete(self, cascade=False):\n if cascade == True:\n raise NotImplementedError()\n else:\n result = config.item_remove(self._original_attributes)\n self._event(level=\"write\", message=\"Object was deleted\")\n return result", "def remove(self, path):\r\n return self.paths.remove(path)", "def removeAssociation(self, server_url, handle):\n query = models.Association.gql('WHERE url = :1 AND handle = :2',\n server_url, handle)\n return self._delete_first(query)", "def __remove_restriction(self, restrictionType: str, object, categoryName: str):\n category = self.__switch_category(restrictionType)\n categorySet = category.get(categoryName)\n if categorySet == None:\n return False\n objectRemoved = object in categorySet\n categorySet.discard(object)\n if len(categorySet) == 0:\n category.pop(categoryName)\n return objectRemoved", "def delete(self, key):\n _filter = {'_id': key}\n count = self.collection.count(_filter)\n\n if count:\n self.collection.remove(_filter)\n return True\n return False", "def remove_movie(self, movie_id):\n if movie_id in self.movies:\n \t\tdel self.movies[movie_id]\n \t\treturn True\n \telse:\n \t\treturn False", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def 
remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self,data):\n ret = self._rest_call(data, 'DELETE')\n return ret[0] == 200", "def unsubscribe(self, first, second):\n for sub in self._subscriptions.values():\n if sub == (first, second):\n self._subscriptions.pop(sub.ID)\n return True\n elif sub.contains(first) and sub.hasParent( second ):\n self._subscriptions.pop(sub.ID)\n return True\n \n return False", "def remove(self,item):\r\n raise AbstractError\r\n return False", "def remove(self, value: object) -> bool:\n for _ in range(self.da.length()):\n if value == self.da[_]:\n self.da.remove_at_index(_)\n return True\n return False", "def remove(self, key):\r\n\t\tif self.head is None:\r\n\t\t\treturn False\r\n\t\tif self.head.key == key:\r\n\t\t\tself.head = self.head.next\r\n\t\t\tself.size = self.size - 1\r\n\t\t\treturn True\r\n\t\tcur = self.head.next\r\n\t\tprev = self.head\r\n\t\twhile cur is not None:\r\n\t\t\tif cur.key == key:\r\n\t\t\t\tprev.next = cur.next\r\n\t\t\t\tself.size = self.size - 1\r\n\t\t\t\treturn True\r\n\t\t\tprev = cur\r\n\t\t\tcur = cur.next\r\n\t\treturn False", "def remove(self):\n raise NotImplementedError", "def remove(self):\n raise NotImplementedError", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True", "def _should_remove(self, mac, obj):\n ret = False\n if getattr(obj, self.toggle_val) == self.toggle_check\\\n and self.toggle.state == 'down':\n ret = True\n return ret", "def remove(query):\n # type: (str) -> bool\n if not query or not SEARCH_SAVED:\n return False\n searches = retrieve()\n if query in searches:\n searches.remove(query)\n save(searches)\n return True\n return False", "def remove(self, key):\n if self.head is None:\n return False\n if self.head.key == key:\n self.head = self.head.next\n self.size = self.size - 1\n return True\n cur = self.head.next\n prev = self.head\n while cur is not None:\n if cur.key == key:\n prev.next = cur.next\n self.size = self.size - 1\n return True\n prev = cur\n cur = cur.next\n return False", "def remove(self, key):\n if self.head is None:\n return False\n if self.head.key == key:\n self.head = self.head.next\n self.size = self.size - 1\n return True\n cur = self.head.next\n prev = self.head\n while cur is not None:\n if cur.key == key:\n prev.next = cur.next\n self.size = self.size - 1\n return True\n prev = cur\n cur = cur.next\n return False", "def remove(self, document):\n return self.db.pop(document['id'], None)", "def deleteLast(self):\n if not self.isEmpty():\n self._data.pop()\n return True\n else:\n return False", "def remove(self, value, _sa_initiator=None):\n\n key = self.keyfunc(value)\n # Let self[key] raise if key is not in this collection\n # testlib.pragma exempt:__ne__\n if not self.__contains__(key) or value not in self[key]:\n raise sa_exc.InvalidRequestError(\n \"Can not remove '%s': collection holds '%s' for key '%s'. 
\"\n \"Possible cause: is the MappedCollection key function \"\n \"based on mutable properties or properties that only obtain \"\n \"values after flush?\" %\n (value, self[key], key))\n self.__getitem__(key, _sa_initiator).remove(value)", "def remove_id(idd: int) -> bool:\n conn = GenericDao.connect()\n cursor = conn.execute(\"DELETE FROM categorias where categoria_id = ?\", (str(idd),))\n conn.commit()\n conn.close()\n if debug:\n print('Categoria eliminada: ' + str(cursor.rowcount))\n return cursor.rowcount > 0", "def remove(self, uid):\n marker = object()\n name = self._reverse.get(uid, marker)\n if name is not marker:\n del self._reverse[uid]\n try:\n del self._forward[name]\n except KeyError:\n # If it isn't there, good, that is the outcome we wanted,\n # right?\n pass", "def remove(self, val: int) -> bool:\n if val in self.hashmap:\n last_elem, idx = self.array[-1], self.hashmap[val]\n self.array[idx], self.hashmap[last_elem] = last_elem, idx\n self.array.pop()\n self.hashmap.pop(val)\n return True\n return False", "def unloadEvent(self, eid):\n try: \n tmp = self._subscriptions\n self._subscriptions.clear()\n for k in tmp.keys(): #FIXME: There needs to be a faster way of doing this.\n if tmp[k].eid != eid:\n self._subscriptions[k] = tmp[k]\n \n return self._events.pop(eid, None) is not None\n except: return False", "def _remove(self):\n pass", "def remove(self, value: object) -> bool:\n # Loops through the indices of the underlying dynamic array.\n end = self.size()\n for ind in range(end):\n # If the value is found, the value is removed from the dynamic array and True is returned.\n if self.da[ind] == value:\n self.da.remove_at_index(ind)\n return True\n # Else false is returned.\n return False", "def remove(self, key: int) -> bool:\n current = self.root.find(key) if not self.empty() else None\n if current is None: # if no such key, failure\n return False\n\n self.root = current.remove() # update root\n return True", "def delete(self, target):\n if self.rear == None:\n return False\n curr = self.rear.next # curr at first\n prev = self.rear\n while True:\n if curr.data == target:\n prev.next = curr.next\n if curr == self.rear:\n # curr is last node, prev becomes new last\n if prev == self.rear:\n # list has only one node\n self.rear = None\n else:\n self.rear = prev\n return True\n prev = curr\n curr = curr.next\n if prev == self.rear:\n break\n return False", "def remove(self, other):\n self._check_item(other)\n self._set.remove(other)", "def forceRemove( self ):\n scene = self.scene()\n if ( scene ):\n scene.forceRemove(self)", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None", "def remove(self):\n\t\treturn self._flist.remove(self)", "def delete(self) -> bool:\n return False", "def remove(self, val: int) -> bool:\n idx = self.store_dict.get(val)\n if idx is None:\n return False\n\n l = len(self.store_list)\n self.store_dict[self.store_list[l - 1]] = idx\n self.store_list[idx], self.store_list[l - 1] = self.store_list[l - 1],self.store_list[idx]\n self.store_list.pop()\n del self.store_dict[val]\n return True" ]
[ "0.69179744", "0.6731579", "0.67246026", "0.5947728", "0.58918077", "0.5864309", "0.5859789", "0.58411855", "0.5758755", "0.5709778", "0.56879985", "0.5626436", "0.5593932", "0.55795133", "0.5561436", "0.55258703", "0.550188", "0.5499768", "0.54942656", "0.5483335", "0.5459692", "0.54500484", "0.5444971", "0.5407289", "0.5371318", "0.53271973", "0.531121", "0.5304047", "0.52924246", "0.5285162", "0.52690935", "0.5267976", "0.5267976", "0.5250128", "0.524508", "0.5228255", "0.5217408", "0.5213962", "0.5212007", "0.52073854", "0.5197932", "0.5196335", "0.5189229", "0.5188585", "0.5183789", "0.51832795", "0.51788026", "0.5154452", "0.5149982", "0.51468086", "0.51462793", "0.51428246", "0.51401037", "0.5127313", "0.5111609", "0.51089215", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509424", "0.509091", "0.50837857", "0.50811523", "0.50804645", "0.50713825", "0.5068552", "0.5068552", "0.5049669", "0.5046201", "0.50460637", "0.50426644", "0.50426644", "0.5041918", "0.5029814", "0.50278443", "0.5018944", "0.5016431", "0.5011875", "0.50075626", "0.5006848", "0.4990997", "0.49855825", "0.4984438", "0.4981366", "0.49797568", "0.49724957", "0.49696308", "0.49665615", "0.49648437", "0.49498105" ]
0.64031404
3
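Several of the negative snippets in the row above implement O(1) removal by swapping the target element with the last slot of a backing list before popping; a standalone sketch of that pattern follows (the function and variable names are illustrative, not taken from any one snippet).

# Illustrative only: swap-with-last O(1) removal, as used by several snippets
# above (a dict maps each value to its index in the backing list).
def remove(index_of, items, val):
    if val not in index_of:
        return False
    i = index_of[val]
    last = items[-1]
    items[i] = last          # move the last element into the freed slot
    index_of[last] = i       # keep its index mapping in sync
    items.pop()
    del index_of[val]
    return True

# e.g. items = [3, 7]; index_of = {3: 0, 7: 1}; remove(index_of, items, 3) -> True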
Called when using a nonce. This method should return C{True} if the nonce has not been used before, and store it for a while to make sure nobody tries to use the same value again. If the nonce has already been used or the timestamp is not current, return C{False}. You may use L{openid.store.nonce.SKEW} for your timestamp window.
def useNonce(self, server_url, timestamp, salt): if is_nonce_old(timestamp): return False try: mist_nonces = MistNonce.objects(server_url=server_url, salt=salt, timestamp=timestamp) except me.DoesNotExist: mist_nonces = [] if len(mist_nonces) == 0: print("Timestamp = %s" % timestamp) MistNonce( server_url=server_url, salt=salt, timestamp=timestamp ).save() return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def useNonce(self, nonce):\r\n query = datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False", "def validateNonce(lastNonce, lastHash, nonce):\n sha = hashlib.sha256(f'{lastNonce}{lastHash}{nonce}'.encode())\n return sha.hexdigest()[:4] == '0000'", "def _nonce():\n return str(round(100000 * time.time()) * 2)", "def check_one_time_nonce(self, user_supplied_nonce):\n\n if self.nonce_action_auth_valid_uses > 0:\n self.nonce_action_auth_valid_uses -= 1\n ret = util.safe_string_compare(user_supplied_nonce, self.nonce_action_auth)\n if ret is True: # explicitly checking for boolean True\n return True\n return False\n return False", "def _nonce(self):\n return str(int(round(time.time() * 10000)))", "def get_nonce() -> int:\n return int(time.time() * FACTOR)", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def set_nonce(self, nonce=None):\n if nonce is None:\n nonce = os.urandom(32)\n self.nonce = nonce", "def _nonce(self):\n # Note: if we use multithreading for a single exchange, this may\n # cause an issue.\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n return int(delta.total_seconds() * 1000)", "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def __expired_timestamp(self, timestamp):\n return int(time.time()) > timestamp + self.__ttl", "def suc_nonce(self, nonce = None):\n\n # if we don't provide a nonce. We will use the internal one\n if nonce is None:\n nonce = self.nonce\n\n # We convert the nonce in bit in order to work on it\n bit_nonce = int_to_bitstr(nonce, 32)\n\n \"\"\" Generate the feedback bit based on the nonce's \n second half, because the last 16 bits of the nonce is\n identical to the 16 bits prng state. 
\"\"\"\n fbit = self.prng_feedback(bit_nonce[16,:])\n\n # The left bit is discarded and the feedback bit is added\n nonce = bit_nonce[1:] + fbit\n\n # We will update the internal nonce/prng to the suc(nonce/prng)\n if nonce is None:\n\n # The internal prng is updated with the second part of the nonce\n self.prng = bitstr_to_int(bit_nonce[16,:])\n self.nonce = bitstr_to_int(bit_nonce)\n\n # Return nonce, it will be sent to the reader\n return self.nonce\n else:\n return bitstr_to_int(nonce)", "def storeNonce(self, nonce):\r\n entity = datastore.Entity('Nonce')\r\n entity['nonce'] = nonce\r\n entity['created'] = datetime.datetime.now()\r\n datastore.Put(entity)", "def verify_and_burn_nonce(nonce):\n ret = re.match(r'^001[2-9][0-9]{3}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])'\n r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}Z[A-Za-z0-9]{6}$', nonce)\n if ret:\n date = parser.parse(nonce[3:-6])\n now = datetime.utcnow().replace(tzinfo=tz.tzutc())\n ret = date < (now + timedelta(minutes=2)) and date > (now + timedelta(hours=-1))\n\n return ret # TODO: keep a record (at least for the last hour) of burned nonces", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False", "def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True", "def is_safe_cache(self):\n if self.get_last_update() > self.timestamp:\n return False\n return True", "def generate_nonce():\n return str(int(round(time.time() * 1000)))", "def make_nonce (self, request):\r\n ip = request.channel.server.ip\r\n now = str(long(time.time()))\r\n if now[-1:] == 'L':\r\n now = now[:-1]\r\n private_key = str (id (self))\r\n nonce = ':'.join([ip, now, private_key])\r\n return self.apply_hash (nonce)", "def _get_nonce():\n return uuid.uuid4().get_hex()", "def _about_to_expire(self, secret: Secret) -> bool:\n return secret.is_expired(datetime.now(UTC) + self.expiry_margin)", "def isFresh(self, timestamp):\n pass;", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def is_valid(self):\n return self.access_token is not None \\\n and time.time() < self._expiration_timestamp", "def token_is_stale(self):\n return self.m_token_expiry < datetime.datetime.now(tz=pytz.utc)", "def is_outdated(self, timestamp):\n\n expiry_time = datetime.datetime.now() - self.cache_time\n return expiry_time > timestamp", "def _generate_nonce(self):\n return str(random.randrange(100000, 999999))", "def has_expired(self):\n if not self._initialized:\n return True\n\n expires_in = self.expires_in\n if expires_in > 0:\n return False\n else:\n return True", "def only_once(self) -> bool:\n return self.times == 1", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return 
\"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def loginCheckSuccess(self, output):\n result = True\n now = datetime.now()\n if (self.token_timestamp is None) or ((now - self.token_timestamp).\n days >= T411.token_ttl):\n result = False\n return result", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def isExpired(self):\n return True/False", "def generate_nonce():\n return uuid4().hex", "async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)", "def has_expired(self, now):\n if now < self._expires:\n return False\n\n return self._enclave_wait_timer.has_expired()", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def used(self)->bool:\n return self._lic.params['timeFirstAccess'].valid", "def lock_timed_out(self, timestamp=None):\n\n timeout = getattr(settings, 'LOGINLOCK_LOCK_TIMEOUT', LOCK_TIMEOUT)\n if not timestamp:\n timestamp = datetime.now()\n return timestamp > self.last_attempt_at + timeout", "def generateNonce():\r\n hash = hashlib.sha1()\r\n hash.update(str(time.time()).encode('utf-8'))\r\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. 
\"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce", "def __update_token(self) -> bool:\r\n\r\n self.__sess.cookies.clear()\r\n\r\n r = self.__sess.get(f'{DOMAIN}/')\r\n m = re.search(r'var token = \\'(\\S{42,48})\\';', r.text)\r\n\r\n if not m:\r\n self.__log_msg(f'No token found!', is_err=True)\r\n return False\r\n\r\n old_token = self.__payload.get('token', None)\r\n self.__payload['token'] = m[1]\r\n\r\n # midnight today\r\n self.__token_expiration_date = datetime.now(self.__tz).replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(1)\r\n\r\n if old_token:\r\n self.__log_msg(f'TOKEN UPDATED: \"{old_token}\" -> \"{m[1]}\"')\r\n else:\r\n self.__log_msg(f'TOKEN SET: \"{m[1]}\"')\r\n return True", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def _check_timestamp(self, timestamp):\n if timestamp is None:\n return True\n if not isinstance(timestamp, float):\n return False\n\n # is it sufficiently in the future to be unreasonable?\n if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):\n return False\n else:\n return True", "def is_locked(self) -> bool:\n return self.words is None", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def _should_save_cookiejar(self):\n max_staleness = 60 * 60 * 24 # 1 day\n if not self._last_cookiejar_save:\n return True\n return time() - self._last_cookiejar_save > max_staleness", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def isExpired(self):\n return self.sess is not None and not self.sess.isValid()", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def create_nonce():\n default_seed = 'ifh2847fhsn\"lqOEYd@#Djh(&'\n hash = sha.new(default_seed)\n hash.update(str(datetime.utcnow()))\n return hash.hexdigest()", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def is_expired(self) -> bool:\n return now() > self.expires", "def is_once(self):\n return self.subscription_list.mode == gnmi_pb2.SubscriptionList.ONCE", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def heartbeat(self):\n if self.acquired:\n poked = self.client.put(self._lock_url,\n data={\"ttl\": self.ttl,\n 
\"prevExist\": \"true\"}, make_url=False)\n self._node = poked['node']\n errorcode = poked.get(\"errorCode\")\n if not errorcode:\n return True\n LOG.warning(\"Unable to heartbeat by updating key '%s' with \"\n \"extended expiry of %s seconds: %d, %s\", self.name,\n self.ttl, errorcode, poked.get(\"message\"))\n return False", "def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())", "def is_expired(snap):\n exp_epoch = int(snap.split(\"_\")[const.VLAB_SNAP_EXPIRES])\n current_time = int(time.time())\n return exp_epoch < current_time", "def valid(self):\r\n return self.resumable and self.sessionID", "def is_session_valid(self, logonTimestamp):\n time_diff = time.time() - logonTimestamp\n return (time_diff / 60) < self.session_time_limit", "def is_authentication_token(self, candidate):\n current_token = self.__current_authentication_token()\n # TODO: Add expiry checking\n if (current_token and\n self.__valid_token_format(current_token) and\n self.__valid_token_format(candidate) and\n constant_time_equals(current_token, candidate)):\n return True\n else:\n return False", "def _is_stale(self, request: SecretRequest,\n secret: Optional[Secret]) -> bool:\n return secret is None or \\\n (secret.is_expired() and self._can_freshen(request, secret))", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def test_encrypt_nonce(self):\n key = b'0' * 32\n message = 'message'\n\n assert encrypt(message, key=key) != encrypt(message, key=key)", "def valid(self):\n return self.expiry > timezone.now()", "def timed_out(timestamp):\n if timestamp is None:\n return False\n return timestamp < time.time()", "def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires", "def isStale(self):\n return self.m_expirationDate < datetime.datetime.now(tz=pytz.utc)", "def is_expired(self):\n return utcnow() >= self.expires", "def is_fresh(self):\n return not self.used", "def is_valid(self):\n return self.is_signed and not self.is_expired", "def isValid( self ):\n\n assert self.issueDate\n now = int(time.time())\n\n if (now - self.issueDate) > const.SESSION_TICKET_LIFETIME:\n log.debug(\"Ticket is not valid anymore.\")\n return False\n\n return True", "def get_unique_mirotime(self):\r\n with self._nonce_lock:\r\n microtime = int(time.time() * 1E6)\r\n if microtime <= self._last_unique_microtime:\r\n microtime = self._last_unique_microtime + 1\r\n self._last_unique_microtime = microtime\r\n return microtime", "def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False", "def updated(self):\n return self.expires != self.orig_expires", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()", "def log_once(key):\r\n\r\n global _last_logged\r\n\r\n if _disabled:\r\n return False\r\n elif key not in _logged:\r\n _logged.add(key)\r\n _last_logged = time.time()\r\n return True\r\n elif _periodic_log and time.time() - _last_logged > 60.0:\r\n _logged.clear()\r\n _last_logged = time.time()\r\n return False\r\n else:\r\n return False", "def get_last_nonce(app, key, nonce):\n uk = ses.query(UserKey).filter(UserKey.key==key)\\\n 
.filter(UserKey.last_nonce<nonce * 1000).first()\n if not uk:\n return None\n lastnonce = copy.copy(uk.last_nonce)\n # TODO Update DB record in same query as above, if possible\n uk.last_nonce = nonce * 1000\n try:\n ses.commit()\n except Exception as e:\n current_app.logger.exception(e)\n ses.rollback()\n ses.flush()\n return lastnonce", "def verify_auth_token(shared_key, eppn, token, nonce, timestamp, generator=sha256):\n # check timestamp to make sure it is within 300 seconds from now\n logger.debug(\"Trying to authenticate user {!r} with auth token {!r}\".format(eppn, token))\n # check timestamp to make sure it is within -300..900 seconds from now\n now = int(time.time())\n ts = int(timestamp, 16)\n if (ts < now - 300) or (ts > now + 900):\n logger.debug(\"Auth token timestamp {!r} out of bounds ({!s} seconds from {!s})\".format(\n timestamp, ts - now, now))\n raise HTTPForbidden(_('Login token expired, please await confirmation e-mail to log in.'))\n # verify there is a long enough nonce\n if len(nonce) < 16:\n logger.debug(\"Auth token nonce {!r} too short\".format(nonce))\n raise HTTPForbidden(_('Login token invalid'))\n\n expected = generator(\"{0}|{1}|{2}|{3}\".format(\n shared_key, eppn, nonce, timestamp)).hexdigest()\n # constant time comparision of the hash, courtesy of\n # http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/\n if len(expected) != len(token):\n logger.debug(\"Auth token bad length\")\n raise HTTPForbidden(_('Login token invalid'))\n result = 0\n for x, y in zip(expected, token):\n result |= ord(x) ^ ord(y)\n logger.debug(\"Auth token match result: {!r}\".format(result == 0))\n return result == 0", "def is_locked(self):\n return self._unit_got == False", "def has_expired(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def gen_nonce(self, length=32):\n if(length < 32):\n res = {\"message\": 'Invalid nonce length'}, 400\n else:\n nonce = secrets.token_hex(floor(length))\n nonces_file = \"client-generate-nonces.txt\"\n res = self.check_nonce(nonce, nonces_file, length)\n return res", "def has_been_n_seconds_since_last(self, identifier, seconds):\r\n current_time = time.time()\r\n if identifier not in self._last_time or \\\r\n (current_time - self._last_time[identifier] > seconds):\r\n self._last_time[identifier] = current_time\r\n return True\r\n return False", "def is_trashed(self):\n return self.has_label(TRASHED_LABEL)", "def is_token_for_current_time(token, known_plaintext, current_time):\n expected_seed_times = compute_seed_from_known_pt(known_plaintext, token)\n for expected_time in expected_seed_times:\n if expected_time == (current_time & 0xFFFF):\n return True\n return False", "def check_TTS_data(self):\n if os.path.exists(self.tts_output_data):\n ctime = os.stat(self.tts_output_data).st_ctime\n since = time.time() - ctime\n if since > self.token_expiration:\n self.log.debug(\"Token about to expire. 
Need refreshing\")\n TTS_data = self.get_TTS_data(True)\n else:\n return True\n else:\n self.exchanged_token = self.get_exchange_token(self.client_id, self.client_secret, self.audience, self.token_endpoint, self.iam_token)\n if isinstance(self.exchanged_token, int):\n self.log.error(\"get_exchange_token error\")\n return False \n else:\n TTS_data = self.get_TTS_data(self.exchanged_token)\n\n return TTS_data", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False", "def is_cache_valid(self):\n if os.path.isfile(self.cache_filename):\n mod_time = os.path.getmtime(self.cache_filename)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n return True\n return False", "def is_locked(self):\n now = get_current_time()\n if self.end <= now:\n return True\n return False", "def isFresh(self, timestamp):\n if not os.path.exists(self.__resource):\n return False;\n return os.path.getmtime(self.__resource) < timestamp;", "def expired(self):\n return int(time.time()) > self.expires_at", "def get_nonce_for_account(self, address: str, block_identifier: Optional[str] = 'latest'):\n return self.w3.eth.getTransactionCount(address, block_identifier=block_identifier)" ]
[ "0.73136973", "0.6600226", "0.6195637", "0.61672926", "0.61513644", "0.6094848", "0.6070164", "0.6020712", "0.6004455", "0.5996191", "0.5996191", "0.59095937", "0.5881976", "0.5801852", "0.5756545", "0.569583", "0.5590977", "0.5563972", "0.55559796", "0.5550194", "0.5542444", "0.5532732", "0.5522533", "0.55122817", "0.5469757", "0.5463364", "0.5442366", "0.5439037", "0.54208225", "0.54062593", "0.53611284", "0.5360911", "0.5360293", "0.5345018", "0.531311", "0.5305834", "0.52879125", "0.52638304", "0.52507615", "0.523517", "0.5230409", "0.5206732", "0.52055985", "0.5204208", "0.52035093", "0.51923114", "0.5169431", "0.51692945", "0.51628554", "0.5160389", "0.5142778", "0.5138364", "0.51304644", "0.512347", "0.51223665", "0.5117854", "0.5117854", "0.51079595", "0.510597", "0.5104079", "0.50991297", "0.50944746", "0.50744945", "0.5073772", "0.50630176", "0.5061014", "0.5058795", "0.5056406", "0.5055285", "0.5052446", "0.5049838", "0.50488436", "0.5040789", "0.5032975", "0.50292856", "0.5027723", "0.50231814", "0.50214416", "0.5021334", "0.5011817", "0.5010879", "0.5007038", "0.5004385", "0.49987474", "0.4995362", "0.49659318", "0.495579", "0.49488464", "0.49350864", "0.49319983", "0.49164113", "0.4909117", "0.49065572", "0.48963615", "0.4893862", "0.4886934", "0.48788956", "0.48788252", "0.48783055", "0.48758587" ]
0.76029676
0
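A minimal in-memory sketch of the useNonce contract documented in the row above, assuming SKEW is the allowed timestamp window (the docstring refers to openid.store.nonce.SKEW); the set, function name, and SKEW value here are assumptions for illustration, not the MongoDB-backed implementation shown in the document field.

# Illustrative only: reject stale timestamps and replayed (server_url, timestamp, salt) triples.
import time

SKEW = 60 * 60 * 5          # assumed window in seconds; the real value is openid.store.nonce.SKEW
_seen = set()               # {(server_url, timestamp, salt)}

def use_nonce(server_url, timestamp, salt, now=None):
    now = time.time() if now is None else now
    if abs(now - timestamp) > SKEW:
        return False        # timestamp is not current
    key = (server_url, timestamp, salt)
    if key in _seen:
        return False        # nonce was already used
    _seen.add(key)          # remember it so a later replay is rejected
    return True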
Remove expired nonces from the store. Discards any nonce from storage that is old enough that its timestamp would not pass L{useNonce}. This method is not called in the normal operation of the library. It provides a way for store admins to keep their storage from filling up with expired data.
def cleanupNonces(self): try: mist_nonces = MistNonce.objects() except me.DoesNotExist: mist_nonces = [] counter = 0 for n in mist_nonces: if n.is_old(): n.delete() counter += 1 return counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def purgeExpiredRecords(self):\n if hasattr(self, \"_test_time\"):\n now = self._test_time\n else:\n now = time.time()\n\n for indexType in self._cache:\n for key, (cachedTime, _ignore_record) in self._cache[indexType].items():\n if now - self._expireSeconds > cachedTime:\n del self._cache[indexType][key]", "def _purge_expired_user_tokens():\n expired = models.Token.query.filter_by(\n _user_fk=current_user.id).filter(\n models.Token.expiration_date <= datetime.utcnow()\n ).all()\n if expired:\n for token in expired:\n db.session.delete(token)\n db.session.commit()", "def clear_expired(self):\n raise NotImplementedError", "def purge(self):\n if not self.index:\n return\n now = time()\n \n while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:\n self.remove_one()\n if not self.expiry:\n if not self.index:\n return\n self.rotate_lists()\n return", "def prune_database():\n now = datetime.now()\n expired = BlacklistedToken.query.filter(BlacklistedToken.expires < now).all()\n for token in expired:\n db.session.delete(token)\n db.session.commit()\n current_app.running_context.cache.set(\"number_of_operations\", 0)", "def cache_clean(self):\n\t\tnow = time.time()\n\t\tkeys_for_removal = collections.deque()\n\t\tfor key, (_, expiration) in self.__cache.items():\n\t\t\tif expiration < now:\n\t\t\t\tkeys_for_removal.append(key)\n\t\tfor key in keys_for_removal:\n\t\t\tdel self.__cache[key]", "def useNonce(self, nonce):\r\n query = datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False", "def purgeExpiredRequests( self ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()\"\n return self._update( cmd )", "def _purge_expired_items():\n now = time.time()\n keys = data_table.keys()\n\n for key in keys:\n num_unique_vals = len(data_table[key])\n\n # We iterate through in reverse, because otherwise deleting an \n # entry will cause frivolous edge cases.\n iteration_scheme = range(num_unique_vals)\n iteration_scheme.reverse()\n\n # value format: (value variable, expiration time)\n for value in iteration_scheme:\n expiration_time = data_table[key][value][1]\n temp_value = data_table[key][value][0] # For logging purposese only.\n if now > expiration_time:\n # The entry is expired.\n del data_table[key][value]\n if len(data_table[key]) == 0:\n del data_table[key]\n if (verbose):\n logstring = str(\"Entry purged: \" + str(key) + \": \" + str(temp_value) + \"\\n\")\n _log_with_timestamp(logstring)\n\n return", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = 
cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def remove_expired_files():\n from models import FlowFile\n FlowFile.objects.filter(\n state__in=[FlowFile.STATE_UPLOADING, FlowFile.STATE_UPLOAD_ERROR],\n updated__lte=datetime.datetime.date() - datetime.timedelta(days=FLOWJS_EXPIRATION_DAYS)\n ).delete()", "def delete_expired(cls):\n now = datetime.now(timezone.utc)\n\n sql = \"\"\"DELETE FROM qiita.{0} WHERE exp<%s\"\"\".format(cls._table)\n qdb.sql_connection.perform_as_transaction(sql, [now])", "def _clean(self):\n limit = datetime.now() - timedelta(seconds=self._timeout)\n \n for uid in [uid for uid, timestamp in self._reservedUID.iteritems()\n if timestamp < limit]:\n del self._reservedUID[uid]", "def flush_expired_tokens(self):\n raise exception.NotImplemented() # pragma: no cover", "def purgeExpiredProxies( self ):\n cmd = \"DELETE FROM `ProxyDB_Proxies` WHERE ExpirationTime < UTC_TIMESTAMP() and PersistentFlag = 'False'\"\n return self._update( cmd )", "def gc_expired_tokens(expiration_window):\n (\n AppSpecificAuthToken.delete()\n .where(AppSpecificAuthToken.expiration < (datetime.now() - expiration_window))\n .execute()\n )", "def do_expire(self):\n # Deep copy to avoid RuntimeError: dictionary changed size during iteration\n _timeouts = deepcopy(self.timeouts)\n for key, value in _timeouts.items():\n if value - self.clock.now() < timedelta(0):\n del self.timeouts[key]\n # removing the expired key\n if key in self.redis:\n self.redis.pop(key, None)", "def get_expired_nscache():\n now = int(time())\n keys_to_del = []\n for key, odict in nscache.iteritems():\n for dn, ce in odict.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append((key, dn))\n return (keys_to_del, nscache)", "def clear_expired_exceptions():\n print(\"Clearing out exceptions that have an expired TTL...\")\n clear_old_exceptions()\n print(\"Completed clearing out exceptions that have an expired TTL.\")", "def clean_local_cache(self):\n to_expire = []\n now = int(time())\n\n try:\n for k, (_, _, grace) in six.iteritems(self._local_cache):\n if now > grace:\n to_expire.append(k)\n except RuntimeError:\n # It's possible for the dictionary to be mutated in another thread\n # while iterating, but this case is rare, so instead of making a\n # copy and iterating that, it's more efficient to just let it fail\n # gracefully. 
It'll just get re-run later.\n return\n\n for k in to_expire:\n try:\n del self._local_cache[k]\n except KeyError:\n # This could only exist in a race condition\n # where another thread has already deleted this key,\n # but we'll guard ourselves against it Justin Case.\n pass", "async def delete_expired_responses(self):\n logger.info(f'Deleting all responses more than {self.expire_after} hours old')\n keys_to_delete = set()\n\n for key in await self.responses.keys():\n response = await self.get_response(key)\n if response and response.is_expired:\n keys_to_delete.add(key)\n\n logger.info(f'Deleting {len(keys_to_delete)} expired cache entries')\n for key in keys_to_delete:\n await self.delete(key)", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def clearExpired(self):\n self.sleep_approx(1)\n playersOnPage = self.driver.find_elements_by_tag_name(\"li.listFUTItem\")\n\n num_players_expired = 0\n for player in playersOnPage:\n bidStatus = player.get_attribute(\"class\")\n bidStatus = str(bidStatus)\n\n if \"expired\" in bidStatus:\n num_players_expired += 1\n\n if num_players_expired > 0:\n clearExpired = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[2]/div/div/div/section[4]/header/button\")\n self.driver.execute_script(\n \"arguments[0].scrollIntoView(true);\", clearExpired)\n WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable(\n (By.XPATH, \"/html/body/main/section/section/div[2]/div/div/div/section[4]/header/button\"))).click()\n self.sleep_approx(1)\n log_event(self.queue, \"Cleared expired\")\n self.sleep_approx(1)", "def clear_obsolete_trash():\n minute_ago = datetime.now() - timedelta(minutes=1)\n Picture.trash.filter(trashed_time__lt=minute_ago).delete()", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def remove_expired_records(self, database): #pylint: disable-msg=R0201\n #special case to ignore eat files that are uploaded every day\n self.remove_eat_upload_records(database)\n \n expiry_time = 20 # 20 in seconds\n aborted_expiry_time = 43200 # 12 hours in seconds\n now = datetime.datetime.utcnow()\n \n for rec in database:\n #finished records\n if rec.get('finished_time_insert', None) and (now - rec['finished_time_insert']) > datetime.timedelta(seconds = expiry_time) :\n #archive finished record\n self.archiver.archive(rec)\n database.delete(rec)\n #aborted for more than 12 hours records \n elif rec.get('aborted', None) and ( now - rec.get('last_update', now) ) > datetime.timedelta(seconds = aborted_expiry_time) :\n #archive aborted record that was never finished\n self.archiver.archive(rec)\n database.delete(rec)", "def expire_deleted_fixtures(self):\n logger.info(\"Purging instances deleted %s days ago\", common.DELETED_INSTANCE_GC_DAYS)\n n_days_ago_in_usec = int((time.time() * 1e6) - (common.DELETED_INSTANCE_GC_DAYS * common.NANOSECONDS_IN_A_DAY))\n expired = self.instances.find({\n 'mtime': {'$lt': n_days_ago_in_usec},\n 'status': InstanceStatus.DELETED\n })\n count = 0\n for fix_doc in expired:\n logger.info(\"Deleting expired fixture %s (id: %s)\", fix_doc['name'], fix_doc['_id'])\n self.axdb_client.delete_fixture_instance(fix_doc['_id'])\n self.instances.delete_one({'_id': fix_doc['_id']})\n count += 1\n logger.info(\"Expired %s instances\", count)", "def expired(self):\n try:\n cursor = self.connection.execute('DELETE FROM session WHERE expires < NOW()')\n return cursor.rowcount\n except database.Error, e:\n 
self.connection.rollback()\n else:\n self.connection.commit()\n finally:\n cursor.close()", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete()", "def delete_expired_transactions():\n call_command('delete_expired_transactions')", "def delete_expired_users(self):\r\n for profile in self.all():\r\n if profile.activation_key_expired():\r\n user = profile.user\r\n if not user.is_active:\r\n user.delete()", "def clean_up (self):\n\n self._exec (\n 'delete from table_name where expires < %(now)s',\n now = self.now ()\n )", "def _do_expire(self):\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n memory = self.memory.copy()\n self.memory.clear()\n for key, val in memory.items():\n ip = key[0]\n if ip in self.live_servers and val.is_expired:\n # Decrease total connection for that server\n self.total_connection[ip] -= 1\n if not val.is_expired:\n self.memory[key] = val", "def cache_expiration(self):\n\n\t\t# Iterate through servers\n\t\tfor serv in self.servers:\n\t\t\tserv.cache.hash_table.clear() # Erase the cache\n\t\t\tserv.cache.cur_size = 0 # Resets the number of items in the cache to 0", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def clear_expired_local_nodes(db, c):\n # typically these are left over from running the local correlation command and killing it before\n # it has a chance to clean itself up\n execute_with_retry(db, c, \"DELETE FROM nodes WHERE is_local = 1 AND TIMESTAMPDIFF(HOUR, last_update, NOW()) >= 1\", \n commit=True)\n\n if c.rowcount:\n logging.warning(\"removed {} expired local nodes\".format(c.rowcount))", "def clean(self):\n with self._lock:\n self.__cache.expire()", "def prune_database():\n # Get current time\n now = datetime.now()\n tokens = BlacklistToken.query.all()\n for token in tokens:\n if token.blacklisted_on < now:\n # Get the difference between now and time when token was blacklisted\n diff = now - token.blacklisted_on\n # if difference in days is more than 7 (> 1 week old tokens), delete\n if diff.days > 7:\n token.delete()", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete() # Removing the ``User`` will remove the ``RegistrationProfile``, too.", "def delete_old_tickets(**kwargs):\n sender = kwargs.get('sender', None)\n now = datetime.now()\n expire = datetime(now.year, now.month, now.day - 2)\n sender.objects.filter(created__lt=expire).delete()", "def delete_old():\n objs = (Snapshot\n .objects\n .filter(timestamp__lte=(datetime.now() - timedelta(days=35)))\n )\n objs.delete()", "def clean_expired_task():\n day_ago = datetime.datetime.now(pytz.timezone(\"UTC\")) 
- datetime.timedelta(days=ASYNC_EXPORT_FILE_EXPIRED_DAYS)\n # 获取过期的内网下载文件\n expired_task_list = AsyncTask.objects.filter(created_at__lt=day_ago, is_clean=False)\n # nfs文件需要进行定期清理操作\n storage_type = FeatureToggleObject.toggle(FEATURE_ASYNC_EXPORT_COMMON).feature_config.get(\n FEATURE_ASYNC_EXPORT_STORAGE_TYPE\n )\n\n if storage_type or storage_type == RemoteStorageType.NFS.value:\n # 删除NFS文件\n for expired_task in expired_task_list:\n target_file_dir = os.path.join(settings.EXTRACT_SAAS_STORE_DIR, expired_task.file_name)\n if os.path.isfile(target_file_dir):\n os.remove(os.path.abspath(target_file_dir))\n expired_task.is_clean = True\n expired_task.save()", "def clear(self, lifetime):\n\n # find all entries with the specified lifetime\n to_remove = [k for (k, v) in self.store.items() if v[0] == lifetime]\n # remove all these entries from the list\n for t in to_remove:\n del self.store[t]", "def clean_out_addonpremium(days=DAYS_OLD):\n old = datetime.now() - timedelta(days=days)\n objs = AddonPremium.objects.filter(addon__premium_type=amo.ADDON_FREE,\n created__lt=old)\n log.info('Deleting %s old addonpremiums.' % objs.count())\n for obj in objs:\n log.info('Delete addonpremium %s which was created on %s' %\n (obj.addon_id, obj.created))\n obj.delete()", "def purgeLogs( self ):\n cmd = \"DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 15552000\"\n return self._update( cmd )", "def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. 
service=s3 method=delete_object id={}'.format(file_id))", "def revoke_access_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(minutes=cls._ACCESS_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')", "def _prune_cache(self):\n default_expiry = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.cache_resources_for)\n for resource_id, resource in self.local_resource_status.items():\n if 'cache_until' in resource:\n if datetime.datetime.utcnow() > resource['cache_until']:\n self._delete_cache(resource_id)\n elif resource['last_accessed'] < default_expiry:\n self._delete_cache(resource_id)", "def _remove_expired_task(self):\n with self.lock:\n curr_time = datetime.utcnow()\n tasks_list = self.tasks_to_remove\n for task_id, t in tasks_list:\n time_elapsed = curr_time - t\n if (time_elapsed.total_seconds() < TASK_EXPIRE_DURATION_SEC):\n break\n self.tasks_to_remove.remove((task_id, t))\n self.task_map.pop(task_id)", "def __remove_expired_freezers(self, event: Event):\n if len(self.__freeze_map) == 0:\n # freeze option disabled\n return False\n self.__active_freezers = [freezer for freezer in self.__active_freezers\n if event.max_timestamp - freezer.min_timestamp <= self._pattern.window]", "def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()", "def get_expired_cnamecache():\n now = int(time())\n keys_to_del = []\n for key, ce in cnamecache.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append(key)\n return (keys_to_del, cnamecache)", "def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()", "def _drop_old_data(self, current_time):\n for k in self._buf.keys():\n timelimit = current_time - self._lifetime\n if (k < timelimit):\n del self._buf[k]", "def clear_expired_locks(db, c):\n execute_with_retry(db, c, \"DELETE FROM locks WHERE TIMESTAMPDIFF(SECOND, lock_time, NOW()) >= %s\",\n (saq.LOCK_TIMEOUT_SECONDS,))\n db.commit()\n if c.rowcount:\n logging.debug(\"removed {} expired locks\".format(c.rowcount))", "def remove_if_expired(self, key, now):\n with self.GLOB_LOCK:\n inst = self._request_sessions.get(key, None)\n if inst is not None and (inst.last_access + self.TIMEOUT < now):\n self._request_sessions.pop(key, None)\n return True\n\n return False", "def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)", "def erase(self):\n for b in self.posted_on:\n b.erase(self)\n self._catalog.erase(self)\n if self._logbook is not None:\n self._logbook.close()\n self._logbook = None\n if self.state != states.UNCLAIMED:\n self._claimer.unclaim(self, self.owner)", "def clearStore(self):\n 
os.remove(self.uid+\".pcl\")\n self.items = []", "def clean_cache(self):\n timer = Timer()\n entries = []\n for file_in_cache in self.find_archives():\n cache_metadata = self.read_metadata(file_in_cache)\n last_accessed = cache_metadata.get('last-accessed', 0)\n entries.append((last_accessed, file_in_cache))\n to_remove = sorted(entries)[:-self.cache_limit]\n if to_remove:\n for last_used, file_in_cache in to_remove:\n logger.debug(\"Removing archive from cache: %s\", file_in_cache)\n metadata_file = self.get_metadata_file(file_in_cache)\n self.context.execute('rm', '-f', file_in_cache, metadata_file)\n logger.verbose(\"Took %s to remove %s from cache.\",\n timer, pluralize(len(to_remove), \"archive\"))\n else:\n logger.verbose(\"Wasted %s checking whether cache needs to be cleaned (it doesn't).\", timer)", "def revoke_refresh_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(days=cls._REFRESH_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')", "def delete_expired_users(self):\n days_valid = settings.ACCOUNT_ACTIVATION_DAYS\n expired = datetime.now() - timedelta(days=days_valid)\n prof_ids = self.filter(user__date_joined__lt=expired)\n prof_ids = prof_ids.values_list('id', flat=True)\n for chunk in chunked(prof_ids, 1000):\n _delete_registration_profiles_chunk.apply_async(args=[chunk])", "def _periodically_cleanup_candidates(self):\n while True:\n yield 5 * 60.0\n\n now = time()\n for key, candidate in [(key, candidate) for key, candidate in self._candidates.iteritems() if candidate.is_all_obsolete(now)]:\n if __debug__: dprint(\"removing obsolete candidate \", candidate)\n del self._candidates[key]\n self.wan_address_unvote(candidate)", "def expire(self):\n logging.debug(\"Expiring token as wanted...\")\n self.expiration = datetime.now() - timedelta(seconds=(10))", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def expire(self):\n Slate.expire(self)\n\n one_year = 60 * 60 * 24 * 365\n e = time.time() - one_year\n cherrypy.serving.response.cookie[self.session_cookie] = 'expired'\n cherrypy.serving.response.cookie[self.session_cookie]['expires'] = httputil.HTTPDate(e)", "def expire_token(self):\n self.user_in_db = User.users_db.get(self.email)\n\n self.user_in_db.update({'token': ''})\n\n User.users_db.put(self.user_in_db)\n\n return {'success': True}", "def invalidate(self):\n\n dogpile_region, cache_key = self._get_cache_plus_key()\n dogpile_region.delete(cache_key)", "async def test_deleted_cached_time(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.unsilence_timestamps.delete.assert_awaited_once_with(self.text_channel.id)", "def evictOldkeys(self, cutOff):\n for key in self.values.keys():\n time = self.values[key][3]\n if time < cutOff:\n del self.values[key]", "def ageout(self) -> int:\n now = time.monotonic()\n delete = [\n k\n for k, (_, insertion_time) in self._cache.items()\n if (now - insertion_time) > self._lifetime\n ]\n for k in delete:\n del self[k]\n\n return len(delete)", "def delete_expired_aws_users(self):\n user_list = self.iam_client.list_users(MaxItems=100)\n arn_list = []\n\n for 
user in user_list['Users']:\n user_name = user['UserName']\n user_tags = self.iam_client.list_user_tags(UserName=user_name)\n if {'Key': 'kindredgroup.com/temp-access-resource', 'Value': 'true'} in user_tags['Tags']:\n user_arn = user['Arn']\n\n for tag in user_tags['Tags']:\n tag_values = list(tag.values())\n if tag_values[0] == 'kindredgroup.com/expireTimestamp':\n expire_timestamp = int(tag_values[1])\n\n if not expire_timestamp:\n print(\"User: name={} does not have {} annotation!\".format(user_name, self.expire_annotation))\n elif expire_timestamp < self.now:\n print(\"User: name={} is expired! Removing it ... \".format(user_name))\n accesskeyids = self.iam_client.list_access_keys(UserName=user_name)\n\n for accesskeyid in accesskeyids['AccessKeyMetadata']:\n self.delete_access_key(user_name, accesskeyid['AccessKeyId'])\n\n response = self.iam_client.delete_user(UserName=user_name)\n if response['ResponseMetadata']['HTTPStatusCode'] == self.success:\n print(\"User: name={} is removed!\".format(user_name))\n else:\n pprint(response)\n\n arn_list.append(user_arn)\n\n return arn_list", "def __del__(self):\n self.token_revoke()", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def purge(self):\n self.remaining = 0", "def _purge():\r\n _cache.clear()", "def delete_old(self):\n retention_time = Host().get_retention_time()\n snapshots = self._list_snapshots()\n\n for snap in snapshots['Snapshots']:\n snapshot_id = snap['SnapshotId']\n start_time = snap['StartTime']\n if start_time <= retention_time:\n self.resource.delete_snapshot(\n SnapshotId=snapshot_id,\n DryRun=DRY_RUN\n )\n self.deleted_ids.append(snapshot_id)\n\n notify = Notifier()\n notify.send(self.created_id, self.deleted_ids)", "def remove_obsolete_users(self, date_limit):\n for user in User.objects.filter(last_login__lt=date_limit):\n if not ServiceProvider.objects.filter(admins=user):\n self.output(\"Removing user: \" + user.username)\n if not self.list_only:\n user.delete()", "def expired_alarm():\n temp_events = Events_list.copy()\n for x in range(len(temp_events)):#iterates for the whole events list\n if time.time() >= convert_to_epoch(temp_events[x][1]):#if the time set is less than current time it must be expired\n event_remove(temp_events[x][0])", "def remove_inactive_consumers():\n\n THRESHOLD_MINUTES = 5\n\n schema = get_schema()\n for subscription in schema.subscription_type.fields.keys():\n to_remove = []\n for consumer in frappe.cache().hkeys(get_subscription_redis_key(subscription)):\n subscription_info = frappe.cache().hget(\n get_subscription_redis_key(subscription), consumer)\n\n should_remove = True\n if subscription_info.last_ping:\n last_ping = get_datetime(subscription_info.last_ping)\n if last_ping + timedelta(minutes=THRESHOLD_MINUTES) >= now_datetime():\n should_remove = False\n\n if should_remove:\n to_remove.append(consumer)\n\n if len(to_remove):\n frappe.cache().hdel(\n get_subscription_redis_key(subscription), *to_remove)", "def _rm_edx4edx(self):\r\n def_ms = modulestore()\r\n course_path = '{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR))\r\n try:\r\n # using XML store\r\n course = def_ms.courses.get(course_path, None)\r\n except AttributeError:\r\n # Using mongo store\r\n course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))\r\n\r\n # Delete git loaded course\r\n response = self.client.post(\r\n reverse('sysadmin_courses'),\r\n {\r\n 
'course_id': course.id.to_deprecated_string(),\r\n 'action': 'del_course',\r\n }\r\n )\r\n self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))\r\n\r\n return response", "def distribution_removed(self, uid):\n with self.__lock:\n for store in self._stores:\n store.remove(uid)", "def clean_sessions():\n while not QUIT:\n # Find number of known tokens\n size = conn.zcard('recent:')\n\n if size <= LIMIT:\n time.sleep(1)\n continue\n\n # Collect tokens to remove\n end_index = min(size - LIMIT, 100)\n sessions = conn.zrange('recent:', 0, end_index - 1)\n\n # Collect key names for tokens\n session_keys = []\n for sess in sessions:\n session_keys.append('viewed:' + token)\n session_keys.append('cart:' + token)\n\n # Delete view, login, and recent keys\n conn.delete(*session_keys)\n conn.hdel('login:', *tokens)\n conn.zrem('recent:', *tokens)", "def __del__(self):\n self.set_cas(0)", "def garbage_collect(self, timestamp):\n stale_keys = []\n for key, window in list(self.occurrences.items()):\n if timestamp - lookup_es_key(window.data[-1][0], self.ts_field) > self.rules['timeframe']:\n stale_keys.append(key)\n list(map(self.occurrences.pop, stale_keys))", "def _clean_outdated(self):\n now = _now()\n outdated = []\n for request_no, request_info in self._current_requests.items():\n if now - request_info.start_time > self._force_clean_after:\n outdated.append(request_no)\n if outdated:\n logging.error(\"There are {} requests which were started but haven't \"\n \"been finished in more than {}s.\"\n .format(len(outdated), self._force_clean_after))\n for request_no in outdated:\n del self._current_requests[request_no]\n self._last_autoclean_time = now", "def clean(self):\n cutoff = int(time.time()) - int(self.__ttl)\n logging.info(\"Cleaning cache with cutoff time %d\" % cutoff)\n\n start_key = None\n while True:\n # Split in small transactions to avoid blocking other processes.\n with self.__env.begin(self.__metric_to_metadata_db, write=True) as txn:\n with txn.cursor() as cursor:\n if start_key is not None:\n if not cursor.set_range(self._encode(start_key)):\n break\n start_key = self._clean_some(txn, cursor, cutoff)\n if start_key is None:\n break", "def expire_stale_session_requests(self) -> None:\n last_check_before = timezone.now() - timedelta(\n seconds=SESSION_QUEUE_CHECK_TIMEOUT\n )\n SessionRequest.objects.filter(\n project=self.project, last_check__lte=last_check_before\n ).delete()\n\n creation_before = timezone.now() - timedelta(\n seconds=SESSION_QUEUE_CREATION_TIMEOUT\n )\n SessionRequest.objects.filter(\n project=self.project, created__lte=creation_before, last_check__isnull=True\n ).delete()", "def test_remove_expired(self):\n req1 = FakeRequest(1, True)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, True)\n req5 = FakeRequest(5, False)\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.remove_expired()\n\n self.assertTrue(\n req2 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )", "def _purge_stale_checkpoints(self):\n if len(self._checkpoint_files) > self.max_checkpoints:\n purge_files = self._checkpoint_files[: -self.max_checkpoints]\n self._checkpoint_files = self._checkpoint_files[-self.max_checkpoints:]\n for chk in purge_files:\n silent_try(chk.purge_values)", "def purgeSecrets(self, clientIP):\n\n now = time.time()\n min_purge_time = now\n\n nr_purged = 
0\n\n # first get rid of expired keys\n results = self._secret_table.query(ip_address__eq=clientIP,\n not_after__lt=min_purge_time,\n consistent=False,\n index='IPNotAfter')\n \n for result in results:\n logging.info('purging secret: %s %d' % (result['ip'],result['not_before']))\n result.delete()\n nr_purged += 1\n \n # now the fun...\n result_list = []\n results = self._secret_table.query(ip_address__eq=clientIP,\n not_before__lt=min_purge_time,\n consistent=False)\n\n for result in results:\n result_list.append(result)\n\n\n # delete results if there are more than PURGE_RETAIN_NR_ACTIVE results,\n # we want the oldest not befores to be removed first\n if len(result_list) > PURGE_RETAIN_NR_ACTIVE_KEYS:\n for result in sorted(result_list, key=lambda result: result['not_before'])[:-PURGE_RETAIN_NR_ACTIVE_KEYS]:\n logging.info('purging secret: %s %d' % (result['ip'],result['not_before']))\n result.delete()\n nr_purged += 1\n\n\n return nr_purged", "def __removing_loop(self) -> None:\r\n\r\n # repeat until stop flag is set\r\n while not self.__stopper.wait(self.CLEANUP_EXPIRED_INTERVAL):\r\n now = int(datetime.now(self.__tz).timestamp())\r\n log.debug('Removing...')\r\n\r\n # iterate through database and remove expired encounters\r\n for enc_id, despawn_time in self.__pokes_db.copy().items():\r\n if despawn_time - now < 5:\r\n del self.__pokes_db[enc_id]", "def purgeSecrets(self, clientIP):\n\n now = time.time()\n min_purge_time = now\n\n nr_purged = 0\n\n # first get rid of expired keys\n results = self._secretdb.execute('delete from %s where ip_address=:ip_address and not_after <:not_after' % self._table_name,\n {'ip_address': clientIP,\n 'not_after': min_purge_time}).fetchall()\n\n for result in results:\n logging.info('purging secret: %s %d' % (result['ip'],result['not_before']))\n result.delete()\n nr_purged += 1\n \n # now the fun...\n result_list = []\n\n results = self._secretdb.execute('select ip_address,not_before,not_after from %s where ip_address=:ip_address and not_before < :not_before order by not_before' % self._table_name,\n {'ip_address': clientIP,\n 'not_before': min_purge_time}).fetchall()\n\n for result in results:\n result_list.append(result)\n \n\n # delete results if there are more than PURGE_RETAIN_NR_ACTIVE results,\n # we want the oldest not befores to be removed first\n if len(result_list) > PURGE_RETAIN_NR_ACTIVE_KEYS:\n for result in result_list:\n self._secretdb.execute('delete from %s where ip_address=:ip_address and not_before=:not_before and not_after = :not_after' % self._table_name,\n {'ip_address': clientIP,\n 'not_before': result[1],\n 'not_after': result[2]})\n nr_purged += 1\n self._secretdb.commit()\n\n\n return nr_purged", "def cleanPMUserCache(cache):\n\n removeUser = []\n now = int(time.time())\n\n for user, utime in cache.items():\n if now > utime:\n log.debug(\"removing author %s from recent list\", user)\n removeUser.append(user)\n\n for ku in removeUser:\n del cache[ku]", "def clean(self, now):\n def work():\n member = db.get(self.key())\n index = 0\n while index < len(member.tags):\n if member.stop_times[index] <= now:\n # We don't bother to update member_count here;\n # update_tagstats will eventually take care of it.\n member.remove_tag(member.tags[index])\n else:\n index += 1\n member.put()\n return member\n # Before starting a transaction, test if cleaning is needed.\n if self.stop_times and min(self.stop_times) <= now:\n return db.run_in_transaction(work)\n return self", "def clear_cache(self):\n cache.delete(\"site-%s-consumer-count\" % 
self.site.id)" ]
[ "0.6667037", "0.66389847", "0.66271514", "0.6594752", "0.65522724", "0.64164263", "0.640086", "0.62734115", "0.6258594", "0.6247243", "0.6166327", "0.6157293", "0.6084007", "0.6060935", "0.60464877", "0.5984581", "0.592853", "0.58712083", "0.5768196", "0.5718193", "0.56924313", "0.5687182", "0.56777984", "0.5669639", "0.5647465", "0.5647465", "0.5640548", "0.55891865", "0.55529207", "0.5550815", "0.5521541", "0.55011374", "0.5496176", "0.54649407", "0.54554963", "0.5452541", "0.54425275", "0.5442373", "0.54170513", "0.5406961", "0.54064703", "0.5393813", "0.5356777", "0.53220797", "0.5318676", "0.5309575", "0.53062236", "0.5304524", "0.5288991", "0.52756673", "0.5271779", "0.52604735", "0.525772", "0.5239033", "0.52214676", "0.52128184", "0.5210997", "0.52098614", "0.51668274", "0.51445156", "0.5140253", "0.5140188", "0.51156807", "0.51100117", "0.510519", "0.50995064", "0.50990343", "0.5084946", "0.50838137", "0.50813526", "0.50691175", "0.50575423", "0.50444645", "0.50371486", "0.50364196", "0.5036293", "0.5024651", "0.50234467", "0.5022326", "0.5021808", "0.50161034", "0.50060064", "0.5005424", "0.5003846", "0.4992475", "0.49849024", "0.49815887", "0.49705082", "0.4953683", "0.4953079", "0.4952479", "0.49507278", "0.49489743", "0.49431416", "0.49425578", "0.49409735", "0.49361455", "0.49361315", "0.4932053", "0.49161285" ]
0.5977415
16
Remove expired associations from the store. This method is not called in the normal operation of the library. It provides a way for store admins to keep their storage from filling up with expired data.
def cleanupAssociations(self):
        try:
            mist_associations = MistAssociation.objects()
        except me.DoesNotExist:
            mist_associations = []

        counter = 0
        for assoc in mist_associations:
            if assoc.is_expired():
                assoc.delete()
                counter += 1

        return counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()", "def clear_expired(self):\n raise NotImplementedError", "def purgeExpiredRecords(self):\n if hasattr(self, \"_test_time\"):\n now = self._test_time\n else:\n now = time.time()\n\n for indexType in self._cache:\n for key, (cachedTime, _ignore_record) in self._cache[indexType].items():\n if now - self._expireSeconds > cachedTime:\n del self._cache[indexType][key]", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def delete_expired(self):\n check_time = datetime.now()\n if self.can_expire and self.duration:\n exp_times = deepcopy(self.exp_times)\n for key in exp_times:\n if exp_times[key] < check_time:\n self.delete(key)", "def purge(self):\n if not self.index:\n return\n now = time()\n \n while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:\n self.remove_one()\n if not self.expiry:\n if not self.index:\n return\n self.rotate_lists()\n return", "def _purge_expired_user_tokens():\n expired = models.Token.query.filter_by(\n _user_fk=current_user.id).filter(\n models.Token.expiration_date <= datetime.utcnow()\n ).all()\n if expired:\n for token in expired:\n db.session.delete(token)\n db.session.commit()", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete() # Removing the ``User`` will remove the ``RegistrationProfile``, too.", "def delete_expired_users(self):\n for profile in self.all():\n if profile.activation_key_expired():\n user = profile.user\n if not user.is_active:\n user.delete()", "def delete_expired_users(self):\r\n for profile in self.all():\r\n if profile.activation_key_expired():\r\n user = profile.user\r\n if not user.is_active:\r\n user.delete()", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def _expire(self):\n with self._lock:\n self._items.popleft()", "def remove_expired_files():\n from models import FlowFile\n FlowFile.objects.filter(\n state__in=[FlowFile.STATE_UPLOADING, FlowFile.STATE_UPLOAD_ERROR],\n updated__lte=datetime.datetime.date() - datetime.timedelta(days=FLOWJS_EXPIRATION_DAYS)\n ).delete()", "def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()", "def cache_clean(self):\n\t\tnow = time.time()\n\t\tkeys_for_removal = collections.deque()\n\t\tfor key, (_, 
expiration) in self.__cache.items():\n\t\t\tif expiration < now:\n\t\t\t\tkeys_for_removal.append(key)\n\t\tfor key in keys_for_removal:\n\t\t\tdel self.__cache[key]", "def clean_up (self):\n\n self._exec (\n 'delete from table_name where expires < %(now)s',\n now = self.now ()\n )", "def purgeExpiredProxies( self ):\n cmd = \"DELETE FROM `ProxyDB_Proxies` WHERE ExpirationTime < UTC_TIMESTAMP() and PersistentFlag = 'False'\"\n return self._update( cmd )", "def remove_appointments():\n appointments = Appointment.objects.all()\n now = timezone.now()\n for appointment in appointments:\n if appointment.date < now:\n appointment.delete()", "def _purge_expired_items():\n now = time.time()\n keys = data_table.keys()\n\n for key in keys:\n num_unique_vals = len(data_table[key])\n\n # We iterate through in reverse, because otherwise deleting an \n # entry will cause frivolous edge cases.\n iteration_scheme = range(num_unique_vals)\n iteration_scheme.reverse()\n\n # value format: (value variable, expiration time)\n for value in iteration_scheme:\n expiration_time = data_table[key][value][1]\n temp_value = data_table[key][value][0] # For logging purposese only.\n if now > expiration_time:\n # The entry is expired.\n del data_table[key][value]\n if len(data_table[key]) == 0:\n del data_table[key]\n if (verbose):\n logstring = str(\"Entry purged: \" + str(key) + \": \" + str(temp_value) + \"\\n\")\n _log_with_timestamp(logstring)\n\n return", "def prune_database():\n now = datetime.now()\n expired = BlacklistedToken.query.filter(BlacklistedToken.expires < now).all()\n for token in expired:\n db.session.delete(token)\n db.session.commit()\n current_app.running_context.cache.set(\"number_of_operations\", 0)", "def purgeExpiredRequests( self ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()\"\n return self._update( cmd )", "def delete_expired(cls):\n now = datetime.now(timezone.utc)\n\n sql = \"\"\"DELETE FROM qiita.{0} WHERE exp<%s\"\"\".format(cls._table)\n qdb.sql_connection.perform_as_transaction(sql, [now])", "def clear_obsolete_trash():\n minute_ago = datetime.now() - timedelta(minutes=1)\n Picture.trash.filter(trashed_time__lt=minute_ago).delete()", "def expire_deleted_fixtures(self):\n logger.info(\"Purging instances deleted %s days ago\", common.DELETED_INSTANCE_GC_DAYS)\n n_days_ago_in_usec = int((time.time() * 1e6) - (common.DELETED_INSTANCE_GC_DAYS * common.NANOSECONDS_IN_A_DAY))\n expired = self.instances.find({\n 'mtime': {'$lt': n_days_ago_in_usec},\n 'status': InstanceStatus.DELETED\n })\n count = 0\n for fix_doc in expired:\n logger.info(\"Deleting expired fixture %s (id: %s)\", fix_doc['name'], fix_doc['_id'])\n self.axdb_client.delete_fixture_instance(fix_doc['_id'])\n self.instances.delete_one({'_id': fix_doc['_id']})\n count += 1\n logger.info(\"Expired %s instances\", count)", "def remove_expired_records(self, database): #pylint: disable-msg=R0201\n #special case to ignore eat files that are uploaded every day\n self.remove_eat_upload_records(database)\n \n expiry_time = 20 # 20 in seconds\n aborted_expiry_time = 43200 # 12 hours in seconds\n now = datetime.datetime.utcnow()\n \n for rec in database:\n #finished records\n if rec.get('finished_time_insert', None) and (now - rec['finished_time_insert']) > datetime.timedelta(seconds = expiry_time) :\n #archive finished record\n self.archiver.archive(rec)\n database.delete(rec)\n #aborted for more than 12 hours records \n elif rec.get('aborted', None) and ( now - rec.get('last_update', now) ) > 
datetime.timedelta(seconds = aborted_expiry_time) :\n #archive aborted record that was never finished\n self.archiver.archive(rec)\n database.delete(rec)", "def remove_expired(self):\n now = time.time()\n return [self.remove_if_expired(key, now) for key in self._request_sessions.keys()[:]].count(True)", "def flush_expired_tokens(self):\n raise exception.NotImplemented() # pragma: no cover", "def handle_expired_profiles():\n expired_profiles = InstallationRecord.objects.filter(expires__lte=timezone.now(), active=True)\n for record in expired_profiles:\n device = record.device\n profile = record.profile\n device.installed.remove(profile)\n record.active = False\n record.save()", "def expire_all(self) -> None:\n for state in self.identity_map.all_states():\n state._expire(state.dict, self.identity_map._modified)", "def clear_expired_exceptions():\n print(\"Clearing out exceptions that have an expired TTL...\")\n clear_old_exceptions()\n print(\"Completed clearing out exceptions that have an expired TTL.\")", "def invalidate(self):\n\n dogpile_region, cache_key = self._get_cache_plus_key()\n dogpile_region.delete(cache_key)", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def do_expire(self):\n # Deep copy to avoid RuntimeError: dictionary changed size during iteration\n _timeouts = deepcopy(self.timeouts)\n for key, value in _timeouts.items():\n if value - self.clock.now() < timedelta(0):\n del self.timeouts[key]\n # removing the expired key\n if key in self.redis:\n self.redis.pop(key, None)", "def _do_expire(self):\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n memory = self.memory.copy()\n self.memory.clear()\n for key, val in memory.items():\n ip = key[0]\n if ip in self.live_servers and val.is_expired:\n # Decrease total connection for that server\n self.total_connection[ip] -= 1\n if not val.is_expired:\n self.memory[key] = val", "def clear(self, lifetime):\n\n # find all entries with the specified lifetime\n to_remove = [k for (k, v) in self.store.items() if v[0] == lifetime]\n # remove all these entries from the list\n for t in to_remove:\n del self.store[t]", "def erase(self):\n for b in self.posted_on:\n b.erase(self)\n self._catalog.erase(self)\n if self._logbook is not None:\n self._logbook.close()\n self._logbook = None\n if self.state != states.UNCLAIMED:\n self._claimer.unclaim(self, self.owner)", "def invalidate_cache(self):\n #self.objects.objects = []\n return True", "def delete_expired_users(self):\n days_valid = settings.ACCOUNT_ACTIVATION_DAYS\n expired = datetime.now() - timedelta(days=days_valid)\n prof_ids = self.filter(user__date_joined__lt=expired)\n prof_ids = prof_ids.values_list('id', flat=True)\n for chunk in chunked(prof_ids, 1000):\n _delete_registration_profiles_chunk.apply_async(args=[chunk])", "def clearStore(self):\n os.remove(self.uid+\".pcl\")\n self.items = []", "def delete_old():\n objs = (Snapshot\n .objects\n .filter(timestamp__lte=(datetime.now() - timedelta(days=35)))\n )\n objs.delete()", "def delete(self):\n for obj in self:\n _unset_related_objects_relations(obj)\n\n self.update(deleted=now())", "def PurgeAll(self):\n\t\tself.acad.ActiveDocument.PurgeAll()", "def clean(self):\n with self._lock:\n self.__cache.expire()", "def clear_cache(self):\n self.mongo_database.cache.delete_many({})", "def clean_db():\n 
yest = datetime.utcnow() - timedelta(days=1)\n try:\n Load.query.filter(Load.time < yest).delete()\n db.session.commit()\n except:\n db.session.rollback()", "async def delete_expired_responses(self):\n logger.info(f'Deleting all responses more than {self.expire_after} hours old')\n keys_to_delete = set()\n\n for key in await self.responses.keys():\n response = await self.get_response(key)\n if response and response.is_expired:\n keys_to_delete.add(key)\n\n logger.info(f'Deleting {len(keys_to_delete)} expired cache entries')\n for key in keys_to_delete:\n await self.delete(key)", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def purge_expired (aging_hash, interval=aging_hash_interval):\n\n expired = []\n for k, v in aging_hash.items():\n set_time = v[0]\n if (time.time() - set_time) > aging_hash_interval:\n expired.append(k)\n for ex_k in expired:\n del aging_hash[ex_k]", "def _clean(self):\n limit = datetime.now() - timedelta(seconds=self._timeout)\n \n for uid in [uid for uid, timestamp in self._reservedUID.iteritems()\n if timestamp < limit]:\n del self._reservedUID[uid]", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def expired_alarm():\n temp_events = Events_list.copy()\n for x in range(len(temp_events)):#iterates for the whole events list\n if time.time() >= convert_to_epoch(temp_events[x][1]):#if the time set is less than current time it must be expired\n event_remove(temp_events[x][0])", "def remove_old_entries(self, expires_before):\n if expires_before.tzinfo is None:\n # if expires_before is not timezone-aware, assume local time\n expires_before = expires_before.astimezone()\n\n keys_to_delete = set()\n for key, (response, _) in self.responses.items():\n if response.expiration_date is not None and response.expiration_date < expires_before:\n keys_to_delete.add(key)\n\n for key in keys_to_delete:\n self.delete(key)", "def clean_local_cache(self):\n to_expire = []\n now = int(time())\n\n try:\n for k, (_, _, grace) in six.iteritems(self._local_cache):\n if now > grace:\n to_expire.append(k)\n except RuntimeError:\n # It's possible for the dictionary to be mutated in another thread\n # while iterating, but this case is rare, so instead of making a\n # copy and iterating that, it's more efficient to just let it fail\n # gracefully. It'll just get re-run later.\n return\n\n for k in to_expire:\n try:\n del self._local_cache[k]\n except KeyError:\n # This could only exist in a race condition\n # where another thread has already deleted this key,\n # but we'll guard ourselves against it Justin Case.\n pass", "def removeOldItems(self):\n pass", "def remove_training_reserves():\n reserves = TrainingReserve.objects.all()\n now = timezone.now()\n for reserve in reserves:\n if reserve.date < now:\n reserve.delete()", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). 
service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. service=s3 method=delete_object id={}'.format(file_id))", "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def _remove_expired_task(self):\n with self.lock:\n curr_time = datetime.utcnow()\n tasks_list = self.tasks_to_remove\n for task_id, t in tasks_list:\n time_elapsed = curr_time - t\n if (time_elapsed.total_seconds() < TASK_EXPIRE_DURATION_SEC):\n break\n self.tasks_to_remove.remove((task_id, t))\n self.task_map.pop(task_id)", "def remove_old(self, delay):\n self.logger.log_deleting_old(delay)\n self.json_collection.delete_many({'_time': {\"$lt\": int(time.time()) - int(delay)}})", "def gc_expired_tokens(expiration_window):\n (\n AppSpecificAuthToken.delete()\n .where(AppSpecificAuthToken.expiration < (datetime.now() - expiration_window))\n .execute()\n )", "def delete_expired_transactions():\n call_command('delete_expired_transactions')", "def remove_old_cart_items():\n print \"Removing old carts\"\n remove_before = datetime.now() + timedelta(days=-settings.SESSION_AGE_DAYS)\n cart_ids = []\n old_items = CartItem.objects.values('cart_id').annotate(last_change=Max('date_added')).filter(\n last_change__lt=remove_before).order_by()\n for item in old_items:\n cart_ids.append(item['cart_id'])\n to_remove = CartItem.objects.filter(cart_id__in=cart_ids)\n to_remove.delete()\n print str(len(cart_ids)) + \" carts were removed\"", "def purge(self):\n pass", "def clear(self) -> None:\n self._store.clear()", "def expired(self):\n try:\n cursor = self.connection.execute('DELETE FROM session WHERE expires < NOW()')\n return cursor.rowcount\n except database.Error, e:\n self.connection.rollback()\n else:\n self.connection.commit()\n finally:\n cursor.close()", "def remove_all_recs(self):\n return self.storage.clear()", "def clean_old_info(recent_sessions, include_ended):\n expire_secs = OLD_INFO_EXPIRE_SECS if include_ended else 0\n now = time()\n i = 0\n while i < len(recent_sessions):\n session = recent_sessions[i]\n if session.end and now - session.end > expire_secs:\n recent_sessions.pop(i)\n i -= 1\n else:\n j = 0\n while j < len(session.requests):\n request = session.requests[j]\n if request.end and now - request.end > expire_secs:\n session.requests.pop(j)\n j -= 1\n j += 1\n i += 1", "def delete(self):\n\t\tdel self.scheduler.find(self)\n\t\tdel self", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def delete_old_tickets(**kwargs):\n sender = kwargs.get('sender', None)\n now = datetime.now()\n expire = datetime(now.year, now.month, now.day - 2)\n 
sender.objects.filter(created__lt=expire).delete()", "def cleanup(self):\n for attribute in self._all_db_field_names:\n delattr(self, attribute)", "def destroy(self, request, *args, **kwargs):\n\n instance = self.get_object()\n for reg in instance.registrations.all():\n if reg.person.expiry_date is None:\n raise exceptions.ValidationError(\n ('Organization has registrations associated with it. ')\n ('Remove this organization from registration records first.'))\n instance.expiry_date = timezone.now()\n instance.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def cancel(self):\n if self.activated:\n return\n Sched.timers.remove(self)\n heapq.heapify(Sched.timers)", "def rollback_loaded_assignments(assignments: List[Assignment]):\n logger.info(\n \"**** Rolling back %s assignments via Canvas API - for testing purposes\",\n len(assignments),\n )\n\n for assignment in assignments:\n assignment.delete()\n\n logger.info(\"**** Successfully deleted %s assignments\", len(assignments))", "def invalidate(self):\n self._reset_cache()\n return", "def invalidate(self):\n self.modified = True\n self._invalidated = True\n self._session.clear()", "def delete_old(self):\n retention_time = Host().get_retention_time()\n snapshots = self._list_snapshots()\n\n for snap in snapshots['Snapshots']:\n snapshot_id = snap['SnapshotId']\n start_time = snap['StartTime']\n if start_time <= retention_time:\n self.resource.delete_snapshot(\n SnapshotId=snapshot_id,\n DryRun=DRY_RUN\n )\n self.deleted_ids.append(snapshot_id)\n\n notify = Notifier()\n notify.send(self.created_id, self.deleted_ids)", "def clean_up(self):\n while len(self.__refs_for_deletion): \n attr = self.__refs_for_deletion.pop()\n obj = getattr(self, attr)\n if hasattr(obj, 'clean_up'):\n obj.clean_up()\n delattr(self, attr)", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def migrateDown(self):\n subStore = self.store.parent.getItemByID(self.store.idInParent)\n ssph = self.store.parent.findUnique(\n _SubSchedulerParentHook,\n _SubSchedulerParentHook.subStore == subStore,\n default=None)\n if ssph is not None:\n te = self.store.parent.findUnique(TimedEvent,\n TimedEvent.runnable == ssph,\n default=None)\n if te is not None:\n te.deleteFromStore()\n ssph.deleteFromStore()", "def remove_apps(self):\n self.membership_class.objects.filter(obj=self).delete()", "def cleanup(self):\n\t\tself.removeObservers()\n\t\tself.active = False", "def __del__(self):\n self.evaler.db.flush()", "def clear(self):\n self._store = {}", "def delete(self):\n _unset_related_objects_relations(self)\n self.deleted = now()\n self.save()\n\n return self", "def test_expired_user_deletion_ignore_activated(self):\n user = (self.registration_profile.objects\n .create_inactive_user(\n site=Site.objects.get_current(),\n username='bob',\n password='secret',\n email='[email protected]'))\n profile = self.registration_profile.objects.get(user=user)\n _, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(activated)\n # Expire the activation window.\n user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n user.save()\n\n deleted_count = self.registration_profile.objects.delete_expired_users()\n self.assertEqual(deleted_count, 0)\n self.assertEqual(self.registration_profile.objects.count(), 1)\n 
self.assertEqual(UserModel().objects.get(username='bob'), user)", "def to_be_deleted(self):\n return self.filter(start__lte=timezone.now() - datetime.timedelta(days=1))", "def distribution_removed(self, uid):\n with self.__lock:\n for store in self._stores:\n store.remove(uid)", "def cleanup_expired_dev_entries(**kwargs):\n logging.debug(\"In the cleanup_expired_dev_entries() method.\")\n logging.info(\"Checking for expired Developer Objects.\")\n\n # Get all rules for this ACP.\n\n all_acp_rules = ACPRule(fmc=fmc1, acp_name=acp_name)\n all_rules = all_acp_rules.get()\n if all_rules.get('items', '') is '':\n logging.warning(\"No rules found for Access Control Policy: {}.\".format(kwargs['acp_name']))\n else:\n for item in all_rules['items']:\n if 'Dev-' in item['name']:\n namesplit = item['name'].split('-')\n if int(namesplit[2]) < kwargs['threshold_time']:\n logging.info(\"Deleting {} rule from {}.\".format(item['name'], kwargs['acp_name']))\n tmp_rule = None\n tmp_rule = ACPRule(fmc=fmc1, acp_name=acp_name)\n tmp_rule.get(name=item['name'])\n tmp_rule.delete()\n # Now Delete any expired Host objects.\n all_ips = IPAddresses(fmc=fmc1)\n all_hosts = all_ips.get()\n for item in all_hosts['items']:\n if 'Dev-' in item['name']:\n namesplit = item['name'].split('-')\n if int(namesplit[2]) < kwargs['threshold_time']:\n logging.info(\"Deleting {} host object.\".format(item['name']))\n tmp_rule = None\n tmp_rule = IPHost(fmc=fmc1)\n tmp_rule.get(name=item['name'])\n tmp_rule.delete()\n # Finally Delete any expired Port objects.\n all_ports = ProtocolPort(fmc=fmc1)\n response = all_ports.get()\n for item in response['items']:\n if 'Dev-' in item['name']:\n namesplit = item['name'].split('-')\n if int(namesplit[2]) < kwargs['threshold_time']:\n logging.info(\"Deleting {} port object.\".format(item['name']))\n tmp_rule = None\n tmp_rule = ProtocolPort(fmc=fmc1)\n tmp_rule.get(name=item['name'])\n tmp_rule.delete()", "def _prune_cache(self):\n default_expiry = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.cache_resources_for)\n for resource_id, resource in self.local_resource_status.items():\n if 'cache_until' in resource:\n if datetime.datetime.utcnow() > resource['cache_until']:\n self._delete_cache(resource_id)\n elif resource['last_accessed'] < default_expiry:\n self._delete_cache(resource_id)", "def clean_out_addonpremium(days=DAYS_OLD):\n old = datetime.now() - timedelta(days=days)\n objs = AddonPremium.objects.filter(addon__premium_type=amo.ADDON_FREE,\n created__lt=old)\n log.info('Deleting %s old addonpremiums.' 
% objs.count())\n for obj in objs:\n log.info('Delete addonpremium %s which was created on %s' %\n (obj.addon_id, obj.created))\n obj.delete()", "def remove(self):\n for db in self.values():\n db.remove()", "def _expire(self):\n del self.map.addr[self.name]\n self.map.notify(\"addrmap_expired\", *[self.name], **{})", "def ageout(self) -> int:\n now = time.monotonic()\n delete = [\n k\n for k, (_, insertion_time) in self._cache.items()\n if (now - insertion_time) > self._lifetime\n ]\n for k in delete:\n del self[k]\n\n return len(delete)", "def invalidate(self, *objects):\r\n keys = [k for o in objects for k in o._cache_keys()]\r\n invalidator.invalidate_keys(keys)", "def clearAssociations(self):\n return _libsbml.Association_clearAssociations(self)", "def clean_cache(self):\n timer = Timer()\n entries = []\n for file_in_cache in self.find_archives():\n cache_metadata = self.read_metadata(file_in_cache)\n last_accessed = cache_metadata.get('last-accessed', 0)\n entries.append((last_accessed, file_in_cache))\n to_remove = sorted(entries)[:-self.cache_limit]\n if to_remove:\n for last_used, file_in_cache in to_remove:\n logger.debug(\"Removing archive from cache: %s\", file_in_cache)\n metadata_file = self.get_metadata_file(file_in_cache)\n self.context.execute('rm', '-f', file_in_cache, metadata_file)\n logger.verbose(\"Took %s to remove %s from cache.\",\n timer, pluralize(len(to_remove), \"archive\"))\n else:\n logger.verbose(\"Wasted %s checking whether cache needs to be cleaned (it doesn't).\", timer)" ]
[ "0.6875887", "0.6844624", "0.67177826", "0.65542144", "0.6533008", "0.6387391", "0.6350593", "0.6350029", "0.63188756", "0.62800354", "0.6261716", "0.6261716", "0.62265635", "0.61934316", "0.6192193", "0.6032369", "0.6010329", "0.600182", "0.59811556", "0.5909272", "0.58980876", "0.588563", "0.58636814", "0.5861482", "0.5837452", "0.5833156", "0.5815208", "0.5803512", "0.57887113", "0.5787814", "0.5787443", "0.5768639", "0.57499564", "0.57154983", "0.571041", "0.57014245", "0.56752735", "0.5659699", "0.5646204", "0.5646193", "0.563149", "0.5608241", "0.55773485", "0.5575046", "0.5571318", "0.5571124", "0.5534918", "0.55206305", "0.5514391", "0.5497429", "0.5493233", "0.54852283", "0.5472734", "0.5459021", "0.5456455", "0.5455044", "0.5446478", "0.54438746", "0.5414412", "0.5409968", "0.5408214", "0.53992236", "0.53573304", "0.534169", "0.5330773", "0.5321101", "0.5312153", "0.5309301", "0.5300884", "0.5294692", "0.5294239", "0.52893776", "0.5281537", "0.52806395", "0.527282", "0.5272488", "0.52659297", "0.52576476", "0.52567786", "0.5244829", "0.52394974", "0.52394974", "0.5238182", "0.5236263", "0.5235907", "0.5234169", "0.52323204", "0.5231898", "0.5227988", "0.52225953", "0.52053416", "0.5202293", "0.5195052", "0.5190612", "0.5185977", "0.51858866", "0.5181018", "0.5173791", "0.51585", "0.5152203" ]
0.68006694
2
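Note: the `cleanupAssociations` document above relies on a MongoEngine `MistAssociation` model that exposes an `is_expired()` helper, which the row itself does not define. A minimal sketch of what such a model could look like, assuming an epoch-seconds `issued` field and a `lifetime` in seconds (field names are illustrative assumptions, not taken from the row):

import time

import mongoengine as me


class MistAssociation(me.Document):
    """Hypothetical association document; field names are assumptions."""
    handle = me.StringField(required=True)
    secret = me.BinaryField(required=True)
    issued = me.IntField(required=True)    # epoch seconds when issued
    lifetime = me.IntField(required=True)  # validity window in seconds

    def is_expired(self):
        # Expired once the validity window has elapsed.
        return (self.issued + self.lifetime) < time.time()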
Shortcut for C{L{cleanupNonces}()}, C{L{cleanupAssociations}()}. This method is not called in the normal operation of the library. It provides a way for store admins to keep their storage from filling up with expired data.
def cleanup(self):
        return self.cleanupNonces(), self.cleanupAssociations()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(self, *args, **kwargs):", "def _cleanup(self):\n pass", "def cleanup():", "def cleanup(*args, **kwargs): # real signature unknown\n pass", "def cleanup(self):\n\n pass", "def cleanup(self):\n raise NotImplementedError", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup (self):\n pass", "def post_cleanup(self):\n pass", "def cleanup(self):\r\n pass", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def cleanup(self) -> None:\n raise NotImplementedError()", "def cleanup(self):\n raise NotImplementedError()", "def cleanup(self):\n raise NotImplementedError()", "def final_cleanup(self):\n raise NotImplementedError()", "def cleanup(ctx):\n pass", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def cleanup():\n management.call_command('cleanup')", "def cleanup(self):\n raise NotImplementedError(\"Cleanup not supported for servers\")", "def cleanUp():\n pass", "def horde_cleanup(self):", "def cleanup(self):\n if self.cleanup_function:\n self.cleanup_function(self.data)", "def __del__(self):\r\n self.cleanup()", "def cleanup(self):\n logging.debug(\"cleanup called\")\n self.delete_networks()\n self.delete_machines()", "def cleanupStorage(self, oStorCfg):\n return oStorCfg.cleanup();", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def cleanup(self):\n self._faucet_collector.cleanup()", "def cleanup(self):\n log.info(\"== Stage 7: Cleanup ==\")\n # Close current connection to free up all the temporary resource\n # and locks\n cleanup_start_time = time.time()\n try:\n self.rename_back()\n self.start_slave_sql()\n if self.is_myrocks_table and self.is_myrocks_ttl_table:\n self.enable_ttl_for_myrocks()\n self.release_osc_lock()\n self.close_conn()\n except Exception:\n log.exception(\n \"Ignore following exception, because we want to try our \"\n \"best to cleanup, and free disk space:\"\n )\n self._cleanup_payload.mysql_user = self.mysql_user\n self._cleanup_payload.mysql_pass = self.mysql_pass\n self._cleanup_payload.socket = self.socket\n self._cleanup_payload.get_conn_func = self.get_conn_func\n self._cleanup_payload.cleanup(self._current_db)\n self.stats[\"time_in_cleanup\"] = time.time() - cleanup_start_time", "def _clean_up(self):", "def cleanup(self):\n raise Exception(\"{0} type does not have cleanup implemented\".format(type(self)))", "def cleanUp(self):\r\n pass", "def cleanup(self):\n superClass.cleanup(self)\n # TODO Release resources and cleanup stuff here", "def cleanup(self):\n self._tmp_obj.cleanup()", "def cleanup(self):\n return True;", "def __del__(self):\n self._cleanup()", "def clean_up (self):\n\n self._exec (\n 'delete from table_name where expires < %(now)s',\n now = self.now ()\n )", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def cleanupAssociations(self):\n try:\n mist_associations = MistAssociation.objects()\n except me.DoesNotExist:\n mist_associations = []\n\n counter = 0\n for assoc in mist_associations:\n if assoc.is_expired():\n assoc.delete()\n counter += 1\n\n 
return counter", "def __cleanup(self):\n \n self.logger.debug(\"Deleting assignment and definition of policy\")\n self.interactor.delete_policy_assignment(self.assignment_id)\n self.interactor.delete_policy_definition(self.policy_id)", "async def clean_up(self) -> None:", "def do_cleanup_cruft(self): # pragma: no cover\n\n for cruft in self.get_cruft():\n cruft.cleanup()\n self.post_cleanup()", "def cleanup(self):\n #\n # TODO: Implement this if needed, be symmetrical with prepare(...)\n #\n client.cleanup(self)", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def agent_cleanup():\n\t# clean up\n\n\treturn", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def test_cleanup(self):\n # _attempt_cleanup | pylint: disable=protected-access\n self.auth._attempt_cleanup = True\n self.auth.cleanup([self.achall])\n\n expected = [\n mock.call.del_txt_record(DOMAIN, \"_acme-challenge.\" + DOMAIN, mock.ANY)\n ]\n self.assertEqual(expected, self.mock_client.mock_calls)", "def cleanup(self):\r\n logging.info(\"entered the cleanup\")", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, self.test_df = None, None, None", "def agent_cleanup():\n # clean up\n return", "def agent_cleanup():\n # clean up\n return", "def on_cleanup(self):\n raise NotImplementedError", "def cleanUp(self):\r\n # All intermediates should be removed by app controller\r\n pass", "def agent_cleanup(self):\n pass", "def clean_up(self):\n\t\tpass", "def cleanup(self, context, instance, network_info, block_device_info=None,\n destroy_disks=True):\n pass", "def check_instance_shared_storage_cleanup(self, context, data):\n pass", "def _cleanup(self):\n # If module object not yet created, return\n if getattr(self, \"this\", None) is None:\n return\n\n deallocate = getattr(self, \"deallocate\", None)\n if callable(deallocate):\n deallocate()", "def cleanupNonces(self):\n try:\n mist_nonces = MistNonce.objects()\n except me.DoesNotExist:\n mist_nonces = []\n\n counter = 0\n for n in mist_nonces:\n if n.is_old():\n n.delete()\n counter += 1\n\n return counter", "def agent_cleanup():\n\treturn", "def test_z_cleanup(self):\n\n TestController.tearDown(\n self,\n clear_all_tables=True,\n del_global_app_set=True,\n dirs_to_destroy=['user', 'morphology', 'corpus', 'morphological_parser'])", "def clean_up(self):\n # TODO: Implement if needed\n pass", "def cleanup_with_force(self):\n log.info(\n \"--force-cleanup specified, cleaning up things that may left \"\n \"behind by last run\"\n )\n cleanup_payload = CleanupPayload(charset=self.charset, sudo=self.sudo)\n # cleanup outfiles for include_id and exclude_id\n for filepath in (self.outfile_exclude_id, self.outfile_include_id):\n cleanup_payload.add_file_entry(filepath)\n # cleanup outfiles for detailed checksum\n cleanup_payload.add_file_entry(\n \"{}*\".format(\n self._outfile_name(\n suffix=\".old\", chunk_id=0, skip_compressed_extension=True\n )\n )\n )\n cleanup_payload.add_file_entry(\n \"{}*\".format(\n self._outfile_name(\n suffix=\".new\", chunk_id=0, skip_compressed_extension=True\n )\n )\n )\n # cleanup outfiles for table dump\n file_prefixes = [\n self.outfile,\n 
\"{}.old\".format(self.outfile),\n \"{}.new\".format(self.outfile),\n ]\n for file_prefix in file_prefixes:\n log.debug(\"globbing {}\".format(file_prefix))\n for outfile in glob.glob(\n \"{}.[0-9]*\".format(file_prefix),\n ):\n cleanup_payload.add_file_entry(outfile)\n for trigger in (\n self.delete_trigger_name,\n self.update_trigger_name,\n self.insert_trigger_name,\n ):\n cleanup_payload.add_drop_trigger_entry(self._current_db, trigger)\n for tbl in (\n self.new_table_name,\n self.delta_table_name,\n self.renamed_table_name,\n ):\n partitions = self.fetch_partitions(tbl)\n cleanup_payload.add_drop_table_entry(self._current_db, tbl, partitions)\n cleanup_payload.mysql_user = self.mysql_user\n cleanup_payload.mysql_pass = self.mysql_pass\n cleanup_payload.socket = self.socket\n cleanup_payload.get_conn_func = self.get_conn_func\n cleanup_payload.cleanup(self._current_db)\n cleanup_payload.close_conn()", "def clean(_context):", "def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()", "def cleanup(self):\n logging.debug(\"TrackerPlugin.cleanup\")", "def purge(self):\n pass", "def cleanup(self, action):\n pass", "def trial_clean_up(self):\n pass", "def cleanup(self):\n self._write_transaction(tx.cleanup)", "def cleanup(self):\n self.GP.cleanup()", "def cleanup(self):\r\n for action in self._actions:\r\n action.cleanup()", "async def _clean_up_cache_invalidation_wrapper(self) -> None:\n delete_up_to: int = (\n self.hs.get_clock().time_msec() - RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS\n )\n\n in_backlog = await self._clean_up_batch_of_old_cache_invalidations(delete_up_to)\n\n # Vary how long we wait before calling again depending on whether we\n # are still sifting through backlog or we have caught up.\n if in_backlog:\n next_interval = CATCH_UP_CLEANUP_INTERVAL_MS\n else:\n next_interval = REGULAR_CLEANUP_INTERVAL_MS\n\n self.hs.get_clock().call_later(\n next_interval / 1000, self._clean_up_cache_invalidation_wrapper\n )", "def cleanup(self):\n super(Test200SmartSanityDownload004, self).cleanup()", "def cleanupResources():\n None", "def purge() -> None:\r\n _purge_func(False)", "def cleanup(self):\n del self.quester.db.test_quest_counter", "def destroy_check(self):\n pass", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def cleanup(self):\r\n # XXX should be fixed properly!!!\r\n try:\r\n self.unlock()\r\n except:\r\n pass", "def cleanup_backups():\n try:\n yield\n finally:\n shutil.rmtree(\"tmp/backups\")", "def _cleanup():\n for (\n _,\n transformation,\n transformation_dict,\n _,\n _,\n increfed,\n _,\n ) in _queued_transformations:\n # For some reason, the logic here is different than for the async version\n # (see run_transformation_dict_async)\n if (\n increfed\n and bytes.fromhex(transformation) in transformation_cache.transformations\n ):\n transformation_cache.decref_transformation(transformation_dict, increfed)", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def cleanup():\n global TRANSPORT, NOTIFIER\n assert TRANSPORT is not None\n assert NOTIFIER is not None\n TRANSPORT.cleanup()\n TRANSPORT = NOTIFIER = None", "def clean(self):\n self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n 
super().clean()" ]
[ "0.7016629", "0.69023955", "0.66813576", "0.6666886", "0.66557264", "0.65890634", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6582056", "0.6575037", "0.65191555", "0.6517952", "0.649991", "0.649991", "0.649991", "0.64862424", "0.64862424", "0.64682287", "0.6465783", "0.6465783", "0.64360785", "0.6423862", "0.6396054", "0.63841957", "0.6363836", "0.63603634", "0.63446623", "0.6341605", "0.6329155", "0.63003665", "0.62640256", "0.6224572", "0.6206769", "0.62063587", "0.620492", "0.61981064", "0.6190639", "0.61903405", "0.6184481", "0.6174722", "0.6163211", "0.6149647", "0.61460066", "0.613429", "0.6131975", "0.61190003", "0.6110315", "0.61098003", "0.60836375", "0.6067382", "0.6066129", "0.6066129", "0.60529375", "0.60493547", "0.6047501", "0.6025133", "0.6025133", "0.60040283", "0.60017955", "0.5998971", "0.5995298", "0.5988688", "0.59771305", "0.59693563", "0.5966018", "0.5961399", "0.5961109", "0.5957693", "0.5955114", "0.59516305", "0.5950946", "0.59431744", "0.5930141", "0.5927289", "0.5923746", "0.59230816", "0.59199584", "0.5908754", "0.59061205", "0.58924365", "0.58882886", "0.5886935", "0.586259", "0.5860634", "0.5857779", "0.58517754", "0.5838999", "0.5833433", "0.5824391", "0.57985806", "0.579832", "0.5792504" ]
0.73417866
0
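Together, the two rows above cover the store-maintenance hooks of an OpenID-style store: `cleanup()` is just a shortcut that chains `cleanupNonces()` and `cleanupAssociations()`, each of which returns the number of deleted records. A small sketch of how an admin job might call it, assuming `store` is an instance of the store class these methods belong to:

def run_store_maintenance(store):
    """Invoke the cleanup shortcut and report the purge counts."""
    nonces_removed, assocs_removed = store.cleanup()
    print('purged %d expired nonces and %d expired associations'
          % (nonces_removed, assocs_removed))
    return nonces_removed, assocs_removed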
Return top level command handler.
def init():
    @click.command()
    @click.option('--cell', callback=cli.handle_context_opt,
                  envvar='TREADMILL_CELL',
                  expose_value=False,
                  required=True)
    @click.argument('app-or-svc')
    @click.option('--host',
                  help='Hostname where to look for the logs',
                  required=True)
    @click.option('--uniq',
                  help='The container uniq id',
                  required=False)
    @click.option('--service',
                  help='The name of the service for which the logs are '
                       'to be retreived',
                  required=False)
    def logs(app_or_svc, host, uniq, service):
        """View application's service logs."""
        try:
            app, uniq, logtype, logname = app_or_svc.split('/', 3)
        except ValueError:
            app, uniq, logtype, logname = app_or_svc, uniq, 'service', service

        if any(param is None for param in [app, uniq, logtype, logname]):
            cli.bad_exit('Incomplete parameter list')

        _host, port = _nodeinfo_endpoint(host)

        api = 'http://{0}:{1}'.format(host, port)
        logurl = '/local-app/%s/%s/%s/%s' % (
            urllib_parse.quote(app),
            urllib_parse.quote(uniq),
            logtype,
            urllib_parse.quote(logname)
        )

        log = restclient.get(api, logurl)
        click.echo(log.text)

    return logs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_command_handler(self) -> Callable:\n try:\n return globals()[self.command_handler]\n except KeyError:\n logging.error(\"command_handler function '%s' for command '%s' not found in global scope\" %\n (self.command_handler, self.name))\n except AttributeError:\n logging.error(\"command_handler for command '%s' not defined in command_definition.py\" % self.name)", "def get_command_handler(self, command):\n try:\n command_handler = getattr(self, \"command_{}\".format(command))\n except AttributeError:\n raise AttributeError(\"Unknown command: '{}'\".format(command))\n\n return command_handler", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def get_command(self):\n return self.command", "def _get_handler(self, name):\n\n if name not in self._handlers:\n raise CLICoreTemplateHandlerNotFoundError('Command [{name}] is not valid. '\n 'available commands: {commands}.'\n .format(name=name,\n commands=\n list(self._handlers.keys())))\n\n return self._handlers[name][0]", "def default(self, handler: Handler):\n if asyncio.iscoroutinefunction(handler):\n self._default_handler = AsyncCommandProxy(handler, self.parser)\n else:\n self._default_handler = CommandProxy(handler, self.parser)\n return handler", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def _command(self, *cmd, handler=None):", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = self.__getattribute__(methodname)\n return method(args)", "def get_command_handlers(self):\n\t\treturn self.command_handlers", "def get_module_command_handler(self, name: str) -> Callable:\n if self.module is None:\n return\n cmnd = getattr(self.module, name, None)\n if cmnd is None or not (callable(cmnd) and hasattr(cmnd, FILEBASE_API_API_METHOD_MARKER_ATTRIB_NAME)):\n return None\n return cmnd", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def _get_cli_handler(self, name):\n\n if name not in self._cli_handlers:\n raise CLIHandlerNotFoundError('CLI handler [{name}] not found.'\n .format(name=name))\n\n return self._cli_handlers[name]", "def get_handler(self):\n return self._Handler(self)", "def menu_handler(self):\n return self._menu_handler", "def getHandler(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n exit(1)\n\n return mod.cli", "def command(\n self,\n handler: Handler = None,\n *,\n name: str = None,\n aliases: Sequence[str] = (),\n help_text: str = None,\n ) -> CommandProxy:\n\n def inner(func: Handler) -> CommandProxy:\n kwargs = {\"aliases\": 
aliases}\n\n help_text_ = help_text or func.__doc__\n if help_text_:\n kwargs[\"help\"] = help_text_.strip()\n\n name_ = name or func.__name__\n if asyncio.iscoroutinefunction(func):\n proxy = AsyncCommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n else:\n proxy = CommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n\n self._add_handler(proxy, name_, aliases)\n\n return proxy\n\n return inner(handler) if handler else inner", "def get_command(self):\n if self.command is not None:\n return self.command\n elif self.parent is not None:\n return self.parent.get_command()\n else:\n return None", "def get_command(self):\n while True:\n text = raw_input('> ').split()\n try:\n if text[0] == \"exit\":\n return UserCommand(COMMAND_EXIT)\n if text[0] == \"save\":\n return UserCommand(COMMAND_SAVE, {\n \"type\": text[1],\n \"name\": text[2],\n })\n elif text[0] == \"list\":\n return UserCommand(COMMAND_LIST)\n elif text[0] == \"show\":\n return UserCommand(COMMAND_SHOW, {\n \"type\": text[1],\n \"name\": text[2],\n })\n elif text[0] == \"execute\":\n return UserCommand(COMMAND_EXECUTE, {\n \"type\": text[1],\n \"name\": text[2],\n })\n\n except IndexError:\n print \"Illegal # of arguments\"", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def main():\n args = arg_parser().parse_args()\n logger = _configure_logger(args.verbose, args.debug)\n\n if args.command == \"check\":\n handler = HandleCheck(arg_input=args.INPUT, logger=logger)\n\n if args.command == \"config\":\n handler = HandleConfig(arg_name=args.name, arg_raw=args.raw, logger=logger)\n\n if args.command == \"create\":\n handler = HandleCreate(\n arg_input=args.input,\n arg_labels=args.labels,\n arg_name=args.name,\n arg_size=args.size,\n arg_pixel_size=args.pixel_size,\n arg_testsplit=args.testsplit,\n arg_validsplit=args.validsplit,\n arg_minspots=args.minspots,\n logger=logger,\n )\n\n if args.command == \"download\":\n handler = HandleDownload(\n arg_input=args.input, arg_list=args.list, arg_all=args.all, logger=logger\n )\n\n if args.command == \"predict\":\n handler = HandlePredict(\n arg_model=args.model,\n arg_input=args.input,\n arg_output=args.output,\n arg_radius=args.radius,\n arg_shape=args.shape,\n arg_probability=args.probability,\n arg_pixel_size=args.pixel_size,\n logger=logger,\n )\n\n if args.command == \"train\":\n handler = HandleTrain(arg_config=args.config, arg_gpu=args.gpu, logger=logger)\n\n if args.command == \"visualize\":\n handler = HandleVisualize(\n arg_dataset=args.dataset,\n arg_subset=args.subset,\n arg_index=args.index,\n arg_image=args.image,\n arg_prediction=args.prediction,\n logger=logger,\n )\n\n try:\n handler()\n except UnboundLocalError:\n logger.warning(f\"args.command defined as {args.command}. 
no handler defined\")", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def getCommand(self):\n return self.__cmd", "def command(self):\n return self._command", "def handle(self, *args, **options):\n if not self.server:\n print 'Error : %s' % self.init_error\n return\n\n handler_choice = {\n 'proxy': self.proxy_handle,\n 'server': self.server_handle,\n }\n\n sub_command = options['sub_command']\n handler_choice.get(sub_command)(options)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def getCommand(self, name):\n return self.commands[name]()", "def get_output_handler(self):\n if not hasattr(self, \"_handler\"):\n handler_class = self.output_handler_class\n self._handler = handler_class(self.basename)\n return self._handler", "def dispatch_command(self, args):\n\t\targuments = {k: v for k, v in vars(args).items() if v is not None}\n\t\tfor c in self.COMMANDS.keys():\n\t\t\tcmd = arguments.get(c, False)\n\t\t\tidx = c\n\t\t\tif cmd:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn None\n\n\t\tif cmd not in self.COMMANDS[idx]:\n\t\t\traise CommandNotFoundError(\"{cmd} not registered\".format(cmd=cmd))\n\n\t\treturn getattr(self, self.COMMANDS[idx][cmd])(arguments)", "def default_handler(self, _: argparse.Namespace) -> int:\n print(\"No command specified!\")\n self.parser.print_usage()\n return 1", "def get_handler(cls):\n if not cls.hnd:\n raise ValueError((\"You must set handler by using set_hnd() method, \"\n \"before calling get_handler() method.\"))\n return cls.hnd", "def main(self, function):\n captured = self.command(function)\n self.default_command = captured.__name__\n return captured", "def get_command(self, ctx, cmd_name):\n cmd_name = self.MAP.get(cmd_name, cmd_name)\n return click.Group.get_command(self, ctx, cmd_name)", "def handler(self):\n\t\treturn self.handle_request", "def command():\n return _config.command", "def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)", "def get_commands(self):\n\t\treturn list(self.command_handlers.keys())", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not 
command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def get_command(self):\n return self.c_dict['COMMAND']", "def get_cmd(self):\n return self.cmd", "def get_command(pid):", "def get_command(self, ctx: Context, cmd_name: str) -> Command:\n next_command = self.command.pop(0)\n if not self.command:\n return DocsBaseCommand(\n name=next_command,\n short_help=f\"Documentation for {self.command_string}\",\n callback=self.command_callback,\n )\n return DocsSubCommand(command=self.command)", "def mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def get_request_handler(self):\n if not hasattr(self, '_oauth_handler'):\n handler_class = self.get_handler_class()\n server = self.get_server()\n self._oauth_handler = handler_class(server)\n return self._oauth_handler", "def get_command_line_parser():\n command_line_parser = argparse.ArgumentParser(\n description=\"Execute data workflows defined in flo.yaml files\",\n )\n subcommand_creator = command_line_parser.add_subparsers(\n title='SUBCOMMANDS',\n )\n for command_module in COMMAND_MODULES:\n command = command_module.Command(subcommand_creator)\n\n # this sets a default value for the command \"option\" so\n # that, when this Command is selected by argparse from the\n # command line, we know which comman instance it\n # corresponds with. 
See run_subcommand function below.\n command.option_parser.set_defaults(command=command)\n return command_line_parser", "def get_action_command(self):\n if self.action.value == \"start\":\n self.action_command = self.ServerStartSubCommand()\n else:\n self.action_command = None", "def _getCommand(self, cmd):\n try:\n cmd_str = cmd.decode('utf-8')\n return getattr(self, 'do_' + cmd_str, None)\n except:\n return None", "def defaultHandler( ):\n return OBJHandler()", "def get_command(self, player):\n return super().get_command(player)", "def get_command(self, object_name, user_key = None):\n\t\treturn self.get_object('command',object_name, user_key = user_key)", "def get_command(self, ctx, name):\n commands = self._iter_commands()\n return commands[name].load()", "def get_command(self, cmd_str):\n try:\n return self.commands[cmd_str]\n except KeyError:\n raise ServerException('invalid command')", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "def command(self):\n return self.package(\"SyntaxObjects\").Command", "def get_command(self, context, name):\n\t\tif name not in self.commands:\n\t\t\tclue = lnk.errors.Message('Did you mess up the default settings?',\n\t\t\t\t\t\t\t\t\t level=0)\n\t\t\ttry_message = lnk.errors.Message(\"See what 'lnk config -k service'\"\n\t\t\t\t\t\t\t\t\t \t\t \" says.\", level=1)\n\t\t\traise lnk.errors.UsageError('Invalid default service.',\n\t\t\t\t\t\t\t\t\t\tClue=clue,\n\t\t\t\t\t\t\t\t\t\tTry=try_message)\n\t\treturn self.commands[name]", "def __addCommandHandler(self, command, type = 'channel', requiresdb = False):\n try:\n # ensure we are dealing with booleans\n if not requiresdb:\n requiresdb = False\n else:\n requiresdb = True\n\n # add the handler\n # check for existing command type\n if self.__commandHandlerTypeExists(type):\n cmdExec = self.__getFullCommandName(command, type)\n\n # if database required but no database available raise exception\n if requiresdb and not self.__databaseAvailable:\n raise ConfigurationException(CONFIG_DATABASE_NOT_AVAILABLE % cmdExec)\n\n # add handler only if the correct method exists\n if self.__commandExists(command, type):\n cmdHandler = {'func': getattr(self, cmdExec),\n 'db': requiresdb}\n self.__commandHandlers[type][command] = cmdHandler\n else:\n raise ConfigurationException(CONFIG_COMMAND_EXEC_NOT_FOUND % cmdExec)\n else:\n raise ConfigurationException(CONFIG_COMMAND_TYPE_NOT_FOUND % type)\n\n except ConfigurationException, (e):\n print 'Configuration failed: ',\n print 'Could not add the command handler for %s: ' % command\n print e.parameter", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def dispatch(self):\n try:\n # dispatch\n options = self._parser.parse_args()\n\n # only dispatch args if available\n if len(vars(options)) > 1:\n options.func(self.app, options)\n else:\n options.func(self.app)\n except AttributeError:\n # no command selected\n print_failure('Please specify a command')\n self._parser.print_help(sys.stderr)\n sys.exit(2)", "def get_handler(self):\n return self.connection_handle", "def work(self):\n\n cmd = self.options.command\n cmdargs = self.options.args\n\n # find function\n fname = \"cmd_\" + cmd.replace('-', '_')\n if not hasattr(self, fname):\n self.log.error('bad subcommand, see --help for usage')\n sys.exit(1)\n fn = getattr(self, fname)\n\n b = inspect.signature(fn).bind(*cmdargs)\n\n fn(*b.args, **b.kwargs)", 
"def command(self, function=None, prefix=None):\n def _command(func):\n captured_f = self.capture(func, prefix=prefix)\n self.commands[func.__name__] = captured_f\n return captured_f\n\n if function is not None:\n return _command(function)\n else:\n return _command", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def load_command(command_path):\n module_path, class_name = command_path.rsplit(\".\", 1)\n module = importlib.import_module(module_path)\n return getattr(module, class_name)()", "def detect_handler(flags_dict: Dict[Text, Any]) -> base_handler.BaseHandler:\n # TODO(b/132286477):Autodetect engine from environment\n raise NotImplementedError('Orchestrator '+flags_dict['engine']+\n ' missing in the environment.')", "def load_parent_command(name):\n app_name = get_parent_commands()[name]\n module = import_module('%s.management.commands.%s' % (app_name, name))\n return module.Command", "def command():\n pass", "def command(self, function=None, name=None):\r\n if name is None:\r\n return self._command(function)\r\n else:\r\n return partial(self._command, name=name)", "def cmd(self, cmd):\n return cmd", "def run(self, argv: Optional[List[str]] = None) -> Optional[int]:\n if self._name is not None:\n raise CLIError(\"run can only be called on root command\")\n parser = argparse.ArgumentParser(**self._kwargs)\n self.build_parsers(parser)\n\n args = parser.parse_args(argv)\n\n if args._cmd._children:\n # Print help if it has deeper subcommands not specified by the\n # command line arguments yet.\n args._parser.print_help()\n return\n\n cmd: Optional[_Command] = args._cmd\n cmds: List[_Command] = []\n while cmd is not None:\n cmds.append(cmd)\n cmd = cmd._parent\n\n # Process the command handlers from root to leaf.\n for cmd in reversed(cmds):\n # Extract the function parameters from parsed arguments.\n params = inspect.signature(cmd._func).parameters\n unwrapped_args = {k: getattr(args, k) for k in params}\n\n # Invoke the handler and return early if there is an error\n # indicated by return code.\n ret = cmd(**unwrapped_args)\n\n if ret is not None and ret != 0:\n return ret", "def commandInterface():\n\tusername = session[\"username\"]\n\tuserInput = request.form[\"commands\"]\n\toutput = cli.cli(userInput, username)\n\treturn render_template(\"index.html\", output=output)", "def create_command(cmd, args):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tif cls.cmd() == cmd:\n\t\t\t\treturn cls(args)\n\n\t\treturn None", "def LocalCommand(TestinfraBackend):\n return testinfra.get_backend(\"local://\").get_module(\"Command\")", "def __luanch_handlers(self):\n\n self.__updater = Updater(self.__token, use_context=True)\n self.__dp = self.__updater.dispatcher\n # on different commands - answer in Telegram\n self.__dp.add_handler(CommandHandler(\"start\", self.start_message))\n self.__dp.add_handler(CommandHandler(\"help\", self.help))\n self.__dp.add_handler(CommandHandler(\"history\", self.history))\n self.__dp.add_handler(CommandHandler(\"request\", self.request))\n self.__dp.add_handler(CommandHandler(\"cancel\", self.cancel))\n self.__dp.add_handler(CommandHandler(\"show\", self.show))\n self.__dp.add_handler(CommandHandler(\"promote\", self.promote))\n self.__dp.add_handler(CommandHandler(\"demote\", self.demote))\n self.__dp.add_handler(CommandHandler(\"checkadmin\", self.check_admin))\n self.__dp.add_handler(CommandHandler(\"kick\", self.kick))\n self.__dp.add_handler(CommandHandler(\"stop\", self.stop_all))\n 
self.__dp.add_handler(CommandHandler(\"whatsmyid\", self.__whatsmyid))\n self.__updater.start_polling()", "def _default_command(cmds, argv):\n if len(cmds) != 1 or cmds[0].__name__ != DEFAULT_COMMAND:\n return None\n dc = cmds[0]\n spec = inspect.getargspec(dc)\n if not (spec.varargs and spec.keywords):\n return dc\n save_argv = argv[:]\n\n def _wrap_default_command():\n return dc(*save_argv)\n\n del argv[:]\n return _wrap_default_command", "def command(self, index):\n return self._commands[index]", "def get_command(self, player):\n last_output = player._program_output[-1]\n if last_output == \"COMMAND\":\n return self._cmd_main(player)\n elif last_output == \"SHIELD CONTROL INOPERABLE\": # I don;t think this can happen. It always prints \"COMMAND\" after an error\n # TODO Should check all the error messages to COMMAND, like \"SHIELD CONTROL INOPERABLE\", and handle them.\n return self._cmd_main(player) # Pick a different command.\n elif last_output == \"PHOTON TORPEDO COURSE (1-9)\":\n return self._cmd_torpedos(player)\n elif last_output == \"COMPUTER ACTIVE AND AWAITING COMMAND\":\n return self._cmd_computer(player)\n elif last_output == \"COURSE (0-9)\":\n return self._cmd_course(player)\n elif last_output.endswith(\"NUMBER OF UNITS TO SHIELDS\"):\n return self._cmd_shield_units(player)\n elif last_output == \"WARP FACTOR (0-8)\" or last_output == 'WARP FACTOR (0-0.2)':\n return self._cmd_warp(player)\n elif last_output == ' INITIAL COORDINATES (X,Y)' or last_output == ' FINAL COORDINATES (X,Y)':\n return self._cmd_coords(player)\n elif last_output == \"NUMBER OF UNITS TO FIRE\":\n return self._cmd_pha_units(player)\n elif last_output == \"LET HIM STEP FORWARD AND ENTER 'AYE'\":\n return self._cmd_aye(player)\n elif last_output == \"WILL YOU AUTHORIZE THE REPAIR ORDER (Y/N)\":\n return self._cmd_repair(player)\n\n raise Exception(F\"Unknown prompt in trek_bot: '{last_output}'\")", "def get_command(command):\n for _cmd in commands:\n if _cmd.command == command:\n return _cmd\n raise UserWarning(\"telegram command not found.\")", "def dispatch_handler(self, opts: argparse.Namespace) -> int:\n handler_name = getattr(opts, self.handler_dest, None)\n\n if self._prefix:\n handler_name = f\"{self._prefix}:{handler_name}\"\n handler = self._handlers.get(handler_name, self._default_handler)\n\n return handler(opts)", "def command(self) -> TelnetCommand:\n return self._command", "def command(self):\n raise NotImplementedError", "def getListener(self, sender):\n if isinstance(sender, (Player, )):\n return (sender).getHandle()\n if isinstance(sender, (BlockCommandSender, )):\n return (sender).getTileEntity()\n if isinstance(sender, (CommandMinecart, )):\n return ((sender).getHandle()).getCommandBlock()\n if isinstance(sender, (RemoteConsoleCommandSender, )):\n return (MinecraftServer.getServer()).remoteControlCommandListener\n if isinstance(sender, (ConsoleCommandSender, )):\n return (sender.getServer()).getServer()\n if isinstance(sender, (ProxiedCommandSender, )):\n return (sender).getHandle()\n raise IllegalArgumentException(\"Cannot make \" + sender + \" a vanilla command listener\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"command\")", "def create_parser(self, prog_name, subcommand):\r\n # Hack __main__ 
so --help in dev_appserver_main works OK.\r\n sys.modules['__main__'] = dev_appserver_main\r\n return super(Command, self).create_parser(prog_name, subcommand)", "def get_cmd(self):\n return self.cmds.pop(0) if self.cmds else None", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def get_cmd(self, cmd_class, argument_string=\"\"): \n cmd = cmd_class()\n cmd.caller = self.char1\n cmd.cmdstring = cmd_class.key\n cmd.args = argument_string\n cmd.cmdset = None\n cmd.obj = self.char1\n return cmd", "def command(self):\n if self.model is self.model_action:\n return self.command_action\n else:\n return self.command_candidate", "def default_command(self, function):\r\n if Inspection.find_calling_module() == '__main__':\r\n if None in self._commands:\r\n defaults = (self._commands[None].__name__, function.__name__)\r\n raise self.Error('Found two default commands: %s and %s' % defaults)\r\n self._commands[None] = function\r\n return function", "def command_name(self):\n return None", "def onecmd(self, line):\n statement = self.parser_manager.parsed(line)\n funcname = self._func_named(statement.parsed.command)\n if not funcname:\n return self.default(statement)\n try:\n func = getattr(self, funcname)\n except AttributeError:\n return self.default(statement)\n stop = func(statement)\n return stop", "def _class_wrapper(command_class):\n WebBot().register_command(command_class)\n return command_class", "def getRedisHandler(self):\n (redis_ip, redis_port) = self.parserBns()\n return self.getHandlerByIp(redis_ip, redis_port)", "def handle_command(self, command, channel, user):\r\n response = \"Hello. Type \\\"@hexbot help\\\" for more information\"\r\n command = command.split()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n if command[0] == self.HELP_COMMAND:\r\n response = self.help()\r\n elif command[0] == self.DEBUG_COMMAND:\r\n response = self.debug(command, channel);\r\n elif command[0] == self.ASSASSIN_COMMAND:\r\n command.pop(0)\r\n response = self.assassin(command, channel, user);\r\n \r\n return response", "def _get_default_command(self, command):\n # @formatter:off\n command_mapper = {\n 'delimeter1': '>',\n 'delimeter2': '#',\n 'pattern': r\"\\({}.*?\\) (\\(.*?\\))?[{}|{}]\",\n 'disable_paging': 'no pager',\n 'priv_enter': 'enable',\n 'priv_exit': 'disable',\n 'config_enter': 'conf',\n 'config_exit': 'end',\n 'config_check': ')#',\n }\n # @formatter:on\n return command_mapper[command]" ]
[ "0.7392408", "0.69905674", "0.6935066", "0.6817442", "0.6468187", "0.6460933", "0.6309276", "0.62970257", "0.62815976", "0.6280105", "0.6223721", "0.62009555", "0.61951655", "0.6180926", "0.6155766", "0.61281425", "0.60346127", "0.600372", "0.6003017", "0.59750646", "0.58490795", "0.5800881", "0.57802993", "0.5764859", "0.576081", "0.5758173", "0.57425064", "0.57376987", "0.5721055", "0.57077146", "0.5695891", "0.5668519", "0.5634704", "0.5616201", "0.56062955", "0.55969954", "0.5592554", "0.5590298", "0.55885166", "0.5584341", "0.55801755", "0.5567131", "0.5515607", "0.55059135", "0.54983", "0.5495798", "0.54700077", "0.5468675", "0.54671353", "0.545981", "0.54526025", "0.5429561", "0.5423122", "0.54193074", "0.5416747", "0.54130787", "0.54045767", "0.5385809", "0.53767586", "0.5363961", "0.53595406", "0.5358714", "0.53467876", "0.53421324", "0.5341478", "0.5339762", "0.53147906", "0.53112376", "0.52976406", "0.52953786", "0.52950054", "0.5283319", "0.5283165", "0.5277101", "0.5275049", "0.52648205", "0.5259136", "0.52585536", "0.52533334", "0.5249036", "0.5239348", "0.5238923", "0.52359694", "0.52265745", "0.52169925", "0.52160263", "0.5213702", "0.5213702", "0.5213702", "0.5213262", "0.52108824", "0.520868", "0.5206758", "0.52030516", "0.5194282", "0.5184973", "0.51752216", "0.51669705", "0.51558006", "0.51548344", "0.51480776" ]
0.0
-1
View application's service logs.
def logs(app_or_svc, host, uniq, service): try: app, uniq, logtype, logname = app_or_svc.split('/', 3) except ValueError: app, uniq, logtype, logname = app_or_svc, uniq, 'service', service if any(param is None for param in [app, uniq, logtype, logname]): cli.bad_exit('Incomplete parameter list') _host, port = _nodeinfo_endpoint(host) api = 'http://{0}:{1}'.format(host, port) logurl = '/local-app/%s/%s/%s/%s' % ( urllib_parse.quote(app), urllib_parse.quote(uniq), logtype, urllib_parse.quote(logname) ) log = restclient.get(api, logurl) click.echo(log.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLogs():", "def getLogs():", "def logs(self):\n return self.logger.logs()", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def logs(self, shell=False):\n if self.app_id:\n return self.yarn_api.logs(self.app_id, shell=shell)\n else:\n raise KnitException('Cannot get logs, app not started')", "def logs(self, container: Container) -> str:", "def log_services(self):\n return log_service.LogServiceCollection(\n self._conn,\n utils.get_sub_resource_path_by(self, \"LogServices\"),\n redfish_version=self.redfish_version,\n )", "def logs(self, **kwargs):\n return self.client.api.logs(self.id, **kwargs)", "def log(service_name):\n start_time = time.time()\n\n print(_green(\"Started...\"))\n env.environment = None\n while env.environment not in ('Staging', 'Production'):\n environment = prompt('Please specify target environment: ')\n setattr(env, 'environment', environment.strip().capitalize())\n\n try:\n fabconf, env_config = parse_ini('appserver', check_all=False)\n except Exception as e:\n print(_red('Exception parsing config file: {}'.format(str(e))))\n exit()\n env.user = fabconf['SERVER_USERNAME']\n env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']\n\n from recipes.default_appserver import log_services as recipe\n command = recipe['%s' % service_name]\n from misc import _oven\n\n conn = boto.connect_ec2(ec2_key, ec2_secret)\n reservations = conn.get_all_instances()\n instances = [i for r in reservations for i in r.instances]\n for instance in instances:\n tags = instance.tags\n if instance.state == 'running' and 'Env' in tags:\n if tags['Env'] == env.environment and tags['Name'] == 'AppServer':\n print(_yellow('Restarting service on instance: %s' %\n instance.id))\n env.host_string = instance.public_dns_name\n env.user = fabconf['SERVER_USERNAME']\n env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']\n _oven(command)\n\n end_time = time.time()\n print(_green(\"Runtime: %f minutes\" % ((end_time - start_time) / 60)))\n print(_green(env.host_string))", "def GetLogs(self):\n stdout, _, _ = RunKubectlCommand(['logs', self.name])\n return stdout", "def get_logs(self, name):\n logs = self.get_status()\n\n for pod in self.list_pods(namespace=self.project):\n if name in pod.name: # get just logs from pods related to app\n pod_logs = pod.get_logs()\n if pod_logs:\n logs += pod_logs\n\n return logs", "def GetLogs(self):\n raise NotImplementedError()", "def get_logs(self):\n return self.network.get_logs()", "def admin_applog(request):\r\n rdict = request.GET\r\n\r\n # Support optional filter parameters\r\n days = int(rdict.get('days', 1))\r\n status = rdict.get('status', None)\r\n message = rdict.get('message', None)\r\n\r\n log_list = AppLogMgr.find(\r\n days=days,\r\n message_filter=message,\r\n status=status,\r\n )\r\n\r\n ret = {\r\n 'count': len(log_list),\r\n 'logs': [dict(l) for l in log_list],\r\n }\r\n return _api_response(request, ret)", "def getLog(self):\n pass", "def getLog(self):\n return self.session.request('diag/log/')", "def show_logs():\n nodes=hl.getAllNodes();\n\n return render_template('logs.html',nodes = nodes)", "def get_logs(ctx, num):\n app = ctx.obj['app']\n api_client = ctx.obj['api_client']\n colors = dict()\n logs = api_client.get_application_logs(app, lines=num)\n for log in reversed(logs):\n if log['process'] not in colors:\n index = len(colors)\n colors[log['process']] = _available_colors[index % len(_available_colors)]\n for log in logs:\n color = colors[log['process']]\n header = 
click.style('{timestamp} {app_name}[{process}]:'.format(\n timestamp=log['timestamp'],\n app_name=log['app'],\n process=log['process'],\n ), fg=color)\n click.echo('{header} {message}'.format(header=header, message=log['message']))", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def logs(self):\n return self._logs", "def logs(self):\n return self._logs", "def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def debug_logs_get():\n try:\n return flask.Response(debug_logs.collect(), mimetype='text/plain')\n except debug_logs.Error as e:\n return flask.Response('Failed to retrieve debug logs: %s' % str(e),\n status=500)", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def logs(self):\n if not self._logs:\n self.read_logs()\n return self._logs", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! 
\" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def getLog(request):\n # TODO: GET\n data = {}\n return data", "def log():\n return logging.getLogger(\"vodka\")", "def setup_logging():\n logging.basicConfig(\n filename=os.getenv(\"SERVICE_LOG\", \"server.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s\",\n datefmt=\"%d/%m/%y %H:%M:%S\",\n )", "def log():\n return logging.getLogger(__name__)", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def logs(name):\n\n try:\n container = CLIENT.containers.get(name)\n click.secho(str(container.logs()), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def log_route():\n return send_file(path.join('..', 'app.log'), as_attachment=True)", "def view_service(options, service_name, client):\n if options.show_events:\n return display_events(client.service_events(service_name))\n\n service_content = client.service(service_name)\n return display.DisplayServices().format_details(service_content)", "def event_log(self):\n pass", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def get_application_logs(self, destination, tag=None):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def logger(request):\n log = logging.getLogger()\n hdlr = logging.StreamHandler()\n fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'\n formatter = logging.Formatter(fmt)\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.DEBUG)\n log.propagate = False\n\n return log", "def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)", "def dinghy_get_pod_logs(req, resp):\n resp.content = api.template(\n 'pod_logs.html'\n )", "def on_start(self):\r\n self.log()", "def getLog(self):\n \n return self.resp[\"log\"]", "def homepage_log():\n\n return render_template('home_log.html')", "def showAllLogs():\n\t#Add sections to log screen\n\tallLogs=findFiles(getWorkingDirectory(),\".log\")\n\tcounter=-1\n\tfor l in allLogs:\n\t\tcounter+=1\n\t\tbase=getRootName(l)\n\t\tif base in logDict:\n\t\t\tbase=logDict[base]\n\t\t#Add to selection bar\n\t\tlogSelectionBar.addTab(base,command=lambda n=l: displayLog(n))\n\t\t#Store\n\t\tloadedLogs[counter]=l", "def 
container_logs(ctx, token, container_id):\n try:\n out = ctx.obj.container_logs(token, container_id)\n print_message(out)\n except BaseException:\n m = (\"Error: No container related to %s\" %\n container_id)\n print_error(m)", "async def logs(id: UUID):\n page_size = 200\n offset = 0\n more_logs = True\n log_filter = LogFilter(flow_run_id={\"any_\": [id]})\n\n async with get_client() as client:\n # Get the flow run\n try:\n flow_run = await client.read_flow_run(id)\n except ObjectNotFound as exc:\n exit_with_error(f\"Flow run {str(id)!r} not found!\")\n\n while more_logs:\n # Get the next page of logs\n page_logs = await client.read_logs(\n log_filter=log_filter, limit=page_size, offset=offset\n )\n\n # Print the logs\n for log in page_logs:\n app.console.print(\n # Print following the flow run format (declared in logging.yml)\n f\"{pendulum.instance(log.timestamp).to_datetime_string()}.{log.timestamp.microsecond // 1000:03d} | {logging.getLevelName(log.level):7s} | Flow run {flow_run.name!r} - {log.message}\",\n soft_wrap=True,\n )\n\n if len(page_logs) == page_size:\n offset += page_size\n else:\n # No more logs to show, exit\n more_logs = False", "def print_im_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n print_aldb_to_log(insteon_modem.aldb)", "def print_logs(dauth_directory: DauthDirectoryConnection) -> None:\n print(dauth_directory.print_logs())", "def view_log():\n g.title = \"View Log\"\n log = ShotLog().get_text() #log is a generator\n \n return render_template('log_viewer.html',log=log)", "def show_logs_for_running_containers(services, tail):\n if not check_for_docker_compose_file():\n log.info('No running containers found')\n sys.exit(1)\n\n try:\n if tail:\n run_docker_compose_command(['logs', '-f'] + services)\n else:\n run_docker_compose_command(['logs'] + services)\n except KeyboardInterrupt:\n sys.exit(0)", "def get_main_log(self) -> Any:\n return self.logger", "def log (self):\n return self._log", "def get_logs(self, start, end):\n raise NotImplementedError(\"error - not implemented\")", "def get_logs(self, start, end):\n raise NotImplementedError(\"error - not implemented\")", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def log_info():\n # Get an instance of a logger\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('general')", "def service_bus_cli():\n configure_logging()", "def log(self):\r\n return self._log", "def logs(self, data):\n required = {'token', 'container_id'}\n api.validate(data, required)\n token = data['token']\n container_id = data['container_id']\n self.credentials_module.authorize_container(token,\n container_id)\n results = self.docker_module.logs_container(container_id)\n return results", "def cmd_logs(args):\n\n remote.show_log(_get_current_project_name(), num=args.num, tail=args.tail)", "def logs_directory(self):", "def onViewLog(self):\n view_log.ViewLog(self.root, self.log)", "def logs(self) -> Sequence['outputs.GetElasticsearchLogResult']:\n return pulumi.get(self, \"logs\")", "def services_log(slave_id):\n handler = None\n for kwargs in (dict(socktype=socket.SOCK_RAW), dict(socktype=socket.SOCK_STREAM), dict()):\n try:\n handler = logging.handlers.SysLogHandler(\n facility=logging.handlers.SysLogHandler.LOG_LOCAL7, address='/dev/log', **kwargs)\n break\n except (IOError, TypeError):\n pass\n logger = logging.getLogger('[{slave_id}] {name}'.format(name=__name__, slave_id=slave_id))\n 
logger.setLevel(logging.DEBUG)\n if handler:\n logger.propagate = 0\n logger.addHandler(handler)\n return logger", "def logger(self):\n pass", "def endpoint_log(self, endpoint_name=None, since=None):\n if endpoint_name is None:\n url = '/v1.1/endpoint/log'\n else:\n url = '/v1.1/endpoints/%s/log' % endpoint_name\n if since is not None:\n url += '?since=%f' % float(since)\n _, body = self.request(url, 'GET')\n return body", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def log_serviceinfo(logger, info):\n\n try:\n debugging = logger.isEnabledFor(logging.DEBUG)\n log_level = logging.INFO\n\n log_info = {'name': info.name,\n 'address': socket.inet_ntoa(info.addresses[0]),\n 'port': info.port}\n log_hdr = \"\\n {address}:{port} {name}\\n\"\n log_fmt = log_hdr\n\n if debugging:\n log_level = logging.DEBUG\n if info.server != info.name:\n log_info['server'] = info.server\n log_fmt += \" server: {server}\\n\"\n\n for (k, v) in info.properties.items():\n li_k = \"prop_\" + bytes2str(k)\n log_info[li_k] = v\n log_fmt += \" {k}: {{{li_k}}}\\n\".format(k=k, li_k=li_k)\n\n logger.log(log_level, log_fmt.format(**log_info))\n\n except:\n logger.exception(\"exception in log_tivo_serviceinfo\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def history_list(name):\n service_histories = request_service_history(name)\n table = present(lambda: service_histories,\n renderer='table',\n headers=['History Version', 'Service Name', 'Date Created', 'Manifest'],\n columns=['id', 'name', 'created_at', 'manifest'])\n if table:\n click.echo(table)\n else:\n click.echo('There is no record of your service deployments available.')\n # click.echo('https://docs.fandogh.cloud/docs/services.html\\n')", "def get_server_logs(self, server_id):\n status, data, errors, messages = self._make_get_request(MCAPIRoutes.GET_LOGS, extra_params={'id': server_id})\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def log(self):\n return self._log", "def log(self):\n return self._log", "def logs(request):\n def get_timestamp(data):\n return data.get('timestamp')\n\n events = []\n for record in InstallationRecord.objects.all():\n if record.profile:\n resource = record.profile\n resource_type = \"(Configuration Profile) \"\n else:\n resource = record.app\n if record.version:\n resource_type = \"(\" + record.version + \") \"\n else:\n resource_type = \"\"\n if record.active:\n obj = {'timestamp': record.installed_on,\n 'details': resource.name + \" \" + resource_type + \"was installed on \" + record.device.name}\n events.append(obj)\n else:\n obj = {'timestamp': record.expires,\n 'details': resource.name + \" \" + resource_type + \"was removed from \" + record.device.name}\n events.append(obj)\n obj = {'timestamp': record.installed_on,\n 'details': resource.name + \" \" + resource_type + \"was installed on \" + record.device.name}\n events.append(obj)\n events.sort(key=get_timestamp, reverse=True)\n\n paginator = Paginator(events, 50)\n 
page_number = request.GET.get('page', 1)\n current_page = paginator.get_page(page_number)\n context = {'headers': ['Timestamp', 'Event'], 'title': 'Install Log', 'events': current_page}\n return render(request, 'access_log.html', context)", "def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log", "def get_full_log(self):\n return self._get_log('full')", "def get_lines(self):\n return self._container.logs(stream=True)", "def info_log(self):\n return self._info_log", "def all_services(self):\n services = oc.all_service_names()\n for s in services:\n print(s)\n print(\"#total\", len(services))", "def getLogSession(self):\n return self.session.request('diag/logSession/')", "def show(self):\n self._logger.debug(\"show\")", "def log():\n return flask.send_from_directory(\"static\", \"logs.html\")", "def logs(self, task: RemoteTask) -> Iterable[str]:\n raise NotImplementedError()", "def viewLog(self, event):\n logcontent = \"\"\n if Config.GetOption(\"ActLog\") == True:\n\n logFrame = wx.Frame(None, -1, \"View Log\", size=(500, 500))\n panel5 = wx.Panel(logFrame)\n data = wx.richtext.RichTextCtrl(panel5, pos=(0, 0), size=(500,\n 500))\n data.AppendText(Log.ReadLog())\n logFrame.Centre()\n logFrame.Show()\n else:\n\n inform = wx.MessageDialog(None,\n \"The Log is disabled!\\\n \\nEnable it to view.\",\n \"Log Status\", wx.OK)\n inform.ShowModal()", "def get_logs(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogList(self._results, runtime=self._runtime)", "def add_logger_stdout(app):\n\n f = ContextFilter()\n app.logger.addFilter(f)\n\n stdout_handler = logging.StreamHandler(sys.stdout)\n FORMAT = '%(asctime)s %(hostname)s {0} :%(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'.format('Tester')\n formatter = logging.Formatter(FORMAT, datefmt='%Y-%m-%dT%H:%M:%S')\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(logging.INFO)\n stdout_handler._name = 'StreamHandler'\n app.logger.addHandler(stdout_handler)", "def kamel_logs():\n task_name = request.args.get('name')\n if task_name is None:\n return response(\"Need to specify task name!\")\n client = create_client()\n stdin, stdout, stderr = client.exec_command(f\"/usr/local/bin/kamel logs {task_name}\") # noqa\n time.sleep(1)\n stdout.channel.close()\n body = stdout.read().decode(\"utf-8\")\n return response(body)", "def view_the_log() -> 'html':\n try:\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL = \"\"\"select phrase, letters, ip, browser_string, results\n from log \"\"\"\n cursor.execute(_SQL)\n contents = cursor.fetchall()\n titles = ('Phrase', 'Letters', 'Remote_addr','User_agent','Results')\n return render_template('viewlog.html',\n the_title = 'View Log',\n the_row_titles = titles,\n the_data = contents)\n except ConnectionError as err:\n print('Is your database switched on? Error:', str(err))\n #return 'Is your database switched on? Error: '+ str(err)\n except CredentialsError as err:\n print('Is your credentials right? Error:', str(err))\n #return 'Is your credentials right? Error:'+ str(err)\n except SQLError as err:\n print('Is your query correct? Error:', str(err))\n #return 'Is your query correct? 
Error:'+ str(err)\n except Exception as err:\n print('Something went wrong:', str(err))\n #return 'Something went wrong:'+ str(err)\n return 'Error'", "def tail(name):\n\n try:\n container = CLIENT.containers.get(name)\n for line in container.logs(stream=True):\n click.secho(line.strip(), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def do_logs(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['stdout'] = args.stdout\n opts['stderr'] = args.stderr\n opts['since'] = args.since\n opts['timestamps'] = args.timestamps\n opts['tail'] = args.tail\n opts = zun_utils.remove_null_parms(**opts)\n logs = cs.containers.logs(**opts)\n print(logs)", "def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response", "def lb_logs(ctx, lb, last, search):\n\n search_items = check_search_argument(search)\n\n from opstools.aws import lb_logs as this_ec2_list\n this_ec2_list.main(lb, last, search_items)" ]
[ "0.68098724", "0.68098724", "0.64053893", "0.6395825", "0.630246", "0.6281809", "0.6209214", "0.62079066", "0.611495", "0.61128664", "0.6103324", "0.60686356", "0.6053491", "0.6024822", "0.60207194", "0.5982966", "0.5934909", "0.5930149", "0.59281075", "0.59259474", "0.59259474", "0.5906575", "0.58693856", "0.58635455", "0.58498734", "0.5847194", "0.58307284", "0.57958484", "0.57682556", "0.5759328", "0.57544404", "0.5728511", "0.5727895", "0.5726829", "0.56995255", "0.5696627", "0.56872934", "0.5686603", "0.56740123", "0.5667079", "0.56519526", "0.5642251", "0.56394845", "0.562933", "0.5617266", "0.5612262", "0.5580046", "0.557741", "0.5576161", "0.55667484", "0.5559552", "0.55516636", "0.5540736", "0.553608", "0.5534876", "0.5525734", "0.552194", "0.5486517", "0.5471473", "0.5471473", "0.54704005", "0.54704005", "0.5460734", "0.5459001", "0.5451914", "0.5448479", "0.544609", "0.54445535", "0.5444437", "0.54414195", "0.5439506", "0.54324186", "0.5425694", "0.54194003", "0.5411583", "0.539208", "0.5389592", "0.5387629", "0.53872424", "0.5387008", "0.5387008", "0.5370828", "0.53683305", "0.53658813", "0.5364442", "0.5349781", "0.5345224", "0.5343253", "0.5327988", "0.53200644", "0.5319979", "0.5317838", "0.5316672", "0.5305218", "0.52988535", "0.5295249", "0.52932805", "0.52757984", "0.5273531", "0.52542716" ]
0.6984841
0
Find nodeinfo endpoint on host
def _nodeinfo_endpoint(host): zkclient = context.GLOBAL.zk.conn nodeinfo_zk_path = '{}/{}'.format(z.ENDPOINTS, 'root') for node in zkclient.get_children(nodeinfo_zk_path): if 'nodeinfo' in node and host in node: data, _metadata = zkclient.get( '{}/{}'.format(nodeinfo_zk_path, node) ) return data.decode().split(':')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getHostInfo():", "def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details", "def getHost():", "def getHost():", "def test_get_node_internal_ip_address(self):\n pass", "def lookup(self, ip, port):\n for entry in self.cqcNet.hostDict:\n node = self.cqcNet.hostDict[entry]\n if (node.ip == ip) and (node.port == port):\n return node.name\n\n logging.debug(\"CQC %s: No such node\", self.name)\n return None", "def getaddrinfo(host: str, port: int) -> List:\n ...", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def test_get_host(self):\n pass", "def opencloud_fetch_host_info( hostname ):\n raise Exception(\"Opencloud support not implemented\")", "def discovery_endpoint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"discovery_endpoint\")", "def get_host(uri, token):\n if not uri.startswith('/api'):\n host, tls, port = SERVICE_MAP.get('docs')\n else:\n uri_layers = uri.split('/')\n if uri_layers[SERVICE] == 'inf':\n host, tls, port = SERVICE_MAP.get(uri_layers[SERVICE_SUBGROUP], NO_RECORD)\n else:\n # services like \"auth\" and \"link\" are their own group; only 'inf' has subgroups\n host, tls, port = SERVICE_MAP.get(uri_layers[SERVICE], NO_RECORD)\n if host == 'UNKNOWN':\n host = _user_ipam_server(token)\n return host, tls, port", "def get_node_ip(\n self,\n name,\n ):\n pass", "def describe_endpoint(EndpointName=None):\n pass", "def get(self, node, endpoint=\"\"):\n if not node:\n return\n for router in self.router.routers:\n if router.node == node:\n return router.render_peers()", "def getRemoteHost():", "def get_host_info(self, args, get_all=False):\n return None", "def fusion_api_get_hypervisor_host(self, uri=None, param='', api=None, headers=None): # pylint: disable=unused-argument\n return self.hypervisor_host.get(uri, api, headers, param='')", "def _localhost():\n s = socket\n infos = s.getaddrinfo(\n None, 0, s.AF_UNSPEC, s.SOCK_STREAM, 0, s.AI_ADDRCONFIG\n )\n (family, _, _, _, address) = infos[0]\n nodename = address[0]\n return (family, nodename)", "def _node_host_interface(self, node):\n return self._get_host_interface(node.hostname, node.address)", "def get_hostname(self):\n module = 'hostname'\n method = 'GET'\n response = 
self.axapi_call(module, method)\n hostname = response.json()['hostname']['value']\n print(self.device + ' Device hostname is: ' + hostname)", "def host():\n return platform.node()", "def get_node_by_server(self, ip, port, is_register=False):\n # print('List of nodes in stream ', self.get_server_address())\n # for node in self.nodes:\n # print(node.get_server_address(), node.is_register)\n node_address = (Node.parse_ip(ip), port)\n for node in self.nodes:\n if node.get_server_address() == node_address and node.is_register == is_register:\n return node\n return None", "def __get_otp_node(rest, target_node):\n nodes = rest.node_statuses()\n for node in nodes:\n if node.ip == target_node.ip:\n return node", "def getAddress(self):\n return self.hostname, self.port", "def get_hostname():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/hostname\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def default_node_detector():\n ret = []\n try:\n hostname = socket.gethostname()\n ret.append(hostname)\n except socket.error:\n pass\n\n try:\n fqdn = socket.getfqdn()\n if fqdn not in ret:\n ret.append(fqdn)\n except socket.error:\n pass\n\n if any(ret):\n return ret\n else:\n return None", "def get_nodes():\n return conf.config.get_nodes(RELATIVE_PATH_FIXTURES_HOST)", "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def hostname_get():\n try:\n return json_response.success({'hostname': hostname.determine()})\n except hostname.Error as e:\n return json_response.error(str(e)), 200", "def get_hostname(config):\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint", "def __get_node_ip(self, hostname):\n try:\n command = \"grep -w \" + hostname + \" /etc/hosts | awk {'print $1'}\"\n child = subprocess.Popen(command, stdout = subprocess.PIPE, \\\n stderr = subprocess.PIPE, shell = True)\n std_out, std_err = child.communicate()\n return std_out.strip()\n except Exception as err:\n self.logger.error(\"Error occurred while getting ip of node:%s\" %err)\n return \"\"", "def GetServerHost():\n return GetHostName(True)", "def test_get_host_access(self):\n pass", "def get_nodes_info(self, ctxt):\n cctxt = self.client.prepare(server=DEFAULT_SERVER, timeout=RPC_TIMEOUT)\n return cctxt.call(ctxt, \"get_nodes_info\")", "def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 
'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None", "def ex_describe_addresses_for_node(self, node):\n node_elastic_ips = self.ex_describe_addresses([node])\n return node_elastic_ips[node.id]", "def getRequestHostname():", "def RemoteEndPoint(self) -> _n_5_t_2:", "def parse_endpoint(endpoint):\n\n host, port = endpoint[0].split(':')\n return host, port", "def get_host_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_HOST_INFO)", "def get_ip_address(host):\n for h in host:\n ip = h.address['addr']\n return ip", "def get_node_ip(project, node):\n with BMI(_username, _password, project) as bmi:\n ret = bmi.get_node_ip(node)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(ret[constants.RETURN_VALUE_KEY])\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def lookup(self,hostname):\n\t\tfor block in self.blockchain.chain:\n\t\t\ttransactions = block['transactions']\n\t\t\tfor transaction in transactions:\n\t\t\t\t# print(transaction)\n\t\t\t\tif 'hostname' in transaction and transaction['hostname'] == hostname:\n\t\t\t\t\treturn (transaction['ip'],transaction['port'])\n\t\traise LookupError('No existing entry matching hostname')", "def get_monitoring_url(self):\n return \"http://{0}:{1}\".format(self.get_head_node_ip(), self.MONITOR_PORT)", "def get(self, request, nnid, wfver, desc):\n try:\n return_data = NNCommonManager().get_nn_node_info(nnid, wfver, desc)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))", "def GetNodeInfo(self, hvparams=None):\n return self.GetLinuxNodeInfo()", "def host_info(self, host):\n\n endpoint = '/Domain/Host/Info'\n\n params = {\n 'Host' : host,\n }\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def RemoteEndPoint(self) -> _n_5_t_1:", "def RemoteEndPoint(self) -> _n_5_t_1:", "def get_host(name):\n raise NotImplementedError('derived class should overload me')", "def fusion_api_get_ha_nodes(self, uri=None, param='', api=None, headers=None):\n return self.ha_nodes.get(uri=uri, api=api, headers=headers, param=param)", "def test_get_host_configuration_metrics(self):\n pass", "def get_node(conn, name):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_servers(datacenter_id)[\"items\"]:\n if item[\"properties\"][\"name\"] == name:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n return node", "def describe_endpoint_config(EndpointConfigName=None):\n pass", "def get_info_by_node(conn, node): \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Info WHERE NodeID=?\", (node))", "def rpc_info():", "def get_host(self):\r\n return self.host", "def _get_base_endpoint_name(self):", "def get_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def display_nodes(nodes):\n for node in nodes:\n print(f'{node.name} has an IP address of {node.address}.')", "def build_host( self, node ):\n\n try:\n\n if node.url is not None:\n \n NodeId = node.url\n if self.state[ 'probe' ].resolve_a( node ) is None: return node\n\n elif node.a_records is not None:\n\n NodeId = node.a_records[0]\n if self.state[ 'probe' ].resolve_ptr( node ) is None: return node \n\n else:\n self._log( 
'build_host', 'DEBUG', 'Empty host object detected, unable to process {}'.format( node ) )\n\n # Pull Coords If Geoip Available\n if self.state[ 'geoip' ] is not None:\n self.state[ 'probe' ].pull_geoip( node, self.state[ 'geoip' ] )\n\n # Ignore everything without an exchange\n if self.state[ 'probe' ].resolve_mx( node ) is None: return None \n\n # Pull down our TXT records\n if self.state[ 'probe' ].resolve_txt( node ) is None: return node\n\n except:\n self._log( 'build_host', 'DEBUG', 'Lookup has failed for {}'.format( NodeId ) )\n\n return node", "def execute(connection_info: NodeConnectionInfo) -> dict:\n response = rpc_client.request(\n connection_info.address_rpc,\n constants.RPC_INFO_GET_PEERS\n )\n\n return response.data.result[\"peers\"]", "def getEndpoint(self, tag):\r\n if tag in self.robots:\r\n return self.robots[tag]\r\n elif tag in self.containers:\r\n return self.containers[tag]\r\n else:\r\n raise InvalidRequest('Can not get a non existent endpoint '\r\n \"'{0}'.\".format(tag))", "def get_api_endpoint(self):\n return \"tcp://%s:%s\" % (self.ip, self.port)", "def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]", "def getNodeDNS(self,node):\n data = self.connect('get','nodes/%s/dns' % (node),None)\n return data", "def get_endpoint(self, *args):\n\t\traise NotImplementedError", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_node_id(host):\n\n for h in host:\n\n mac_address = h.find(\"address\").nextSibling\n address = repr(mac_address)\n if 'addrtype=\"mac\"' in address:\n host_id = address.split('\\n')[0].split('\"')[1]\n return host_id", "def endpoint_details(self) -> Optional[pulumi.Input['ServerEndpointDetailsArgs']]:\n return pulumi.get(self, \"endpoint_details\")", "def endpoint_details(self) -> Optional[pulumi.Input['ServerEndpointDetailsArgs']]:\n return pulumi.get(self, \"endpoint_details\")", "def getipaddrs(hostname):\n result = socket.getaddrinfo(hostname,None,0,socket.SOCK_STREAM)\n return [x[4][0] for x in result]", "def fetch_rest_url(self, url):\n print(\"This is a Capella run. Finding the srv domain for {}\".format(url))\n srv_info = {}\n srv_records = dns.resolver.resolve('_couchbases._tcp.' + url, 'SRV')\n for srv in srv_records:\n srv_info['host'] = str(srv.target).rstrip('.')\n srv_info['port'] = srv.port\n print(\"This is a Capella run. 
Srv info {}\".format(srv_info))\n return srv_info['host']", "def pull_info(task):\n\n interface_result = task.run(task=send_command, command=\"show interfaces\")\n task.host[\"facts\"] = interface_result.scrapli_response.genie_parse_output()\n interfaces = task.host[\"facts\"]\n for interface in interfaces:\n try:\n mac_addr = interfaces[interface][\"mac_address\"]\n if target == mac_addr:\n target_list.append(mac_addr)\n intf = interface\n print_info(task, intf)\n except KeyError:\n pass", "def node():\n return uname().node", "def node():\n return uname().node", "def _node(default=''):\n try:\n import socket\n except ImportError:\n # No sockets...\n return default\n try:\n return socket.gethostname()\n except OSError:\n # Still not working...\n return default", "def getHostKey(instance):\n return instance['hostname']", "def endpoint_details(self) -> pulumi.Output[Optional['outputs.ServerEndpointDetails']]:\n return pulumi.get(self, \"endpoint_details\")", "def _get_local_endpoint():\n return \"https://%s:8446\" % socket.getfqdn()", "def usage(self, host):", "def _LookupPeer(self, peer_id):\n key = self._GetServerKey(peer_id)\n values, placemark = self._dht.Get(key)\n if not values:\n raise NessieError('No peers returned for user id %r.' % peer_id)\n # NOTE(damonkohler): Need to accomodate for the possibility of multipe\n # values.\n value = self._Decrypt(values[0])\n host, port = value.split(':')\n port = int(port)\n return host, port", "def get_discovery_summary():\n pass", "def read_from_which_host(self, client):\n cursor = client.pymongo_test.test.find()\n next(cursor)\n return cursor.address", "def instance_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]},\n {\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'PublicDnsName' in item:\n return item['PublicDnsName']\n return None", "def get_hosts(self, target, listener_type):", "def LocalEndpoint(self) -> _n_5_t_1:", "def compute_node_get_by_host(context, host):\n session = get_session()\n with session.begin():\n service = session.query(models.Service).\\\n filter_by(host=host, binary=\"monitor-bmc\").first()\n node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter_by(deleted=False,service_id=service.id)\n return node.first()", "def get_es_node_addresses():\n zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)\n zk.start()\n\n esNodes = []\n try:\n\n #Fetch the list of ES cluster node names from Zookeeper\n zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'\n children = zk.get_children(zkPath)\n\n #Retrieve the JSON metadata associated with each ephemeral ES node\n for node in children:\n zookeeperAddr = zkPath + '/' + node\n esNodeInfo = zk.get(zookeeperAddr)\n jsonData = json.loads(esNodeInfo[0])\n\n #Collect each node ip address and port\n esNodes.append(jsonData['address'] + ':' + jsonData['port'])\n\n except KazooException:\n log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc());\n\n zk.stop()\n zk.close()\n\n log('ES Node list retrieved from Zookeeper :: ' + json.dumps(esNodes))\n\n return esNodes", "def api_endpoint():\n return 'localhost'", "def host_discover(self):\n 
self._scanned = True\n return self._scanner.scan(self._ips, arguments='-sP')", "def get_redshift_endpoint_info(redshift, cluster_props):\n redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]\n\n DWH_ENDPOINT = cluster_props['Endpoint']['Address']\n DWH_ROLE_ARN = cluster_props['IamRoles'][0]['IamRoleArn']\n #print(\"DWH_ENDPOINT:\", DWH_ENDPOINT)\n #print(\"DWH_ROLE_ARN:\", DWH_ROLE_ARN)\n return (DWH_ENDPOINT, DWH_ROLE_ARN)", "def fusion_api_get_hypervisor_host_profile(self, uri=None, param='', api=None, headers=None):\n return self.host_profile.get(uri, api, headers, param)", "def get_info_of_url(url):\n pass", "def obtain_port_correspondence(self):\n try:\n of_response = requests.get(self.url + \"restconf/operational/opendaylight-inventory:nodes\",\n headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)\n self.logger.debug(\"obtain_port_correspondence \" + error_text)\n info = of_response.json()\n\n if not isinstance(info, dict):\n self.logger.error(\"obtain_port_correspondence. Unexpected response not a dict: %s\", str(info))\n raise OpenflowConnUnexpectedResponse(\"Unexpected openflow response, not a dict. Wrong version?\")\n\n nodes = info.get('nodes')\n if not isinstance(nodes, dict):\n self.logger.error(\"obtain_port_correspondence. Unexpected response at 'nodes', \"\n \"not found or not a dict: %s\", str(type(nodes)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes',not found or not a dict. \"\n \"Wrong version?\")\n\n node_list = nodes.get('node')\n if not isinstance(node_list, list):\n self.logger.error(\"obtain_port_correspondence. Unexpected response, at 'nodes':'node', \"\n \"not found or not a list: %s\", str(type(node_list)))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response, at 'nodes':'node', not found or not a list.\"\n \" Wrong version?\")\n\n for node in node_list:\n node_id = node.get('id')\n if node_id is None:\n self.logger.error(\"obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', \"\n \"not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'id', not found. \"\n \"Wrong version?\")\n\n if node_id == 'controller-config':\n continue\n\n # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value\n # of the dpid\n # In case this is not the desired switch, continue\n if self.id != node_id:\n continue\n\n node_connector_list = node.get('node-connector')\n if not isinstance(node_connector_list, list):\n self.logger.error(\"obtain_port_correspondence. Unexpected response at \"\n \"'nodes':'node'[]:'node-connector', not found or not a list: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:'node-connector', \"\n \"not found or not a list. Wrong version?\")\n\n for node_connector in node_connector_list:\n self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id'])\n self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name'])\n\n node_ip_address = node.get('flow-node-inventory:ip-address')\n if node_ip_address is None:\n self.logger.error(\"obtain_port_correspondence. 
Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found: %s\", str(node))\n raise OpenflowConnUnexpectedResponse(\"Unexpected response at 'nodes':'node'[]:\"\n \"'flow-node-inventory:ip-address', not found. Wrong version?\")\n\n # If we found the appropriate dpid no need to continue in the for loop\n break\n\n # print self.name, \": obtain_port_correspondence ports:\", self.pp2ofi\n return self.pp2ofi\n except requests.exceptions.RequestException as e:\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnConnectionException(error_text)\n except ValueError as e:\n # ValueError in the case that JSON can not be decoded\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"obtain_port_correspondence \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)", "def get_es_node_addresses(self):\n esNodes = []\n\n #Initlate the Zookeeper Kazoo connection\n #kz_retry = KazooRetry(max_tries=3, delay=0.5, backoff=2)\n zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)\n zk.start()\n\n try:\n\n #Fetch the list of ES cluster node names from Zookeeper\n zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'\n children = zk.get_children(zkPath)\n\n #Retrieve the JSON metadata associated with each ephemeral ES node\n for node in children:\n zookeeperAddr = zkPath + '/' + node\n esNodeInfo = zk.get(zookeeperAddr)\n jsonData = json.loads(esNodeInfo[0])\n\n #Collect each node ip address and port\n host = {'host':jsonData['address'], 'port': int(jsonData['port'])}\n esNodes.append(host)\n\n except KazooException:\n log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc());\n\n #Close and Zookeeper connection\n zk.stop()\n zk.close()\n\n return esNodes", "def get_config(hostname=get_hostname()):\n for doc in load():\n if doc['name'] == hostname:\n return doc\n elif hostname == \"upload_tsm\":\n return hostname\n raise LookupError(\"Unknown host %s\" % hostname)", "def main_endpoint(request, node_id):\n status = node.infotable['status']\n request.setResponseCode(status)\n\n latency = node.infotable['latency']\n if latency > 0:\n time.sleep(latency)\n\n node.make_requests()\n\n return node.node_id", "def connection_details(self):\n try:\n self.open(\"https://ipinfo.io/json\")\n self.log.debug(\"IPINFO Server returned (%s)\", self.response().content)\n res = json.loads(self.response().content.decode('utf-8'))\n except (requests.exceptions.ProxyError,\n requests.exceptions.ConnectionError):\n return {'ip': 'Unknown'}\n except ValueError:\n self.log.error(\"Server returned no JSON (%s)\", self.response().content)\n return {'ip': 'Unknown'}\n except Exception as exc: # TODO\n self.log.error(\"Unknown exception %s\", exc)\n return {'ip': 'Unknown'}\n else:\n return res" ]
[ "0.7210888", "0.66543496", "0.6595249", "0.6595249", "0.6384198", "0.62400854", "0.6236083", "0.6164553", "0.6050032", "0.6024226", "0.5987261", "0.5953469", "0.593182", "0.5915911", "0.59002453", "0.5839691", "0.58132106", "0.57908964", "0.5767918", "0.5744465", "0.57380223", "0.56938416", "0.56692904", "0.56422174", "0.5637466", "0.56145126", "0.5609382", "0.5606128", "0.5605112", "0.5604234", "0.55814123", "0.55685395", "0.55683416", "0.5553571", "0.5547536", "0.55454993", "0.55390924", "0.5531198", "0.55305094", "0.5529746", "0.5526134", "0.5519123", "0.5511199", "0.55017847", "0.5495942", "0.54950756", "0.54922515", "0.5484635", "0.54812264", "0.54812264", "0.5475731", "0.54743487", "0.54483944", "0.5440776", "0.543795", "0.5434381", "0.54296845", "0.54176205", "0.54125684", "0.54088044", "0.54027075", "0.5399018", "0.5388631", "0.538165", "0.538049", "0.5372923", "0.5371311", "0.5370528", "0.53669745", "0.53609985", "0.5359717", "0.5359717", "0.53576213", "0.53571767", "0.5356474", "0.5346774", "0.5346774", "0.53425413", "0.5331621", "0.5330638", "0.5314453", "0.5309984", "0.530618", "0.5294421", "0.5293455", "0.528882", "0.52887547", "0.52762306", "0.52575934", "0.5254994", "0.52444017", "0.5241461", "0.52413774", "0.52387846", "0.5237119", "0.52340496", "0.52308685", "0.52234584", "0.52200997", "0.52190673" ]
0.7731481
0
dropout + batch norm + l1_l2
def architecture_CONV_FC_batch_norm_dropout_L1_l2( X, nbclasses, nb_conv=1, nb_fc=1, kernel_initializer="random_normal" ): # input size width, height, depth = X.shape input_shape = (height, depth) # parameters of the architecture l1_l2_rate = 1.0e-3 dropout_rate = 0.5 conv_kernel = 3 conv_filters = 64 nbunits_fc = 128 activation = relu kernel_initializer = kernel_initializer model = Sequential( name=f"""{str(nb_conv)}__CONV_k{str(conv_kernel)}_ {str(nb_fc)}_initializer_{kernel_initializer}_ _FC128_bn_d_{str(dropout_rate)}""" ) model.add( Conv1D( input_shape=input_shape, activation=activation, kernel_initializer=kernel_initializer, kernel_regularizer=l1_l2(l1_l2_rate), kernel_size=conv_kernel, filters=conv_filters, ) ) model.add(BatchNormalization()) # if more covolutional layers are defined in parameters if nb_conv > 1: for _layer in range(nb_conv): model.add( Conv1D( kernel_size=conv_kernel, filters=conv_filters, activation=activation, kernel_regularizer=l1_l2(l1_l2_rate), ) ) model.add(BatchNormalization()) # Flatten + FC layers model.add(Flatten()) for _layer in range(nb_fc): model.add( Dense( nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation ) ) model.add(Dropout(dropout_rate)) model.add(Dense(nbclasses, activation=softmax)) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def architecture_CONV_FC_batch_norm_dropout_L1_l2_LEAKY_ReLU(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _LEAKY_ReLU\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(Dense(nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate)))\n model.add(LeakyReLU())\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_TANH(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = tanh\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_\n CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _TANH\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / 
test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, 
n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in 
bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = 
len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def build_generator(latent_dim=100):\n # The weight initialization and the slope are chosen to accord with the\n # Parameters in the paper. I only change padding when it seems neccesary to\n # to mantain adequate dimensons. 
\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.3\n \n inputs = keras.Input(shape=(1,1,100))\n # First convolutional layer\n x = Conv2DTranspose(\n 1024, \n kernel_size=(4,4), \n strides=1, \n kernel_initializer=weight_initializer,\n padding='valid',\n use_bias=False\n )(inputs)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Second convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 512,\n kernel_size = 4,\n strides = (2,2),\n padding = 'same',\n use_bias = False\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 256,\n kernel_size = 5,\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 128,\n kernel_size = (5,5),\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fifth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 3,\n kernel_size = (5,5),\n use_bias=False,\n strides = (2,2),\n padding = 'same',\n activation='tanh'\n )(x)\n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss", "def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n if type(hidden_dims) != list:\n raise ValueError('hidden_dim has to be a list')\n\n self.L = len(hidden_dims) + 1\n self.N = input_dim\n self.C = num_classes\n dims = [self.N] + hidden_dims + [self.C]\n Ws = {'W' + str(i + 1):\n weight_scale * np.random.randn(dims[i], dims[i + 1]) for i in range(len(dims) - 1)}\n b = {'b' + str(i + 1): np.zeros(dims[i + 1])\n for i in range(len(dims) - 1)}\n\n self.params.update(b)\n self.params.update(Ws)\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). 
You can pass the same dropout_param to each dropout layer.\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def masked_l2(preds, actuals, mask):\n loss = tf.nn.l2(preds, actuals)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def loss_false(code_batch, k=1):\n\n _, n_latent = code_batch.get_shape()\n\n # changing these parameters is equivalent to changing the strength of the\n # regularizer, so we keep these fixed (these values correspond to the\n # original values used in Kennel et al 1992).\n rtol = 20.0\n atol = 2.0\n # k_frac = 0.01\n # n_batch = tf.cast(tf.keras.backend.shape(code_batch)[0], tf.float32)\n # assert False, n_batch\n # k = max(1, int(k_frac * n_batch))\n\n ## Vectorized version of distance matrix calculation\n tri_mask = tf.linalg.band_part(tf.ones((n_latent, n_latent), tf.float32), -1, 0)\n batch_masked = tf.multiply(tri_mask[:, tf.newaxis, :], code_batch[tf.newaxis, ...])\n X_sq = tf.reduce_sum(batch_masked * batch_masked, axis=2, 
keepdims=True)\n pdist_vector = (\n X_sq\n + tf.transpose(X_sq, [0, 2, 1])\n - 2 * tf.matmul(batch_masked, tf.transpose(batch_masked, [0, 2, 1]))\n )\n all_dists = pdist_vector\n all_ra = tf.sqrt(\n (1 / (tf.range(1, 1 + n_latent, dtype=tf.float32)))\n * tf.squeeze(\n tf.reduce_sum(\n tf.square(tf.math.reduce_std(batch_masked, axis=1, keepdims=True)),\n axis=2,\n )\n )\n )\n\n # Avoid singularity in the case of zeros\n all_dists = tf.clip_by_value(all_dists, 1e-14, tf.reduce_max(all_dists))\n\n # inds = tf.argsort(all_dists, axis=-1)\n _, inds = tf.math.top_k(-all_dists, int(k + 1))\n # top_k currently faster than argsort because it truncates matrix\n\n neighbor_dists_d = tf.gather(all_dists, inds, batch_dims=-1)\n neighbor_new_dists = tf.gather(all_dists[1:], inds[:-1], batch_dims=-1)\n\n # Eq. 4 of Kennel et al.\n scaled_dist = tf.sqrt(\n (neighbor_new_dists - neighbor_dists_d[:-1]) / neighbor_dists_d[:-1]\n )\n\n # Kennel condition #1\n is_false_change = scaled_dist > rtol\n # Kennel condition 2\n is_large_jump = neighbor_new_dists > atol * all_ra[:-1, tf.newaxis, tf.newaxis]\n\n is_false_neighbor = tf.math.logical_or(is_false_change, is_large_jump)\n total_false_neighbors = tf.cast(is_false_neighbor, tf.int32)[..., 1 : (k + 1)]\n\n # Pad zero to match dimensionality of latent space\n reg_weights = 1 - tf.reduce_mean(\n tf.cast(total_false_neighbors, tf.float64), axis=(1, 2)\n )\n reg_weights = tf.pad(reg_weights, [[1, 0]])\n\n # Find average batch activity\n activations_batch_averaged = tf.sqrt(tf.reduce_mean(tf.square(code_batch), axis=0))\n\n # L2 Activity regularization\n activations_batch_averaged = tf.cast(activations_batch_averaged, tf.float64)\n loss = tf.reduce_sum(tf.multiply(reg_weights, activations_batch_averaged))\n\n return tf.cast(loss, tf.float32)", "def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, dropout_name=None):\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob, dropout_name=dropout_name)\n return output_tensor", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_SIGMOID(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = sigmoid\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_\n {str(conv_kernel)}_{str(nb_fc)}\n _FC128_bn_d_{str(dropout_rate)}\n _SIGMOID\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, 
seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #\n # initialized from a normal distribution centered at 0 with standard #\n # deviation equal to weight_scale. Biases should be initialized to zero. #\n # #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n dimension = [input_dim] + hidden_dims + [num_classes]\n for i in range(1, self.num_layers+1):\n self.params['W{0}'.format(i)] = weight_scale * np.random.randn(dimension[i-1], dimension[i])\n self.params['b{0}'.format(i)] = np.zeros(dimension[i])\n\n if self.normalization in ['batchnorm', 'layernorm']:\n self._batchnormInit()\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def batchnorm_init(m, kernelsize=3):\r\n n = kernelsize**2 * m.num_features\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ (n)))\r\n m.bias.data.zero_()", "def _optimization(dataset1, dataset2, nb_epochs=3000):\n\n x1_mean = dataset1['data'].mean()\n x1_std = dataset1['data'].std()\n x1 = (dataset1['data'] - x1_mean) / (x1_std)\n y1 = dataset1['labels']\n Y1 = dataset1['hot_labels']\n\n x2_mean = dataset2['data'].mean()\n x2_std = dataset2['data'].std()\n x2 = (dataset2['data'] - x2_mean) / (x2_std)\n\n x_model1 = Input(x1.shape[1:])\n y_model1 = Dropout(0.1)(x_model1)\n y_model1 = Dense(50, activation='relu')(x_model1)\n y_model1 = Dropout(0.2)(y_model1)\n y_model1 = Dense(50, activation='relu')(y_model1)\n out_model1 = Dense(len(np.unique(y1)), activation='softmax')(y_model1)\n\n model1 = Model(input=x_model1, output=out_model1)\n\n optimizer = keras.optimizers.Adadelta()\n model1.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=200, min_lr=0.1)\n\n hist = model1.fit(x1, Y1, batch_size=x1.shape[0], nb_epoch=nb_epochs, verbose=1, shuffle=True, callbacks=[reduce_lr])\n\n dataset2_new_labels = []\n\n for i in range(x2.shape[0]):\n xTrain = x2[i,:].reshape((1,x2.shape[1]))\n dataset2_new_labels.append(np.argmax(model1.predict(xTrain, batch_size=1)))\n\n # Print the testing results which has the l in range(x_train.shape[0]):\n # for i in range(len(x_test1)):\n # xTest = x_test1[i,:].reshape((1,2048))\n # print((np.argmax(model.predict(xTest, batch_size=1)), y_test1[i]))\n # log = pd.DataFrame(hist.history)\n # print(\"saving results for 100 nodes\" + _MODE + fname)\n # log.to_json('accuracies/accuracy_100_' + _MODE + fname + '.json')\n\n # with open('Text_Files/' + fname + '_results.txt', 'w') as text_file:\n # text_file.write(fname + '<<<=====>>>' + str(max(log.val_acc.values)))\n\n # assert 2==1\n\n x_model1 = []\n y_model1 = []\n out_model1 = []\n model1 = []\n\n return dataset2_new_labels", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. 
#\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def _fit_apgl(x, mask, lmbd,\n max_iter=100, L=1e-3, beta=0.5,\n tol=1e-3, print_loss=False):\n # init\n n1, n2 = x.shape\n rdm = RandomState(123)\n theta = rdm.randn(n1, n2) # natural parameter\n thetaOld = theta\n alpha = 1\n alphaOld = 0\n\n # main loop\n loss = _cross_entropy(x, mask, theta) + lmbd * \\\n np.linalg.norm(theta, ord='nuc')\n iteration = []\n for i in range(int(max_iter)):\n if print_loss:\n print(f'Epoch {i}, loss {loss:.3f}')\n iteration.append(loss)\n lossOld = loss\n # nesterov extropolation\n A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)\n for _ in range(50):\n S = A - L * _gradient(x, mask, A)\n thetaNew = svt(S, lmbd * L)\n ce = _cross_entropy(x, mask, thetaNew)\n if ce < _bound(x, mask, thetaNew, theta, L):\n break\n else:\n L = beta * L\n thetaOld = theta\n theta = thetaNew\n alphaOld = alpha\n alpha = (1 + np.sqrt(4 + alpha ** 2)) / 2\n loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')\n if i == max_iter - 1:\n print(f'Reach max iteration {i+1}')\n if np.abs(lossOld - loss) < tol:\n break\n\n return theta, np.array(iteration)", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, 
bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel_old, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n # norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, 
num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.LeakyReLU(alpha=0.1))", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, 
stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def calculate2_simpleKL_norm(pred, truth, rnd=0.01):\n return 1 - calculate2_simpleKL(pred, truth, rnd=rnd) / 4000", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):\n sess = tf.get_default_session()\n\n idxes = np.random.permutation(len(D))\n train_idxes = idxes[:int(len(D)*0.8)]\n valid_idxes = idxes[int(len(D)*0.8):]\n\n def _batch(idx_list,add_noise):\n batch = []\n\n if len(idx_list) > batch_size:\n idxes = np.random.choice(idx_list,batch_size,replace=False)\n else:\n idxes = idx_list\n\n for i in idxes:\n batch.append(D[i])\n\n b_x,b_y,b_l = zip(*batch)\n x_split = np.array([len(x) for x in b_x])\n y_split = np.array([len(y) for y in b_y])\n b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)\n\n if add_noise:\n b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1\n\n return b_x,b_y,x_split,y_split,b_l\n\n for it in tqdm(range(iter),dynamic_ncols=True):\n b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)\n\n loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l,\n self.l2_reg:l2_reg,\n })\n\n if debug:\n if it % 100 == 0 or it < 10:\n b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)\n valid_acc = sess.run(self.acc,feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l\n })\n tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))\n\n #if valid_acc >= 0.95:\n # print('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))\n # print('early termination@%08d'%it)\n # break", "def configure_batchnorm(x, model):\n bs = x.size(0)\n # train mode, because dent optimizes the model to minimize entropy\n model.train()\n # disable grad, to (re-)enable only what 
dent updates\n model.requires_grad_(False)\n # configure norm for dent updates:\n # enable grad + keep statisics + repeat affine params\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight = nn.Parameter(m.ckpt_weight.unsqueeze(0).repeat(bs, 1))\n m.bias = nn.Parameter(m.ckpt_bias.unsqueeze(0).repeat(bs, 1))\n m.requires_grad_(True)\n return model", "def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1))", "def scipy_minus_gradient(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._gradient_iter += 1\n g = None\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n first_term = vector_graphs[correct_rows,:].sum(axis=0)\n all_scores = vector_graphs * w\n all_probs = []\n for batch in batches:\n batch_scores = all_scores[batch]\n S = logsumexp(batch_scores)\n all_probs.append(np.exp(batch_scores - S))\n all_probs = numpy.hstack(all_probs)\n second_term = all_probs * vector_graphs\n if g is None:\n g = second_term - first_term\n else:\n g = g + second_term - first_term\n index += 1\n if index % 100 == 0:\n print('Gradient '+str(index)+' processed')\n g = numpy.ndarray.flatten(numpy.asarray(g)) / len(all_vector_graphs)\n if sigma != None:\n g = g + sigma * w\n print('Gradient norm:'+str(scipy.linalg.norm(g)))\n sys.stdout.flush()\n if perceptron and perceptron._model_pickle:\n if perceptron._gradient_iter % 5 == 0:\n perceptron._weights = numpy.reshape(w,(1,perceptron._num_features))\n perceptron.save(perceptron._model_pickle+'_'+str(perceptron._gradient_iter))\n return g", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. \n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. 
It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def neg_sampling_loss_and_gradient(\n center_word_vec,\n outside_word_idx,\n outside_vectors,\n dataset,\n K=10\n):\n\n # Negative sampling of words is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n neg_sample_word_indices = get_negative_samples(outside_word_idx, dataset, K)\n indices = [outside_word_idx] + neg_sample_word_indices\n\n ### YOUR CODE HERE\n \n outside_word_vector = outside_vectors[outside_word_idx]\n outside_words_dot_center_word = outside_word_vector.dot(center_word_vec)\n \n neg_samples_vector = outside_vectors[neg_sample_word_indices]\n neg_samples_dot_center_word = neg_samples_vector.dot(center_word_vec)\n \n sigmoid_outside_dot = sigmoid(outside_words_dot_center_word)\n sigmoid_negative_dot = sigmoid(-neg_samples_dot_center_word)\n\n loss = -np.log(sigmoid_outside_dot) -np.sum(np.log(sigmoid_negative_dot))\n \n grad_center_vec = \\\n (sigmoid_outside_dot - 1) * outside_word_vector + \\\n np.sum((1 - sigmoid_negative_dot)[:, np.newaxis] * neg_samples_vector, axis = 0)\n \n grad_outside_vecs = np.zeros_like(outside_vectors)\n grad_outside_vecs[outside_word_idx] = (sigmoid_outside_dot - 1) * center_word_vec\n \n for i, neg_index in enumerate(neg_sample_word_indices):\n grad_outside_vecs[neg_index] += \\\n (1 - sigmoid_negative_dot[i]) * center_word_vec\n\n ### END YOUR CODE\n\n return loss, grad_center_vec, grad_outside_vecs", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n CustomBatchNormalization(),\n LeakyReLU(alpha=0.1))", "def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = 
tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def test_keras_unsafe_l2_norm():\n model, X, y, Xval, yval = make_small_model()\n\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n\n isDP, msg = safekeras.check_optimizer_is_DP(model.optimizer)\n assert isDP, \"failed check that optimizer is dP\"\n\n model.l2_norm_clip = 0.9\n\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n\n DPused, msg = safekeras.check_DP_used(model.optimizer)\n assert (\n DPused\n ), \"Failed check that DP version of optimiser was actually used in training\"\n\n loss, acc = model.evaluate(X, y)\n expected_accuracy = UNSAFE_ACC\n assert round(acc, 6) == round(\n expected_accuracy, 6\n ), \"failed check that accuracy is as expected\"\n\n msg, disclosive = model.preliminary_check()\n correct_msg = (\n \"WARNING: model parameters may present a disclosure risk:\"\n \"\\n- parameter l2_norm_clip = 0.9 identified as less than the recommended \"\n \"min value of 1.0.\"\n )\n assert msg == correct_msg, \"failed check correct warning message\"\n assert disclosive is True, \"failed check disclosive is True\"", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = 
tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def darknet_CBL(*args, **kwargs):\n\n no_bias_kwargs = {'use_bias': False} # 没懂为啥用 no_bias\n no_bias_kwargs.update(kwargs)\n return compose(\n darknet_Conv2D(*args, **no_bias_kwargs),\n custom_batchnormalization(),\n LeakyReLU(alpha=0.1)\n )", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):\n sigma2 = sigma * sigma\n\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n\n return outside_mul", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n ###########################################################################\n x, mu, sigma, gamma, beta = cache\n N = dout.shape[0]\n X_mu = x - mu\n var_inv = 1./sigma\n \n dX_norm = dout * gamma\n dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3)\n dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0)\n\n dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum(dout * X_mu/sigma, axis=0)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def relaxed_ba_bias(Xinput, L, lamb, beta, max_iter=300):\n X = Xinput.T # X: n_samples x n_dim\n D, m = X.shape\n B = np.sign(np.random.rand(L, m))\n c1 = np.random.rand(L,1)\n c2 = np.random.rand(D,1)\n\n for i in range(max_iter):\n # given B, compute W1\n W1 = lamb*np.matmul(np.matmul((B - c1), X.T), \\\n np.linalg.inv(lamb*np.matmul(X,X.T) + beta*np.eye(D)))\n\n # given B, compute W2\n W2 = np.matmul( np.matmul((X-c2), B.T), \\\n np.linalg.inv(np.matmul(B,B.T) + beta*np.eye(L)))\n\n # compute c1\n c1 = (1.0/m)*np.matmul(B - np.matmul(W1, X), np.ones((m,1)))\n # compute c2\n c2 = (1.0/m)*np.matmul(X - np.matmul(W2, B), np.ones((m,1)))\n\n # given W1, W2, c1, c2, compute B\n Xtmp = X - c2\n H = np.matmul(W1, X) + c1\n B = learn_B_new(Xtmp.T, W2.T, B.T, H.T, lamb);\n\n B = B.T\n\n # X_reconst = np.matmul(W2, np.sign(np.matmul(W1, X) + c1)) + c2\n # mse = np.mean(np.square(X_reconst - X))\n # print('mse {}'.format(mse))\n return W2, W1, c2, c1, B", "def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(DenseGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n dense_features = ngf * mult\n dense_features = dense_features + 6 * 32\n for i in range(n_blocks):\n model += [DenseBlock(num_layers=6, num_input_features=ngf * mult, bn_size=4, growth_rate=32, drop_rate=0,\n norm_layer=norm_layer)]\n model += [norm_layer(dense_features), nn.ReLU(inplace=True),\n nn.Conv2d(dense_features, ngf * mult, 
kernel_size=1, stride=1, bias=use_bias),\n ]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip", "def __init__(self, rng, input, 
layer_sizes, dropout_rates,\r\n activations=None, use_bias=True, prob_constraint_on=True):\r\n # Set up all the hidden layers\r\n weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))\r\n # we build two parallel layers\r\n # - training_layers for training with/without dropout\r\n # - testing_layers for testing the performance\r\n self.training_layers = []\r\n self.testing_layers = []\r\n \r\n # dropout the input\r\n next_training_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])\r\n next_testing_layer_input = input\r\n \r\n layer_counter = 0\r\n for n_in, n_out in weight_matrix_sizes[:-1]:\r\n \r\n # setup the training layer\r\n next_training_layer = DropoutHiddenLayer(rng=rng,\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n dropout_rate=dropout_rates[layer_counter])\r\n self.training_layers.append(next_training_layer)\r\n next_training_layer_input = next_training_layer.output\r\n\r\n # setup the testing layer\r\n # Reuse the paramters from the dropout layer here, in a different\r\n # path through the graph.\r\n next_testing_layer = HiddenLayer(rng=rng,\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=next_training_layer.W * (1 - dropout_rates[layer_counter]),\r\n b=next_training_layer.b)\r\n self.testing_layers.append(next_testing_layer)\r\n next_testing_layer_input = next_testing_layer.output\r\n \r\n layer_counter += 1\r\n \r\n # Set up the output layer for training layers\r\n n_in, n_out = weight_matrix_sizes[-1]\r\n training_output_layer = LogisticRegression(\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n prob_constraint_on=prob_constraint_on)\r\n self.training_layers.append(training_output_layer)\r\n\r\n # Set up the output layer for testing layers\r\n # Again, reuse paramters in the dropout output.\r\n testing_output_layer = LogisticRegression(\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=training_output_layer.W * (1 - dropout_rates[-1]),\r\n b=training_output_layer.b,\r\n prob_constraint_on=prob_constraint_on)\r\n self.testing_layers.append(testing_output_layer)\r\n\r\n # Use the MSE of the logistic regression layer as the objective\r\n # In training phase, we use the MSE of the logistic regression layer\r\n # which is on top of the dropout_layers\r\n self.training_MSE = self.training_layers[-1].MSE\r\n # In validation/testing phase, we use the MSE of the logistic regression layer\r\n # which is on top of the normal_layers\r\n self.testing_MSE = self.testing_layers[-1].MSE\r\n \r\n # NOTE: for prediction, we use all the weights, thus we should use\r\n # the normal layers instead of the dropout layers\r\n self.y_pred = self.testing_layers[-1].y_pred\r\n \r\n # Grab all the parameters together.\r\n self.params = [ param for layer in self.training_layers for param in layer.params ]\r\n # The above is Double Iteration in List Comprehension\r\n # See the discussion in\r\n # http://stackoverflow.com/questions/17657720/python-list-comprehension-double-for\r\n # In regular for-loop format, we have\r\n # for layer in self.dropout_layers:\r\n # for param in layer.params:\r\n # put param in the resulting list\r", "def train(args,train_loader, model, criterion, optimizer, epoch, pruner, writer):\n batch_time = 
AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n \n\n model.train()\n total =0 \n correct = 0\n reg_loss = 0.0\n train_loss = 0.0\n end = time.time()\n\n for i, (inputs, target) in enumerate(train_loader):\n\n target = target.cuda()\n inputs = inputs.cuda()\n \n inputs, targets_a, targets_b, lam = mixup_data(inputs, target, args.alpha, True)\n inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))\n\n ##input_var = torch.autograd.Variable(input)\n ##target_var = torch.autograd.Variable(target)\n\n\n outputs = model(inputs)\n ##outputs, Qs, Ys = model(inputs)\n ##loss = criterion(output, target_var)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n## print(\"loss:\")\n## print(loss)\n## print(loss.item())\n## train_loss += loss.data[0]\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n## prec1 = accuracy(output.data, target, topk=(1,))[0]\n## losses.update(loss.data.item(), input.size(0))\n## top1.update(prec1.item(), input.size(0))\n\n optimizer.zero_grad()\n\n\n\n## for y in Ys:\n## y.retain_grad()\n\n\n\n loss.backward()\n\n\n optimizer.step()\n\n\n\n\n if pruner is not None:\n pruner.prune(update_state=False)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if 0:\n kwalt = epoch*len(train_loader)+i\n if writer is not None:\n for j,q in enumerate(Qs):\n writer.add_scalar(\"variances %d\" % j, q.cpu().numpy(), kwalt)\n\n for l,y in enumerate(Ys):\n if y.grad is not None:\n writer.add_scalar(\"grad %d\" % (l-j), getQ(y.grad).cpu().numpy(), kwalt)\n\n## writer.add_scalars(\"variancess\", { \"%d\"% j : q.cpu().numpy() for j,q in enumerate(Qs)}, i)\n\n\n\n if 0:\n if i % args.print_freq == 0:\n print(\n f\"Epoch: [{epoch}][{i}/{len(train_loader)}]\\t\"\n f\"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t\"\n f\"Loss {losses.val:.4f} ({losses.avg:.4f})\\t\"\n f\"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\"\n )\n niter = epoch*len(train_loader)+i\n\n batch_idx = i\n if writer is not None:\n writer.add_scalar('Train/Loss', train_loss/batch_idx, epoch)\n writer.add_scalar('Train/Prec@1', 100.*correct/total, epoch)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 
'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def __init__(self, X_train, y_train, input_shape, filters, kernel_size,\n maxpool, loss_function='categorical_crossentropy', nb_classes= 2, droput_iteration=20, dropout = 0.05):\n\n # We normalize the training data to have zero mean and unit standard\n # deviation in the training set if necessary\n\n # if normalize:\n # self.std_X_train = np.std(X_train, 0)\n # self.std_X_train[ self.std_X_train == 0 ] = 1\n # self.mean_X_train = np.mean(X_train, 0)\n # else:\n # self.std_X_train = np.ones(X_train.shape[ 1 ])\n # self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n self.droput_iteration = droput_iteration\n self.nb_classes = nb_classes\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n\n\n # model = Sequential()\n # model.add(Conv2D(filters, (kernel_size, kernel_size), padding='same',\n # input_shape=input_shape))\n # model.add(Activation('relu'))\n # model.add(Conv2D(filters, (kernel_size, kernel_size)))\n # model.add(Activation('relu'))\n # model.add(MaxPooling2D(pool_size=(maxpool, maxpool)))\n # model.add(Dropout(dropout))\n # c = 3.5\n # Weight_Decay = c / float(X_train.shape[0])\n # model.add(Flatten())\n # model.add(Dense(128, W_regularizer=l2(Weight_Decay)))\n # model.add(Activation('relu'))\n # model.add(Dropout(dropout))\n # model.add(Dense(nb_classes))\n # model.add(Activation('softmax'))\n\n # model.compile(loss=loss_function, optimizer='adam')\n\n c = 3.5\n Weight_Decay = c / float(X_train.shape[0])\n\n model = Sequential()\n model.add(Dense(256, input_shape =input_shape))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Dense(256, W_regularizer=l2(Weight_Decay)))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n\n model.compile(loss=loss_function, optimizer='adam')\n\n\n self.model = model\n # # We iterate the learning process\n # model.fit(X_train, y_train, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=0)\n\n # #function for bayesian inference using dropouts\n # self.f = K.function([model.layers[0].input, K.learning_phase()],\n # [model.layers[-1].output])", "def xlarge_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n #model.add(Dense(18, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, kernel_initializer=init, activation='relu', 
kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. 
#\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerTFDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.encoder = nn.Sequential(*sequence)\n dim = ndf * nf_mult\n self.transformer_enc = transformer.TransformerDecoders(dim, nhead=4, num_encoder_layers=4, dim_feedforward=dim*2, dropout=0.0)\n\n self.query_embed = nn.Embedding(1, dim)\n self.classifier = nn.Sequential(\n nn.Linear(dim, dim//2),\n nn.LayerNorm(dim//2),\n nn.ReLU(),\n nn.Linear(dim//2, dim//4),\n nn.LayerNorm(dim//4),\n nn.ReLU(),\n nn.Linear(dim//4, 1),\n nn.Sigmoid()\n )", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def linf1(parameter, bias=None, reg=0.01, lr=0.1):\n\n Norm = reg*lr\n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n sorted_w_and_b, indices = torch.sort(torch.abs(w_and_b), descending=True)\n\n # CUDA or CPU\n devicetype=\"cuda\"\n if w_and_b.is_cuda:\n devicetype=\"cuda\"\n else:\n devicetype=\"cpu\"\n\n\n #SLOW\n rows, cols = sorted_w_and_b.size()\n\n sorted_z = torch.cat((sorted_w_and_b, torch.zeros(rows,1, device=torch.device(devicetype))),1)\n subtracted = torch.clamp(sorted_w_and_b - sorted_z[:,1:],max=Norm) #Max=Norm important\n\n scale_indices = torch.cumsum(torch.ones(rows,cols, device=torch.device(devicetype)),1)\n scaled_subtracted = subtracted * scale_indices\n max_mass = torch.cumsum(scaled_subtracted,1)\n nonzero = torch.clamp(-1*(max_mass - Norm),0)\n\n oneN = 1.0/scale_indices\n\n # Algorithm described in paper, but these are all efficient GPU operation steps)\n # First we 
subtract every value from the cell next to it\n nonzero_ones = torch.clamp(nonzero * 1000000, max=1) #Hacky, but efficient\n shifted_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,:(cols-1)]),1)\n over_one = -1*(nonzero_ones - shifted_ones)\n last_one = torch.cat((over_one,torch.zeros(rows,1, device=torch.device(devicetype))),1)[:,1:]\n max_remain = last_one * nonzero\n shift_max = torch.cat((torch.zeros(rows,1, device=torch.device(devicetype)),max_remain[:,:(cols-1)]),1)\n first_col_nonzero_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,1:]),1) #Edge case for only first column\n tosub = first_col_nonzero_ones * subtracted + shift_max * oneN\n\n # We flip the tensor so that we can get a cumulative sum for the value to subtract, then flip back\n nastyflipS = torch.flip(torch.flip(tosub,[0,1]),[0])\n aggsubS = torch.cumsum(nastyflipS,1)\n nastyflipagainS = torch.flip(torch.flip(aggsubS,[0,1]),[0])\n\n # The proximal gradient step is equal to subtracting the sorted cumulative sum\n updated_weights = sorted_w_and_b - nastyflipagainS\n unsorted = torch.zeros(rows,cols, device=torch.device(devicetype)).scatter_(1,indices,updated_weights)\n final_w_and_b = torch.sign(w_and_b) * unsorted\n\n # Actually update parameters and bias\n if bias is not None:\n update = final_w_and_b[:,:cols-1]\n parameter.data = update\n update_b = final_w_and_b[:,-1]\n bias.data = update_b\n else:\n parameter.data = final_w_and_b", "def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 
2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def batch_norm_conv(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. 
#\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def __init__(self, mode, dim, epsilon=1e-12, name='l2Normalize'):\n super(L2Normalization, self).__init__(mode, name)\n self.dim = dim\n self.epsilon = epsilon", "def l2_normalization(\n inputs,\n name,\n scaling=False,\n scale_initializer=init_ops.ones_initializer(),\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n data_format='NHWC',\n trainable=True,\n scope=None):\n\n with variable_scope.variable_scope(\n scope, 'L2Normalization_'+name, [inputs], reuse=reuse) as sc:\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n dtype = inputs.dtype.base_dtype\n if data_format == 'NHWC':\n # norm_dim = tf.range(1, inputs_rank-1)\n norm_dim = tf.range(inputs_rank-1, inputs_rank)\n params_shape = inputs_shape[-1:]\n elif data_format == 'NCHW':\n # norm_dim = tf.range(2, inputs_rank)\n norm_dim = tf.range(1, 2)\n params_shape = (inputs_shape[1])\n\n # Normalize along spatial dimensions.\n outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)\n # Additional scaling.\n if scaling:\n scale_collections = utils.get_variable_collections(\n variables_collections, 'scale')\n scale = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=scale_initializer,\n collections=scale_collections,\n trainable=trainable)\n if data_format == 'NHWC':\n outputs = tf.multiply(outputs, scale)\n elif data_format == 'NCHW':\n scale = tf.expand_dims(scale, axis=-1)\n scale = tf.expand_dims(scale, axis=-1)\n outputs = tf.multiply(outputs, scale)\n # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))\n\n return utils.collect_named_outputs(outputs_collections,\n 
sc.original_name_scope, outputs)", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. 
#\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. 
#\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def compute_loss(model, loader, loss_fn, optimizer=None):\n\n total_loss = 0.0\n count_batches = 0\n for x_, y_, qm_, db_mask, blink_mask in loader:\n batch_loss_list = []\n xbatch_list = []\n for mask in [db_mask, blink_mask]:\n idxes = get_idxes_from_mask(mask)\n x_pick, y_pick, qm_pick = x_[idxes], y_[idxes], qm_[idxes]\n y_pos_idxes = torch.nonzero(y_pick.squeeze(), as_tuple=False).reshape(1, -1)[0]\n y_neg_idxes = torch.nonzero(~y_pick.squeeze().bool(), as_tuple=False).reshape(1, -1)[0]\n\n if (len(y_pos_idxes) == 0) or (len(y_neg_idxes) == 0):\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(x_pick) <= 1:\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(y_pos_idxes) == 1:\n y_pos_idx = y_pos_idxes[0]\n else: # len(y_pos_idxes) > 1:\n # TODO: I am just always using the first positive example for now\n # rand_idx = random.choice(list(range(len(y_pos_idxes))))\n # print(y_pos_idxes)\n rand_idx = 0\n y_pos_idx = y_pos_idxes[rand_idx]\n\n batch_length = 1 + len(y_neg_idxes)\n batch_feature_len = x_.shape[1]\n x_batch = torch.zeros(batch_length, batch_feature_len)\n x_batch[:-1:, :] = x_pick[y_neg_idxes]\n x_batch[-1, :] = x_pick[y_pos_idx] # put positive to the end\n xbatch_list.append(x_batch)\n # print(y_pos_idx, len(y_neg_idxes))\n # print(\"batch\", x_batch.shape)\n\n if (len(xbatch_list[0]) == 0) and (len(xbatch_list[1]) == 0):\n # skip if both batches are []\n # print(\"hitting cases without any examples [SHOULD BE WRONG]\")\n continue\n elif (len(xbatch_list[0]) == 0) or (len(xbatch_list[1]) == 0):\n # continue # TODO: testing whether improvements made if we only use cases where there are sources from both\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 1\n yhat_neg = yhat[:-1]\n yhat_pos = yhat[-1].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones((len(yhat) - 1), 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n else:\n # get yhats for both BLINK and DB batches\n # print(len(xbatch_list[0]), len(xbatch_list[1]))\n # print((xbatch_list[0], xbatch_list[1]))\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 2\n yhat_neg = torch.zeros(extended_batch_length, 1)\n yhat_neg[:len(xbatch_list[0])-1] = yhat[:len(xbatch_list[0])-1]\n yhat_neg[len(xbatch_list[0])-1:] = yhat[len(xbatch_list[0]):-1]\n for idx in [len(xbatch_list[0]), -1]:\n yhat_pos = yhat[idx].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones(extended_batch_length, 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n 
count_batches += 1\n\n # update every question-mention\n if batch_loss_list and optimizer:\n (sum(batch_loss_list)/len(batch_loss_list)).backward()\n optimizer.step()\n\n avg_loss = total_loss / count_batches\n\n return avg_loss, batch_length", "def train_net(epoch, data, net, opti, batch_graph):\n global num_batches, batch_size\n # train the network\n for num in range(epoch):\n # run each batch through each round\n for batch_id in range(num_batches):\n # calculate the neighborhood for the graph\n batch = torch.from_numpy(data[batch_id]).float()\n batch = batch.view(batch_size, -1)\n batch_distances = pairwise_distances(batch)\n nbr_graph_tensor = torch.from_numpy(batch_graph[batch_id]).float()\n batch_distances_masked = batch_distances * nbr_graph_tensor.float()\n global lbda\n out = net(batch, False) # run the batch through the network\n svd_loss, out = implement_svd(out) # calculate the SVD L2,1 loss and SVD representation\n output_distances = pairwise_distances(out)\n # Multiply the distances between each pair of points with the neighbor mask\n output_distances_masked = output_distances * nbr_graph_tensor.float()\n # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n nbr_distance = nbr_diff.norm()\n svd_loss *= lbda_svd # multiply SVD loss by its scaling factor\n # find variance in all directions\n var = 0\n for i in range(out.size()[0]):\n var += lbda_var / out[i].var()\n loss = nbr_distance + svd_loss + var # loss contains all three terms\n opti.zero_grad()\n loss.backward()\n opti.step()\n print('Epoch: %f, Step: %f, Loss: %.2f' % (num, batch_id + 1, loss.data.cpu().numpy()))\n\n # find the ideal number of dimensions\n global final_dim\n batch = torch.from_numpy(data[0]).float()\n batch = batch.view(batch_size, -1)\n out = net(batch, False)\n u, s, v = torch.svd(out)\n final_dim = calc_dim(s)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are 
the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)" ]
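Several of the negative snippets in the list that closes here implement the batch-norm backward pass as a single vectorized statement (the batchnorm_backward_alt snippet above). For reference, the standard identity that the one-line dx expression computes, with x-hat the normalized inputs, N the batch size, and sums taken over the batch axis, is:

$$
\frac{\partial L}{\partial \beta} = \sum_{j=1}^{N} \frac{\partial L}{\partial y_j},
\qquad
\frac{\partial L}{\partial \gamma} = \sum_{j=1}^{N} \frac{\partial L}{\partial y_j}\,\hat{x}_j,
\qquad
\frac{\partial L}{\partial x_i} = \frac{\gamma}{N\sqrt{\sigma^2 + \varepsilon}}
\left( N\,\frac{\partial L}{\partial y_i}
 - \sum_{j=1}^{N} \frac{\partial L}{\partial y_j}
 - \hat{x}_i \sum_{j=1}^{N} \frac{\partial L}{\partial y_j}\,\hat{x}_j \right).
$$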
[ "0.6057391", "0.6015575", "0.59682286", "0.59510905", "0.5895679", "0.5895679", "0.5812982", "0.57391614", "0.5654636", "0.5644836", "0.56329393", "0.56230843", "0.5617473", "0.56083846", "0.5606605", "0.55982274", "0.55822885", "0.5554348", "0.5552772", "0.554574", "0.5543535", "0.55342215", "0.5517298", "0.5482447", "0.5482134", "0.5472582", "0.5470922", "0.54539526", "0.5446315", "0.54459244", "0.5445722", "0.54425806", "0.54378456", "0.5435633", "0.54307294", "0.5428913", "0.5416998", "0.5408639", "0.5403238", "0.54025966", "0.53949994", "0.53880835", "0.53844064", "0.53817916", "0.5379645", "0.53776026", "0.53730524", "0.5371371", "0.53707147", "0.5370329", "0.536995", "0.5363935", "0.5362235", "0.53618956", "0.5360489", "0.5357146", "0.5349869", "0.53462803", "0.53454286", "0.5344462", "0.5341536", "0.5340334", "0.53318334", "0.53240746", "0.5321518", "0.5313607", "0.5312993", "0.5311656", "0.53095317", "0.53087497", "0.5305072", "0.53037035", "0.5295605", "0.5289092", "0.5287954", "0.52876663", "0.5281113", "0.52785045", "0.52784204", "0.5270944", "0.5270709", "0.5269045", "0.5267125", "0.5263374", "0.5260554", "0.52602875", "0.5259031", "0.52578044", "0.52568597", "0.525481", "0.52547556", "0.5253757", "0.52510357", "0.52459514", "0.5245125", "0.52440065", "0.5243", "0.52407926", "0.52388793", "0.5237576" ]
0.6199096
0
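The row that ends here pairs a natural-language query with a positive code snippet, a list of negative snippets, per-negative scores, and a rank, and its metadata declares a triplet objective over (query, document, negatives). Below is a minimal sketch, not part of the dataset itself, of turning one such row into training triplets; the JSONL file name, the max_negatives cap, and the float cast on the score strings are illustrative assumptions.

# Sketch: build (anchor, positive, negative) triplets from one row of this dump.
# Field names follow the dump; the file name and the negative cap are assumptions.
import json

def row_to_triplets(row, max_negatives=5):
    anchor = row["query"]        # natural-language description
    positive = row["document"]   # the matching code snippet
    # Rank negatives by score so the hardest ones come first.
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),
        reverse=True,
    )
    return [(anchor, positive, neg) for neg, _ in ranked[:max_negatives]]

if __name__ == "__main__":
    with open("rows.jsonl") as handle:  # hypothetical export of these rows
        for line in handle:
            triplets = row_to_triplets(json.loads(line))
            # feed `triplets` into a triplet or contrastive loss here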
dropout + batch norm + l1_l2
def architecture_CONV_FC_batch_norm_dropout_L1_l2_TANH(
    X, nbclasses, nb_conv=1, nb_fc=1
):
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.0e-3
    dropout_rate = 0.5
    conv_kernel = 3
    conv_filters = 64
    nbunits_fc = 128
    activation = tanh

    model = Sequential(
        name=f"""{str(nb_conv)}_
        CONV_k_{str(conv_kernel)}_
        {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}
        _TANH"""
    )
    model.add(
        Conv1D(
            input_shape=input_shape,
            activation=activation,
            kernel_regularizer=l1_l2(l1_l2_rate),
            kernel_size=conv_kernel,
            filters=conv_filters,
        )
    )
    model.add(BatchNormalization())

    # if more covolutional layers are defined in parameters
    if nb_conv > 1:
        for _layer in range(nb_conv):
            model.add(
                Conv1D(
                    kernel_size=conv_kernel,
                    filters=conv_filters,
                    activation=activation,
                    kernel_regularizer=l1_l2(l1_l2_rate),
                )
            )
            model.add(BatchNormalization())

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(
            Dense(
                nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation
            )
        )
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
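The document above only defines the network. A minimal usage sketch follows; it assumes the names the snippet relies on (Sequential, Conv1D, BatchNormalization, Flatten, Dense, Dropout, l1_l2, tanh, softmax) are imported from tensorflow.keras, as in the related snippets further down, and the input shape and class count are made up for illustration.

# Sketch: build and inspect the model defined above on dummy series data.
# The shape (32 samples, 24 time steps, 6 channels) and 5 classes are assumptions.
import numpy as np

X = np.random.rand(32, 24, 6).astype("float32")  # unpacked as (width, height, depth)
model = architecture_CONV_FC_batch_norm_dropout_L1_l2_TANH(X, nbclasses=5)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
# model.fit(X, y_onehot, epochs=..., batch_size=...) would follow in a real run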
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def architecture_CONV_FC_batch_norm_dropout_L1_l2(\n X, nbclasses, nb_conv=1, nb_fc=1, kernel_initializer=\"random_normal\"\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = relu\n kernel_initializer = kernel_initializer\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}__CONV_k{str(conv_kernel)}_\n {str(nb_fc)}_initializer_{kernel_initializer}_\n _FC128_bn_d_{str(dropout_rate)}\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_LEAKY_ReLU(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _LEAKY_ReLU\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(Dense(nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate)))\n model.add(LeakyReLU())\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we 
need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # 
Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) 
* error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if 
mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def build_generator(latent_dim=100):\n # The weight initialization and the slope are chosen to accord with the\n # Parameters in the paper. I only change padding when it seems neccesary to\n # to mantain adequate dimensons. 
\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.3\n \n inputs = keras.Input(shape=(1,1,100))\n # First convolutional layer\n x = Conv2DTranspose(\n 1024, \n kernel_size=(4,4), \n strides=1, \n kernel_initializer=weight_initializer,\n padding='valid',\n use_bias=False\n )(inputs)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Second convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 512,\n kernel_size = 4,\n strides = (2,2),\n padding = 'same',\n use_bias = False\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 256,\n kernel_size = 5,\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 128,\n kernel_size = (5,5),\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fifth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 3,\n kernel_size = (5,5),\n use_bias=False,\n strides = (2,2),\n padding = 'same',\n activation='tanh'\n )(x)\n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss", "def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n if type(hidden_dims) != list:\n raise ValueError('hidden_dim has to be a list')\n\n self.L = len(hidden_dims) + 1\n self.N = input_dim\n self.C = num_classes\n dims = [self.N] + hidden_dims + [self.C]\n Ws = {'W' + str(i + 1):\n weight_scale * np.random.randn(dims[i], dims[i + 1]) for i in range(len(dims) - 1)}\n b = {'b' + str(i + 1): np.zeros(dims[i + 1])\n for i in range(len(dims) - 1)}\n\n self.params.update(b)\n self.params.update(Ws)\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). 
You can pass the same dropout_param to each dropout layer.\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def masked_l2(preds, actuals, mask):\n loss = tf.nn.l2(preds, actuals)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, dropout_name=None):\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob, dropout_name=dropout_name)\n return output_tensor", "def loss_false(code_batch, k=1):\n\n _, n_latent = code_batch.get_shape()\n\n # changing these parameters is equivalent to changing the strength of the\n # regularizer, so we keep these fixed (these values correspond to the\n # original values used in Kennel et al 1992).\n rtol = 20.0\n atol = 2.0\n # k_frac = 0.01\n # n_batch = tf.cast(tf.keras.backend.shape(code_batch)[0], tf.float32)\n # assert False, n_batch\n # k = max(1, int(k_frac * n_batch))\n\n ## Vectorized version of distance matrix 
calculation\n tri_mask = tf.linalg.band_part(tf.ones((n_latent, n_latent), tf.float32), -1, 0)\n batch_masked = tf.multiply(tri_mask[:, tf.newaxis, :], code_batch[tf.newaxis, ...])\n X_sq = tf.reduce_sum(batch_masked * batch_masked, axis=2, keepdims=True)\n pdist_vector = (\n X_sq\n + tf.transpose(X_sq, [0, 2, 1])\n - 2 * tf.matmul(batch_masked, tf.transpose(batch_masked, [0, 2, 1]))\n )\n all_dists = pdist_vector\n all_ra = tf.sqrt(\n (1 / (tf.range(1, 1 + n_latent, dtype=tf.float32)))\n * tf.squeeze(\n tf.reduce_sum(\n tf.square(tf.math.reduce_std(batch_masked, axis=1, keepdims=True)),\n axis=2,\n )\n )\n )\n\n # Avoid singularity in the case of zeros\n all_dists = tf.clip_by_value(all_dists, 1e-14, tf.reduce_max(all_dists))\n\n # inds = tf.argsort(all_dists, axis=-1)\n _, inds = tf.math.top_k(-all_dists, int(k + 1))\n # top_k currently faster than argsort because it truncates matrix\n\n neighbor_dists_d = tf.gather(all_dists, inds, batch_dims=-1)\n neighbor_new_dists = tf.gather(all_dists[1:], inds[:-1], batch_dims=-1)\n\n # Eq. 4 of Kennel et al.\n scaled_dist = tf.sqrt(\n (neighbor_new_dists - neighbor_dists_d[:-1]) / neighbor_dists_d[:-1]\n )\n\n # Kennel condition #1\n is_false_change = scaled_dist > rtol\n # Kennel condition 2\n is_large_jump = neighbor_new_dists > atol * all_ra[:-1, tf.newaxis, tf.newaxis]\n\n is_false_neighbor = tf.math.logical_or(is_false_change, is_large_jump)\n total_false_neighbors = tf.cast(is_false_neighbor, tf.int32)[..., 1 : (k + 1)]\n\n # Pad zero to match dimensionality of latent space\n reg_weights = 1 - tf.reduce_mean(\n tf.cast(total_false_neighbors, tf.float64), axis=(1, 2)\n )\n reg_weights = tf.pad(reg_weights, [[1, 0]])\n\n # Find average batch activity\n activations_batch_averaged = tf.sqrt(tf.reduce_mean(tf.square(code_batch), axis=0))\n\n # L2 Activity regularization\n activations_batch_averaged = tf.cast(activations_batch_averaged, tf.float64)\n loss = tf.reduce_sum(tf.multiply(reg_weights, activations_batch_averaged))\n\n return tf.cast(loss, tf.float32)", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_SIGMOID(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = sigmoid\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_\n {str(conv_kernel)}_{str(nb_fc)}\n _FC128_bn_d_{str(dropout_rate)}\n _SIGMOID\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, 
seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #\n # initialized from a normal distribution centered at 0 with standard #\n # deviation equal to weight_scale. Biases should be initialized to zero. #\n # #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n dimension = [input_dim] + hidden_dims + [num_classes]\n for i in range(1, self.num_layers+1):\n self.params['W{0}'.format(i)] = weight_scale * np.random.randn(dimension[i-1], dimension[i])\n self.params['b{0}'.format(i)] = np.zeros(dimension[i])\n\n if self.normalization in ['batchnorm', 'layernorm']:\n self._batchnormInit()\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def batchnorm_init(m, kernelsize=3):\r\n n = kernelsize**2 * m.num_features\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ (n)))\r\n m.bias.data.zero_()", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. 
#\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def _optimization(dataset1, dataset2, nb_epochs=3000):\n\n x1_mean = dataset1['data'].mean()\n x1_std = dataset1['data'].std()\n x1 = (dataset1['data'] - x1_mean) / (x1_std)\n y1 = dataset1['labels']\n Y1 = dataset1['hot_labels']\n\n x2_mean = dataset2['data'].mean()\n x2_std = dataset2['data'].std()\n x2 = (dataset2['data'] - x2_mean) / (x2_std)\n\n x_model1 = Input(x1.shape[1:])\n y_model1 = Dropout(0.1)(x_model1)\n y_model1 = Dense(50, activation='relu')(x_model1)\n y_model1 = Dropout(0.2)(y_model1)\n y_model1 = Dense(50, activation='relu')(y_model1)\n out_model1 = Dense(len(np.unique(y1)), activation='softmax')(y_model1)\n\n model1 = Model(input=x_model1, output=out_model1)\n\n optimizer = keras.optimizers.Adadelta()\n model1.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=200, min_lr=0.1)\n\n hist = model1.fit(x1, Y1, batch_size=x1.shape[0], nb_epoch=nb_epochs, verbose=1, shuffle=True, callbacks=[reduce_lr])\n\n dataset2_new_labels = []\n\n for i in range(x2.shape[0]):\n xTrain = x2[i,:].reshape((1,x2.shape[1]))\n dataset2_new_labels.append(np.argmax(model1.predict(xTrain, batch_size=1)))\n\n # Print the testing results which has the l in range(x_train.shape[0]):\n # for i in range(len(x_test1)):\n # xTest = x_test1[i,:].reshape((1,2048))\n # print((np.argmax(model.predict(xTest, batch_size=1)), y_test1[i]))\n # log = pd.DataFrame(hist.history)\n # print(\"saving results for 100 nodes\" + _MODE + fname)\n # log.to_json('accuracies/accuracy_100_' + _MODE + fname + '.json')\n\n # with open('Text_Files/' + fname + '_results.txt', 'w') as text_file:\n # text_file.write(fname + '<<<=====>>>' + str(max(log.val_acc.values)))\n\n # assert 
2==1\n\n x_model1 = []\n y_model1 = []\n out_model1 = []\n model1 = []\n\n return dataset2_new_labels", "def _fit_apgl(x, mask, lmbd,\n max_iter=100, L=1e-3, beta=0.5,\n tol=1e-3, print_loss=False):\n # init\n n1, n2 = x.shape\n rdm = RandomState(123)\n theta = rdm.randn(n1, n2) # natural parameter\n thetaOld = theta\n alpha = 1\n alphaOld = 0\n\n # main loop\n loss = _cross_entropy(x, mask, theta) + lmbd * \\\n np.linalg.norm(theta, ord='nuc')\n iteration = []\n for i in range(int(max_iter)):\n if print_loss:\n print(f'Epoch {i}, loss {loss:.3f}')\n iteration.append(loss)\n lossOld = loss\n # nesterov extropolation\n A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)\n for _ in range(50):\n S = A - L * _gradient(x, mask, A)\n thetaNew = svt(S, lmbd * L)\n ce = _cross_entropy(x, mask, thetaNew)\n if ce < _bound(x, mask, thetaNew, theta, L):\n break\n else:\n L = beta * L\n thetaOld = theta\n theta = thetaNew\n alphaOld = alpha\n alpha = (1 + np.sqrt(4 + alpha ** 2)) / 2\n loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')\n if i == max_iter - 1:\n print(f'Reach max iteration {i+1}')\n if np.abs(lossOld - loss) < tol:\n break\n\n return theta, np.array(iteration)", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n 
add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel_old, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n # norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.LeakyReLU(alpha=0.1))", "def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n 
optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, 
i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def calculate2_simpleKL_norm(pred, truth, rnd=0.01):\n return 1 - calculate2_simpleKL(pred, truth, rnd=rnd) / 4000", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):\n sess = tf.get_default_session()\n\n idxes = np.random.permutation(len(D))\n train_idxes = idxes[:int(len(D)*0.8)]\n valid_idxes = idxes[int(len(D)*0.8):]\n\n def _batch(idx_list,add_noise):\n batch = []\n\n if len(idx_list) > batch_size:\n idxes = np.random.choice(idx_list,batch_size,replace=False)\n else:\n idxes = idx_list\n\n for i in idxes:\n batch.append(D[i])\n\n b_x,b_y,b_l = zip(*batch)\n x_split = np.array([len(x) for x in b_x])\n y_split = np.array([len(y) for y in b_y])\n b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)\n\n if add_noise:\n b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1\n\n return b_x,b_y,x_split,y_split,b_l\n\n for it in tqdm(range(iter),dynamic_ncols=True):\n b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)\n\n loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l,\n self.l2_reg:l2_reg,\n })\n\n if debug:\n if it % 100 == 0 or it < 10:\n b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)\n valid_acc = sess.run(self.acc,feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l\n })\n tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))\n\n #if valid_acc >= 0.95:\n # print('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))\n # print('early termination@%08d'%it)\n # break", "def configure_batchnorm(x, model):\n bs = x.size(0)\n # train mode, because dent optimizes the model to minimize entropy\n model.train()\n # disable grad, to (re-)enable only what dent updates\n model.requires_grad_(False)\n # configure norm for dent updates:\n # enable grad + keep statisics + repeat affine params\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight = nn.Parameter(m.ckpt_weight.unsqueeze(0).repeat(bs, 1))\n m.bias = nn.Parameter(m.ckpt_bias.unsqueeze(0).repeat(bs, 1))\n m.requires_grad_(True)\n return model", "def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + 
tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1))", "def scipy_minus_gradient(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._gradient_iter += 1\n g = None\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n first_term = vector_graphs[correct_rows,:].sum(axis=0)\n all_scores = vector_graphs * w\n all_probs = []\n for batch in batches:\n batch_scores = all_scores[batch]\n S = logsumexp(batch_scores)\n all_probs.append(np.exp(batch_scores - S))\n all_probs = numpy.hstack(all_probs)\n second_term = all_probs * vector_graphs\n if g is None:\n g = second_term - first_term\n else:\n g = g + second_term - first_term\n index += 1\n if index % 100 == 0:\n print('Gradient '+str(index)+' processed')\n g = numpy.ndarray.flatten(numpy.asarray(g)) / len(all_vector_graphs)\n if sigma != None:\n g = g + sigma * w\n print('Gradient norm:'+str(scipy.linalg.norm(g)))\n sys.stdout.flush()\n if perceptron and perceptron._model_pickle:\n if perceptron._gradient_iter % 5 == 0:\n perceptron._weights = numpy.reshape(w,(1,perceptron._num_features))\n perceptron.save(perceptron._model_pickle+'_'+str(perceptron._gradient_iter))\n return g", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n CustomBatchNormalization(),\n LeakyReLU(alpha=0.1))", "def neg_sampling_loss_and_gradient(\n center_word_vec,\n outside_word_idx,\n outside_vectors,\n dataset,\n K=10\n):\n\n # Negative sampling of words is done for you. 
Do not modify this if you\n # wish to match the autograder and receive points!\n neg_sample_word_indices = get_negative_samples(outside_word_idx, dataset, K)\n indices = [outside_word_idx] + neg_sample_word_indices\n\n ### YOUR CODE HERE\n \n outside_word_vector = outside_vectors[outside_word_idx]\n outside_words_dot_center_word = outside_word_vector.dot(center_word_vec)\n \n neg_samples_vector = outside_vectors[neg_sample_word_indices]\n neg_samples_dot_center_word = neg_samples_vector.dot(center_word_vec)\n \n sigmoid_outside_dot = sigmoid(outside_words_dot_center_word)\n sigmoid_negative_dot = sigmoid(-neg_samples_dot_center_word)\n\n loss = -np.log(sigmoid_outside_dot) -np.sum(np.log(sigmoid_negative_dot))\n \n grad_center_vec = \\\n (sigmoid_outside_dot - 1) * outside_word_vector + \\\n np.sum((1 - sigmoid_negative_dot)[:, np.newaxis] * neg_samples_vector, axis = 0)\n \n grad_outside_vecs = np.zeros_like(outside_vectors)\n grad_outside_vecs[outside_word_idx] = (sigmoid_outside_dot - 1) * center_word_vec\n \n for i, neg_index in enumerate(neg_sample_word_indices):\n grad_outside_vecs[neg_index] += \\\n (1 - sigmoid_negative_dot[i]) * center_word_vec\n\n ### END YOUR CODE\n\n return loss, grad_center_vec, grad_outside_vecs", "def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. \n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. 
It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def test_keras_unsafe_l2_norm():\n model, X, y, Xval, yval = make_small_model()\n\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n\n isDP, msg = safekeras.check_optimizer_is_DP(model.optimizer)\n assert isDP, \"failed check that optimizer is dP\"\n\n model.l2_norm_clip = 0.9\n\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n\n DPused, msg = safekeras.check_DP_used(model.optimizer)\n assert (\n DPused\n ), \"Failed check that DP version of optimiser was actually used in training\"\n\n loss, acc = model.evaluate(X, y)\n expected_accuracy = UNSAFE_ACC\n assert round(acc, 6) == round(\n expected_accuracy, 6\n ), \"failed check that accuracy is as expected\"\n\n msg, disclosive = model.preliminary_check()\n correct_msg = (\n \"WARNING: model parameters may present a disclosure risk:\"\n \"\\n- parameter l2_norm_clip = 0.9 identified as less than the recommended \"\n \"min value of 1.0.\"\n )\n assert msg == correct_msg, \"failed check correct warning message\"\n assert disclosive is True, \"failed check disclosive is True\"", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def darknet_CBL(*args, **kwargs):\n\n no_bias_kwargs = {'use_bias': False} # 没懂为啥用 no_bias\n no_bias_kwargs.update(kwargs)\n return compose(\n darknet_Conv2D(*args, **no_bias_kwargs),\n custom_batchnormalization(),\n LeakyReLU(alpha=0.1)\n )", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n 
self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n x, mu, sigma, gamma, beta = cache\n N = dout.shape[0]\n X_mu = x - mu\n var_inv = 1./sigma\n \n dX_norm = dout * gamma\n dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3)\n dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0)\n\n dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum(dout * X_mu/sigma, axis=0)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):\n sigma2 = sigma * sigma\n\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n\n return outside_mul", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def relaxed_ba_bias(Xinput, L, lamb, beta, max_iter=300):\n X = Xinput.T # X: n_samples x n_dim\n D, m = X.shape\n B = np.sign(np.random.rand(L, m))\n c1 = np.random.rand(L,1)\n c2 = np.random.rand(D,1)\n\n for i in range(max_iter):\n # given B, compute W1\n W1 = lamb*np.matmul(np.matmul((B - c1), X.T), \\\n np.linalg.inv(lamb*np.matmul(X,X.T) + beta*np.eye(D)))\n\n # given B, compute W2\n W2 = np.matmul( np.matmul((X-c2), B.T), \\\n np.linalg.inv(np.matmul(B,B.T) + beta*np.eye(L)))\n\n # compute c1\n c1 = (1.0/m)*np.matmul(B - np.matmul(W1, X), 
np.ones((m,1)))\n # compute c2\n c2 = (1.0/m)*np.matmul(X - np.matmul(W2, B), np.ones((m,1)))\n\n # given W1, W2, c1, c2, compute B\n Xtmp = X - c2\n H = np.matmul(W1, X) + c1\n B = learn_B_new(Xtmp.T, W2.T, B.T, H.T, lamb);\n\n B = B.T\n\n # X_reconst = np.matmul(W2, np.sign(np.matmul(W1, X) + c1)) + c2\n # mse = np.mean(np.square(X_reconst - X))\n # print('mse {}'.format(mse))\n return W2, W1, c2, c1, B", "def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(DenseGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n dense_features = ngf * mult\n dense_features = dense_features + 6 * 32\n for i in range(n_blocks):\n model += [DenseBlock(num_layers=6, num_input_features=ngf * mult, bn_size=4, growth_rate=32, drop_rate=0,\n norm_layer=norm_layer)]\n model += [norm_layer(dense_features), nn.ReLU(inplace=True),\n nn.Conv2d(dense_features, ngf * mult, kernel_size=1, stride=1, bias=use_bias),\n ]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = 
Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def __init__(self, rng, input, layer_sizes, dropout_rates,\r\n activations=None, use_bias=True, prob_constraint_on=True):\r\n # Set up all the hidden layers\r\n weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))\r\n # we build two parallel layers\r\n # - training_layers for training with/without dropout\r\n # - testing_layers for testing the performance\r\n self.training_layers = []\r\n self.testing_layers = []\r\n \r\n # dropout the input\r\n next_training_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])\r\n next_testing_layer_input = input\r\n \r\n layer_counter = 0\r\n for n_in, n_out in weight_matrix_sizes[:-1]:\r\n \r\n # setup the training layer\r\n next_training_layer = DropoutHiddenLayer(rng=rng,\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n dropout_rate=dropout_rates[layer_counter])\r\n self.training_layers.append(next_training_layer)\r\n next_training_layer_input = next_training_layer.output\r\n\r\n # setup the testing layer\r\n # Reuse the paramters from the dropout layer here, in a different\r\n # path through the graph.\r\n next_testing_layer = HiddenLayer(rng=rng,\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=next_training_layer.W * (1 - dropout_rates[layer_counter]),\r\n b=next_training_layer.b)\r\n self.testing_layers.append(next_testing_layer)\r\n next_testing_layer_input = next_testing_layer.output\r\n \r\n layer_counter += 1\r\n \r\n # Set up the output layer for training layers\r\n n_in, n_out = weight_matrix_sizes[-1]\r\n training_output_layer = LogisticRegression(\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n prob_constraint_on=prob_constraint_on)\r\n self.training_layers.append(training_output_layer)\r\n\r\n # Set up the output layer for testing layers\r\n # Again, reuse paramters in the dropout output.\r\n 
testing_output_layer = LogisticRegression(\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=training_output_layer.W * (1 - dropout_rates[-1]),\r\n b=training_output_layer.b,\r\n prob_constraint_on=prob_constraint_on)\r\n self.testing_layers.append(testing_output_layer)\r\n\r\n # Use the MSE of the logistic regression layer as the objective\r\n # In training phase, we use the MSE of the logistic regression layer\r\n # which is on top of the dropout_layers\r\n self.training_MSE = self.training_layers[-1].MSE\r\n # In validation/testing phase, we use the MSE of the logistic regression layer\r\n # which is on top of the normal_layers\r\n self.testing_MSE = self.testing_layers[-1].MSE\r\n \r\n # NOTE: for prediction, we use all the weights, thus we should use\r\n # the normal layers instead of the dropout layers\r\n self.y_pred = self.testing_layers[-1].y_pred\r\n \r\n # Grab all the parameters together.\r\n self.params = [ param for layer in self.training_layers for param in layer.params ]\r\n # The above is Double Iteration in List Comprehension\r\n # See the discussion in\r\n # http://stackoverflow.com/questions/17657720/python-list-comprehension-double-for\r\n # In regular for-loop format, we have\r\n # for layer in self.dropout_layers:\r\n # for param in layer.params:\r\n # put param in the resulting list\r", "def train(args,train_loader, model, criterion, optimizer, epoch, pruner, writer):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n \n\n model.train()\n total =0 \n correct = 0\n reg_loss = 0.0\n train_loss = 0.0\n end = time.time()\n\n for i, (inputs, target) in enumerate(train_loader):\n\n target = target.cuda()\n inputs = inputs.cuda()\n \n inputs, targets_a, targets_b, lam = mixup_data(inputs, target, args.alpha, True)\n inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))\n\n ##input_var = torch.autograd.Variable(input)\n ##target_var = torch.autograd.Variable(target)\n\n\n outputs = model(inputs)\n ##outputs, Qs, Ys = model(inputs)\n ##loss = criterion(output, target_var)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n## print(\"loss:\")\n## print(loss)\n## print(loss.item())\n## train_loss += loss.data[0]\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n## prec1 = accuracy(output.data, target, topk=(1,))[0]\n## losses.update(loss.data.item(), input.size(0))\n## top1.update(prec1.item(), input.size(0))\n\n optimizer.zero_grad()\n\n\n\n## for y in Ys:\n## y.retain_grad()\n\n\n\n loss.backward()\n\n\n optimizer.step()\n\n\n\n\n if pruner is not None:\n pruner.prune(update_state=False)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if 0:\n kwalt = epoch*len(train_loader)+i\n if writer is not None:\n for j,q in enumerate(Qs):\n writer.add_scalar(\"variances %d\" % j, q.cpu().numpy(), kwalt)\n\n for l,y in enumerate(Ys):\n if y.grad is not None:\n writer.add_scalar(\"grad %d\" % (l-j), getQ(y.grad).cpu().numpy(), kwalt)\n\n## writer.add_scalars(\"variancess\", { \"%d\"% j : q.cpu().numpy() for j,q in enumerate(Qs)}, i)\n\n\n\n if 0:\n if i % args.print_freq == 0:\n print(\n f\"Epoch: [{epoch}][{i}/{len(train_loader)}]\\t\"\n f\"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t\"\n f\"Loss {losses.val:.4f} 
({losses.avg:.4f})\\t\"\n f\"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\"\n )\n niter = epoch*len(train_loader)+i\n\n batch_idx = i\n if writer is not None:\n writer.add_scalar('Train/Loss', train_loss/batch_idx, epoch)\n writer.add_scalar('Train/Prec@1', 100.*correct/total, epoch)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def __init__(self, X_train, y_train, input_shape, filters, kernel_size,\n maxpool, loss_function='categorical_crossentropy', nb_classes= 2, droput_iteration=20, dropout = 0.05):\n\n # We normalize the training data to have zero mean and unit standard\n # deviation in the training set if necessary\n\n # if normalize:\n # self.std_X_train = np.std(X_train, 0)\n # self.std_X_train[ self.std_X_train == 0 ] = 1\n # self.mean_X_train = np.mean(X_train, 0)\n # else:\n # self.std_X_train = np.ones(X_train.shape[ 1 ])\n # self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n self.droput_iteration = droput_iteration\n self.nb_classes = nb_classes\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n\n\n # model = Sequential()\n # model.add(Conv2D(filters, (kernel_size, kernel_size), padding='same',\n # input_shape=input_shape))\n # model.add(Activation('relu'))\n # model.add(Conv2D(filters, (kernel_size, kernel_size)))\n # model.add(Activation('relu'))\n # 
model.add(MaxPooling2D(pool_size=(maxpool, maxpool)))\n # model.add(Dropout(dropout))\n # c = 3.5\n # Weight_Decay = c / float(X_train.shape[0])\n # model.add(Flatten())\n # model.add(Dense(128, W_regularizer=l2(Weight_Decay)))\n # model.add(Activation('relu'))\n # model.add(Dropout(dropout))\n # model.add(Dense(nb_classes))\n # model.add(Activation('softmax'))\n\n # model.compile(loss=loss_function, optimizer='adam')\n\n c = 3.5\n Weight_Decay = c / float(X_train.shape[0])\n\n model = Sequential()\n model.add(Dense(256, input_shape =input_shape))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Dense(256, W_regularizer=l2(Weight_Decay)))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n\n model.compile(loss=loss_function, optimizer='adam')\n\n\n self.model = model\n # # We iterate the learning process\n # model.fit(X_train, y_train, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=0)\n\n # #function for bayesian inference using dropouts\n # self.f = K.function([model.layers[0].input, K.learning_phase()],\n # [model.layers[-1].output])", "def xlarge_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n #model.add(Dense(18, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = 
child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerTFDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.encoder = nn.Sequential(*sequence)\n dim = ndf * nf_mult\n self.transformer_enc = transformer.TransformerDecoders(dim, nhead=4, num_encoder_layers=4, dim_feedforward=dim*2, dropout=0.0)\n\n self.query_embed = nn.Embedding(1, dim)\n 
self.classifier = nn.Sequential(\n nn.Linear(dim, dim//2),\n nn.LayerNorm(dim//2),\n nn.ReLU(),\n nn.Linear(dim//2, dim//4),\n nn.LayerNorm(dim//4),\n nn.ReLU(),\n nn.Linear(dim//4, 1),\n nn.Sigmoid()\n )", "def linf1(parameter, bias=None, reg=0.01, lr=0.1):\n\n Norm = reg*lr\n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n sorted_w_and_b, indices = torch.sort(torch.abs(w_and_b), descending=True)\n\n # CUDA or CPU\n devicetype=\"cuda\"\n if w_and_b.is_cuda:\n devicetype=\"cuda\"\n else:\n devicetype=\"cpu\"\n\n\n #SLOW\n rows, cols = sorted_w_and_b.size()\n\n sorted_z = torch.cat((sorted_w_and_b, torch.zeros(rows,1, device=torch.device(devicetype))),1)\n subtracted = torch.clamp(sorted_w_and_b - sorted_z[:,1:],max=Norm) #Max=Norm important\n\n scale_indices = torch.cumsum(torch.ones(rows,cols, device=torch.device(devicetype)),1)\n scaled_subtracted = subtracted * scale_indices\n max_mass = torch.cumsum(scaled_subtracted,1)\n nonzero = torch.clamp(-1*(max_mass - Norm),0)\n\n oneN = 1.0/scale_indices\n\n # Algorithm described in paper, but these are all efficient GPU operation steps)\n # First we subtract every value from the cell next to it\n nonzero_ones = torch.clamp(nonzero * 1000000, max=1) #Hacky, but efficient\n shifted_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,:(cols-1)]),1)\n over_one = -1*(nonzero_ones - shifted_ones)\n last_one = torch.cat((over_one,torch.zeros(rows,1, device=torch.device(devicetype))),1)[:,1:]\n max_remain = last_one * nonzero\n shift_max = torch.cat((torch.zeros(rows,1, device=torch.device(devicetype)),max_remain[:,:(cols-1)]),1)\n first_col_nonzero_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,1:]),1) #Edge case for only first column\n tosub = first_col_nonzero_ones * subtracted + shift_max * oneN\n\n # We flip the tensor so that we can get a cumulative sum for the value to subtract, then flip back\n nastyflipS = torch.flip(torch.flip(tosub,[0,1]),[0])\n aggsubS = torch.cumsum(nastyflipS,1)\n nastyflipagainS = torch.flip(torch.flip(aggsubS,[0,1]),[0])\n\n # The proximal gradient step is equal to subtracting the sorted cumulative sum\n updated_weights = sorted_w_and_b - nastyflipagainS\n unsorted = torch.zeros(rows,cols, device=torch.device(devicetype)).scatter_(1,indices,updated_weights)\n final_w_and_b = torch.sign(w_and_b) * unsorted\n\n # Actually update parameters and bias\n if bias is not None:\n update = final_w_and_b[:,:cols-1]\n parameter.data = update\n update_b = final_w_and_b[:,-1]\n bias.data = update_b\n else:\n parameter.data = final_w_and_b", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n 
self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)", "def batch_norm_conv(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of 
the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def __init__(self, mode, dim, epsilon=1e-12, name='l2Normalize'):\n super(L2Normalization, self).__init__(mode, name)\n self.dim = dim\n self.epsilon = epsilon", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def l2_normalization(\n inputs,\n name,\n scaling=False,\n scale_initializer=init_ops.ones_initializer(),\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n data_format='NHWC',\n trainable=True,\n scope=None):\n\n with variable_scope.variable_scope(\n scope, 'L2Normalization_'+name, [inputs], reuse=reuse) as sc:\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n dtype = inputs.dtype.base_dtype\n if data_format == 'NHWC':\n # norm_dim = tf.range(1, inputs_rank-1)\n norm_dim = tf.range(inputs_rank-1, inputs_rank)\n params_shape = inputs_shape[-1:]\n elif data_format == 'NCHW':\n # norm_dim = tf.range(2, inputs_rank)\n norm_dim = 
tf.range(1, 2)\n params_shape = (inputs_shape[1])\n\n # Normalize along spatial dimensions.\n outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)\n # Additional scaling.\n if scaling:\n scale_collections = utils.get_variable_collections(\n variables_collections, 'scale')\n scale = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=scale_initializer,\n collections=scale_collections,\n trainable=trainable)\n if data_format == 'NHWC':\n outputs = tf.multiply(outputs, scale)\n elif data_format == 'NCHW':\n scale = tf.expand_dims(scale, axis=-1)\n scale = tf.expand_dims(scale, axis=-1)\n outputs = tf.multiply(outputs, scale)\n # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))\n\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def compute_loss(model, loader, loss_fn, optimizer=None):\n\n total_loss = 0.0\n count_batches = 0\n for x_, y_, qm_, db_mask, blink_mask in loader:\n batch_loss_list = []\n xbatch_list = []\n for mask in [db_mask, blink_mask]:\n idxes = get_idxes_from_mask(mask)\n x_pick, y_pick, qm_pick = x_[idxes], y_[idxes], qm_[idxes]\n y_pos_idxes = torch.nonzero(y_pick.squeeze(), as_tuple=False).reshape(1, -1)[0]\n y_neg_idxes = torch.nonzero(~y_pick.squeeze().bool(), as_tuple=False).reshape(1, -1)[0]\n\n if (len(y_pos_idxes) == 0) or (len(y_neg_idxes) == 0):\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(x_pick) <= 1:\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(y_pos_idxes) == 1:\n y_pos_idx = y_pos_idxes[0]\n else: # len(y_pos_idxes) > 1:\n # TODO: I am just always using the first positive example for now\n # rand_idx = random.choice(list(range(len(y_pos_idxes))))\n # print(y_pos_idxes)\n rand_idx = 0\n y_pos_idx = y_pos_idxes[rand_idx]\n\n batch_length = 1 + len(y_neg_idxes)\n batch_feature_len = x_.shape[1]\n x_batch = torch.zeros(batch_length, batch_feature_len)\n x_batch[:-1:, :] = x_pick[y_neg_idxes]\n x_batch[-1, :] = x_pick[y_pos_idx] # put positive to the end\n xbatch_list.append(x_batch)\n # print(y_pos_idx, len(y_neg_idxes))\n # print(\"batch\", x_batch.shape)\n\n if (len(xbatch_list[0]) == 0) and (len(xbatch_list[1]) == 0):\n # skip if both batches are []\n # print(\"hitting cases 
without any examples [SHOULD BE WRONG]\")\n continue\n elif (len(xbatch_list[0]) == 0) or (len(xbatch_list[1]) == 0):\n # continue # TODO: testing whether improvements made if we only use cases where there are sources from both\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 1\n yhat_neg = yhat[:-1]\n yhat_pos = yhat[-1].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones((len(yhat) - 1), 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n else:\n # get yhats for both BLINK and DB batches\n # print(len(xbatch_list[0]), len(xbatch_list[1]))\n # print((xbatch_list[0], xbatch_list[1]))\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 2\n yhat_neg = torch.zeros(extended_batch_length, 1)\n yhat_neg[:len(xbatch_list[0])-1] = yhat[:len(xbatch_list[0])-1]\n yhat_neg[len(xbatch_list[0])-1:] = yhat[len(xbatch_list[0]):-1]\n for idx in [len(xbatch_list[0]), -1]:\n yhat_pos = yhat[idx].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones(extended_batch_length, 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n\n # update every question-mention\n if batch_loss_list and optimizer:\n (sum(batch_loss_list)/len(batch_loss_list)).backward()\n optimizer.step()\n\n avg_loss = total_loss / count_batches\n\n return avg_loss, batch_length", "def train_net(epoch, data, net, opti, batch_graph):\n global num_batches, batch_size\n # train the network\n for num in range(epoch):\n # run each batch through each round\n for batch_id in range(num_batches):\n # calculate the neighborhood for the graph\n batch = torch.from_numpy(data[batch_id]).float()\n batch = batch.view(batch_size, -1)\n batch_distances = pairwise_distances(batch)\n nbr_graph_tensor = torch.from_numpy(batch_graph[batch_id]).float()\n batch_distances_masked = batch_distances * nbr_graph_tensor.float()\n global lbda\n out = net(batch, False) # run the batch through the network\n svd_loss, out = implement_svd(out) # calculate the SVD L2,1 loss and SVD representation\n output_distances = pairwise_distances(out)\n # Multiply the distances between each pair of points with the neighbor mask\n output_distances_masked = output_distances * nbr_graph_tensor.float()\n # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n nbr_distance = nbr_diff.norm()\n svd_loss *= lbda_svd # multiply SVD loss by its scaling factor\n # find variance in all directions\n var = 0\n for i in range(out.size()[0]):\n var += lbda_var / out[i].var()\n loss = nbr_distance + svd_loss + var # loss contains all three terms\n opti.zero_grad()\n loss.backward()\n opti.step()\n print('Epoch: %f, Step: %f, Loss: %.2f' % (num, batch_id + 1, loss.data.cpu().numpy()))\n\n # find the ideal number of dimensions\n global final_dim\n batch = torch.from_numpy(data[0]).float()\n batch = batch.view(batch_size, -1)\n out = net(batch, False)\n u, s, v = torch.svd(out)\n final_dim = calc_dim(s)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation 
function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)" ]
[ "0.61973625", "0.605561", "0.59693855", "0.5950774", "0.58955586", "0.58955586", "0.5811553", "0.57387733", "0.5654752", "0.5646109", "0.5632108", "0.56242067", "0.5616107", "0.56091213", "0.5604995", "0.559818", "0.5580515", "0.55549634", "0.5554276", "0.5544025", "0.554147", "0.55350965", "0.55175644", "0.5482476", "0.5481916", "0.5471834", "0.5470667", "0.54523844", "0.54456246", "0.54452926", "0.5444863", "0.5441718", "0.54360014", "0.5434888", "0.5431906", "0.54282373", "0.54149354", "0.54087156", "0.5404278", "0.540064", "0.53953886", "0.53868693", "0.53840464", "0.5383069", "0.537987", "0.5378028", "0.5374169", "0.53707546", "0.5370373", "0.53696245", "0.5369289", "0.5365113", "0.5364293", "0.5361659", "0.53589344", "0.5356375", "0.535055", "0.5346654", "0.53436905", "0.5343398", "0.53420496", "0.5340915", "0.5329893", "0.53238344", "0.5321677", "0.5313274", "0.5311683", "0.53102624", "0.53097326", "0.53093064", "0.53061944", "0.5303811", "0.52970344", "0.5289948", "0.5287547", "0.52860415", "0.52819157", "0.52780706", "0.5277735", "0.5272196", "0.52712256", "0.52666557", "0.5266454", "0.52643776", "0.52619535", "0.52597666", "0.5259278", "0.5258576", "0.52565193", "0.5254235", "0.5254233", "0.5254014", "0.52517366", "0.5246567", "0.52464074", "0.52433485", "0.5242092", "0.52382356", "0.5238028", "0.52367747" ]
0.6014573
2
dropout + batch norm + l1_l2
def architecture_CONV_FC_batch_norm_dropout_L1_l2_SIGMOID(
    X, nbclasses, nb_conv=1, nb_fc=1
):
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.0e-3
    dropout_rate = 0.5
    conv_kernel = 3
    conv_filters = 64
    nbunits_fc = 128
    activation = sigmoid

    model = Sequential(
        name=f"""{str(nb_conv)}_CONV_k_
        {str(conv_kernel)}_{str(nb_fc)}
        _FC128_bn_d_{str(dropout_rate)}
        _SIGMOID"""
    )
    model.add(
        Conv1D(
            input_shape=input_shape,
            activation=activation,
            kernel_regularizer=l1_l2(l1_l2_rate),
            kernel_size=conv_kernel,
            filters=conv_filters,
        )
    )
    model.add(BatchNormalization())

    # if more convolutional layers are defined in parameters
    if nb_conv > 1:
        for _layer in range(nb_conv):
            model.add(
                Conv1D(
                    kernel_size=conv_kernel,
                    filters=conv_filters,
                    activation=activation,
                    kernel_regularizer=l1_l2(l1_l2_rate),
                )
            )
            model.add(BatchNormalization())

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(
            Dense(
                nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation
            )
        )
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def architecture_CONV_FC_batch_norm_dropout_L1_l2(\n X, nbclasses, nb_conv=1, nb_fc=1, kernel_initializer=\"random_normal\"\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = relu\n kernel_initializer = kernel_initializer\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}__CONV_k{str(conv_kernel)}_\n {str(nb_fc)}_initializer_{kernel_initializer}_\n _FC128_bn_d_{str(dropout_rate)}\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_LEAKY_ReLU(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _LEAKY_ReLU\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(Dense(nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate)))\n model.add(LeakyReLU())\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_TANH(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = tanh\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_\n CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _TANH\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n 
activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data 
from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from 
the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n 
self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def build_generator(latent_dim=100):\n # The weight initialization and the slope are chosen to accord with the\n # Parameters in the paper. I only change padding when it seems neccesary to\n # to mantain adequate dimensons. 
\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.3\n \n inputs = keras.Input(shape=(1,1,100))\n # First convolutional layer\n x = Conv2DTranspose(\n 1024, \n kernel_size=(4,4), \n strides=1, \n kernel_initializer=weight_initializer,\n padding='valid',\n use_bias=False\n )(inputs)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Second convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 512,\n kernel_size = 4,\n strides = (2,2),\n padding = 'same',\n use_bias = False\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 256,\n kernel_size = 5,\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 128,\n kernel_size = (5,5),\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fifth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 3,\n kernel_size = (5,5),\n use_bias=False,\n strides = (2,2),\n padding = 'same',\n activation='tanh'\n )(x)\n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss", "def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n if type(hidden_dims) != list:\n raise ValueError('hidden_dim has to be a list')\n\n self.L = len(hidden_dims) + 1\n self.N = input_dim\n self.C = num_classes\n dims = [self.N] + hidden_dims + [self.C]\n Ws = {'W' + str(i + 1):\n weight_scale * np.random.randn(dims[i], dims[i + 1]) for i in range(len(dims) - 1)}\n b = {'b' + str(i + 1): np.zeros(dims[i + 1])\n for i in range(len(dims) - 1)}\n\n self.params.update(b)\n self.params.update(Ws)\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). 
You can pass the same dropout_param to each dropout layer.\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def masked_l2(preds, actuals, mask):\n loss = tf.nn.l2(preds, actuals)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def loss_false(code_batch, k=1):\n\n _, n_latent = code_batch.get_shape()\n\n # changing these parameters is equivalent to changing the strength of the\n # regularizer, so we keep these fixed (these values correspond to the\n # original values used in Kennel et al 1992).\n rtol = 20.0\n atol = 2.0\n # k_frac = 0.01\n # n_batch = tf.cast(tf.keras.backend.shape(code_batch)[0], tf.float32)\n # assert False, n_batch\n # k = max(1, int(k_frac * n_batch))\n\n ## Vectorized version of distance matrix calculation\n tri_mask = tf.linalg.band_part(tf.ones((n_latent, n_latent), tf.float32), -1, 0)\n batch_masked = tf.multiply(tri_mask[:, tf.newaxis, :], code_batch[tf.newaxis, ...])\n X_sq = tf.reduce_sum(batch_masked * batch_masked, axis=2, 
keepdims=True)\n pdist_vector = (\n X_sq\n + tf.transpose(X_sq, [0, 2, 1])\n - 2 * tf.matmul(batch_masked, tf.transpose(batch_masked, [0, 2, 1]))\n )\n all_dists = pdist_vector\n all_ra = tf.sqrt(\n (1 / (tf.range(1, 1 + n_latent, dtype=tf.float32)))\n * tf.squeeze(\n tf.reduce_sum(\n tf.square(tf.math.reduce_std(batch_masked, axis=1, keepdims=True)),\n axis=2,\n )\n )\n )\n\n # Avoid singularity in the case of zeros\n all_dists = tf.clip_by_value(all_dists, 1e-14, tf.reduce_max(all_dists))\n\n # inds = tf.argsort(all_dists, axis=-1)\n _, inds = tf.math.top_k(-all_dists, int(k + 1))\n # top_k currently faster than argsort because it truncates matrix\n\n neighbor_dists_d = tf.gather(all_dists, inds, batch_dims=-1)\n neighbor_new_dists = tf.gather(all_dists[1:], inds[:-1], batch_dims=-1)\n\n # Eq. 4 of Kennel et al.\n scaled_dist = tf.sqrt(\n (neighbor_new_dists - neighbor_dists_d[:-1]) / neighbor_dists_d[:-1]\n )\n\n # Kennel condition #1\n is_false_change = scaled_dist > rtol\n # Kennel condition 2\n is_large_jump = neighbor_new_dists > atol * all_ra[:-1, tf.newaxis, tf.newaxis]\n\n is_false_neighbor = tf.math.logical_or(is_false_change, is_large_jump)\n total_false_neighbors = tf.cast(is_false_neighbor, tf.int32)[..., 1 : (k + 1)]\n\n # Pad zero to match dimensionality of latent space\n reg_weights = 1 - tf.reduce_mean(\n tf.cast(total_false_neighbors, tf.float64), axis=(1, 2)\n )\n reg_weights = tf.pad(reg_weights, [[1, 0]])\n\n # Find average batch activity\n activations_batch_averaged = tf.sqrt(tf.reduce_mean(tf.square(code_batch), axis=0))\n\n # L2 Activity regularization\n activations_batch_averaged = tf.cast(activations_batch_averaged, tf.float64)\n loss = tf.reduce_sum(tf.multiply(reg_weights, activations_batch_averaged))\n\n return tf.cast(loss, tf.float32)", "def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, dropout_name=None):\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob, dropout_name=dropout_name)\n return output_tensor", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #\n # initialized from a normal distribution centered at 0 with standard #\n # deviation equal to weight_scale. Biases should be initialized to zero. #\n # #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. 
#\n ############################################################################\n dimension = [input_dim] + hidden_dims + [num_classes]\n for i in range(1, self.num_layers+1):\n self.params['W{0}'.format(i)] = weight_scale * np.random.randn(dimension[i-1], dimension[i])\n self.params['b{0}'.format(i)] = np.zeros(dimension[i])\n\n if self.normalization in ['batchnorm', 'layernorm']:\n self._batchnormInit()\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, 
momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def batchnorm_init(m, kernelsize=3):\r\n n = kernelsize**2 * m.num_features\r\n m.weight.data.normal_(0, math.sqrt(2. / (n)))\r\n m.bias.data.zero_()", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def _optimization(dataset1, dataset2, nb_epochs=3000):\n\n x1_mean = dataset1['data'].mean()\n x1_std = dataset1['data'].std()\n x1 = (dataset1['data'] - x1_mean) / (x1_std)\n y1 = dataset1['labels']\n Y1 = dataset1['hot_labels']\n\n x2_mean = dataset2['data'].mean()\n x2_std = dataset2['data'].std()\n x2 = (dataset2['data'] - x2_mean) / (x2_std)\n\n x_model1 = Input(x1.shape[1:])\n y_model1 = Dropout(0.1)(x_model1)\n y_model1 = Dense(50, activation='relu')(x_model1)\n y_model1 = Dropout(0.2)(y_model1)\n y_model1 = Dense(50, activation='relu')(y_model1)\n out_model1 = Dense(len(np.unique(y1)), activation='softmax')(y_model1)\n\n model1 = Model(input=x_model1, output=out_model1)\n\n optimizer = keras.optimizers.Adadelta()\n model1.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=200, min_lr=0.1)\n\n hist = model1.fit(x1, Y1, batch_size=x1.shape[0], nb_epoch=nb_epochs, verbose=1, shuffle=True, callbacks=[reduce_lr])\n\n dataset2_new_labels = []\n\n for i in range(x2.shape[0]):\n xTrain = x2[i,:].reshape((1,x2.shape[1]))\n dataset2_new_labels.append(np.argmax(model1.predict(xTrain, batch_size=1)))\n\n # Print the testing results which has the l in range(x_train.shape[0]):\n # for i in range(len(x_test1)):\n # xTest = x_test1[i,:].reshape((1,2048))\n # print((np.argmax(model.predict(xTest, batch_size=1)), y_test1[i]))\n # log = pd.DataFrame(hist.history)\n # print(\"saving results for 100 nodes\" + _MODE + fname)\n # log.to_json('accuracies/accuracy_100_' + _MODE + fname + '.json')\n\n # with open('Text_Files/' + fname + '_results.txt', 'w') as text_file:\n # text_file.write(fname + '<<<=====>>>' + str(max(log.val_acc.values)))\n\n # assert 2==1\n\n x_model1 = []\n y_model1 = []\n out_model1 = []\n model1 = []\n\n return dataset2_new_labels", "def _fit_apgl(x, mask, lmbd,\n max_iter=100, L=1e-3, beta=0.5,\n tol=1e-3, print_loss=False):\n # init\n n1, n2 = x.shape\n rdm = RandomState(123)\n theta = rdm.randn(n1, n2) # natural parameter\n thetaOld = theta\n alpha = 1\n alphaOld = 0\n\n # main loop\n loss = _cross_entropy(x, mask, theta) + lmbd * \\\n np.linalg.norm(theta, ord='nuc')\n iteration = []\n for i in range(int(max_iter)):\n if print_loss:\n print(f'Epoch {i}, loss {loss:.3f}')\n iteration.append(loss)\n lossOld = loss\n # nesterov extropolation\n A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)\n for _ in range(50):\n S = A - L * _gradient(x, mask, A)\n thetaNew = svt(S, lmbd * L)\n ce = _cross_entropy(x, mask, thetaNew)\n if ce < _bound(x, mask, thetaNew, theta, L):\n break\n else:\n L = beta * L\n thetaOld = theta\n theta = thetaNew\n alphaOld = alpha\n alpha = (1 + np.sqrt(4 + alpha ** 2)) / 2\n loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')\n if i == max_iter - 1:\n print(f'Reach max iteration {i+1}')\n if np.abs(lossOld - loss) < tol:\n break\n\n return theta, np.array(iteration)", "def backward_G(self):\n # Calculate regularzation 
loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel_old, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n # norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n 
self.linear_group = nn.Sequential(*sequence)", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.LeakyReLU(alpha=0.1))", "def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = 
pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, 
i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def calculate2_simpleKL_norm(pred, truth, rnd=0.01):\n return 1 - calculate2_simpleKL(pred, truth, rnd=rnd) / 4000", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):\n sess = tf.get_default_session()\n\n idxes = np.random.permutation(len(D))\n train_idxes = idxes[:int(len(D)*0.8)]\n valid_idxes = idxes[int(len(D)*0.8):]\n\n def _batch(idx_list,add_noise):\n batch = []\n\n if len(idx_list) > batch_size:\n idxes = np.random.choice(idx_list,batch_size,replace=False)\n else:\n idxes = idx_list\n\n for i in idxes:\n batch.append(D[i])\n\n b_x,b_y,b_l = zip(*batch)\n x_split = np.array([len(x) for x in b_x])\n y_split = np.array([len(y) for y in b_y])\n b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)\n\n if add_noise:\n b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1\n\n return b_x,b_y,x_split,y_split,b_l\n\n for it in tqdm(range(iter),dynamic_ncols=True):\n b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)\n\n loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l,\n self.l2_reg:l2_reg,\n })\n\n if debug:\n if it % 100 == 0 or it < 10:\n b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)\n valid_acc = sess.run(self.acc,feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l\n })\n tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))\n\n #if valid_acc >= 0.95:\n # print('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))\n # print('early termination@%08d'%it)\n # break", "def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta", "def configure_batchnorm(x, model):\n bs = x.size(0)\n # train mode, because dent optimizes the model to minimize entropy\n model.train()\n # disable grad, to (re-)enable only what dent updates\n model.requires_grad_(False)\n # configure norm for dent updates:\n # enable grad + keep statisics + repeat affine params\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight = nn.Parameter(m.ckpt_weight.unsqueeze(0).repeat(bs, 1))\n m.bias = nn.Parameter(m.ckpt_bias.unsqueeze(0).repeat(bs, 1))\n m.requires_grad_(True)\n return model", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + 
tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1))", "def scipy_minus_gradient(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._gradient_iter += 1\n g = None\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n first_term = vector_graphs[correct_rows,:].sum(axis=0)\n all_scores = vector_graphs * w\n all_probs = []\n for batch in batches:\n batch_scores = all_scores[batch]\n S = logsumexp(batch_scores)\n all_probs.append(np.exp(batch_scores - S))\n all_probs = numpy.hstack(all_probs)\n second_term = all_probs * vector_graphs\n if g is None:\n g = second_term - first_term\n else:\n g = g + second_term - first_term\n index += 1\n if index % 100 == 0:\n print('Gradient '+str(index)+' processed')\n g = numpy.ndarray.flatten(numpy.asarray(g)) / len(all_vector_graphs)\n if sigma != None:\n g = g + sigma * w\n print('Gradient norm:'+str(scipy.linalg.norm(g)))\n sys.stdout.flush()\n if perceptron and perceptron._model_pickle:\n if perceptron._gradient_iter % 5 == 0:\n perceptron._weights = numpy.reshape(w,(1,perceptron._num_features))\n perceptron.save(perceptron._model_pickle+'_'+str(perceptron._gradient_iter))\n return g", "def neg_sampling_loss_and_gradient(\n center_word_vec,\n outside_word_idx,\n outside_vectors,\n dataset,\n K=10\n):\n\n # Negative sampling of words is done for you. Do not modify this if you\n # wish to match the autograder and receive points!\n neg_sample_word_indices = get_negative_samples(outside_word_idx, dataset, K)\n indices = [outside_word_idx] + neg_sample_word_indices\n\n ### YOUR CODE HERE\n \n outside_word_vector = outside_vectors[outside_word_idx]\n outside_words_dot_center_word = outside_word_vector.dot(center_word_vec)\n \n neg_samples_vector = outside_vectors[neg_sample_word_indices]\n neg_samples_dot_center_word = neg_samples_vector.dot(center_word_vec)\n \n sigmoid_outside_dot = sigmoid(outside_words_dot_center_word)\n sigmoid_negative_dot = sigmoid(-neg_samples_dot_center_word)\n\n loss = -np.log(sigmoid_outside_dot) -np.sum(np.log(sigmoid_negative_dot))\n \n grad_center_vec = \\\n (sigmoid_outside_dot - 1) * outside_word_vector + \\\n np.sum((1 - sigmoid_negative_dot)[:, np.newaxis] * neg_samples_vector, axis = 0)\n \n grad_outside_vecs = np.zeros_like(outside_vectors)\n grad_outside_vecs[outside_word_idx] = (sigmoid_outside_dot - 1) * center_word_vec\n \n for i, neg_index in enumerate(neg_sample_word_indices):\n grad_outside_vecs[neg_index] += \\\n (1 - sigmoid_negative_dot[i]) * center_word_vec\n\n ### END YOUR CODE\n\n return loss, grad_center_vec, grad_outside_vecs", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n CustomBatchNormalization(),\n LeakyReLU(alpha=0.1))", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. 
\n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def test_keras_unsafe_l2_norm():\n model, X, y, Xval, yval = make_small_model()\n\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n\n isDP, msg = safekeras.check_optimizer_is_DP(model.optimizer)\n assert isDP, \"failed check that optimizer is dP\"\n\n model.l2_norm_clip = 0.9\n\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n\n DPused, msg = safekeras.check_DP_used(model.optimizer)\n assert (\n DPused\n ), \"Failed check that DP version of optimiser was actually used in training\"\n\n loss, acc = model.evaluate(X, y)\n expected_accuracy = UNSAFE_ACC\n assert round(acc, 6) == round(\n expected_accuracy, 6\n ), \"failed check that accuracy is as expected\"\n\n msg, disclosive = model.preliminary_check()\n correct_msg = (\n \"WARNING: model parameters may present a disclosure risk:\"\n \"\\n- parameter l2_norm_clip = 0.9 identified as less than the recommended \"\n \"min value of 1.0.\"\n )\n assert msg == correct_msg, \"failed check correct warning message\"\n assert disclosive is True, \"failed check disclosive is True\"", "def train(self, mode=True):\n super(RCRNN, 
self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def darknet_CBL(*args, **kwargs):\n\n no_bias_kwargs = {'use_bias': False} # 没懂为啥用 no_bias\n no_bias_kwargs.update(kwargs)\n return compose(\n darknet_Conv2D(*args, **no_bias_kwargs),\n custom_batchnormalization(),\n LeakyReLU(alpha=0.1)\n )", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n x, mu, sigma, gamma, beta = cache\n N = dout.shape[0]\n X_mu = x - mu\n var_inv = 1./sigma\n \n dX_norm = dout * gamma\n dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3)\n dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0)\n\n dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum(dout * X_mu/sigma, axis=0)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):\n sigma2 = sigma * sigma\n\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n\n return outside_mul", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = 
torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def relaxed_ba_bias(Xinput, L, lamb, beta, max_iter=300):\n X = Xinput.T # X: n_samples x n_dim\n D, m = X.shape\n B = np.sign(np.random.rand(L, m))\n c1 = np.random.rand(L,1)\n c2 = np.random.rand(D,1)\n\n for i in range(max_iter):\n # given B, compute W1\n W1 = lamb*np.matmul(np.matmul((B - c1), X.T), \\\n np.linalg.inv(lamb*np.matmul(X,X.T) + beta*np.eye(D)))\n\n # given B, compute W2\n W2 = np.matmul( np.matmul((X-c2), B.T), \\\n np.linalg.inv(np.matmul(B,B.T) + beta*np.eye(L)))\n\n # compute c1\n c1 = (1.0/m)*np.matmul(B - np.matmul(W1, X), np.ones((m,1)))\n # compute c2\n c2 = (1.0/m)*np.matmul(X - np.matmul(W2, B), np.ones((m,1)))\n\n # given W1, W2, c1, c2, compute B\n Xtmp = X - c2\n H = np.matmul(W1, X) + c1\n B = learn_B_new(Xtmp.T, W2.T, B.T, H.T, lamb);\n\n B = B.T\n\n # X_reconst = np.matmul(W2, np.sign(np.matmul(W1, X) + c1)) + c2\n # mse = np.mean(np.square(X_reconst - X))\n # print('mse {}'.format(mse))\n return W2, W1, c2, c1, B", "def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(DenseGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n dense_features = ngf * mult\n dense_features = dense_features + 6 * 32\n for i in range(n_blocks):\n model += [DenseBlock(num_layers=6, num_input_features=ngf * mult, bn_size=4, growth_rate=32, drop_rate=0,\n norm_layer=norm_layer)]\n model += [norm_layer(dense_features), nn.ReLU(inplace=True),\n nn.Conv2d(dense_features, ngf * mult, kernel_size=1, stride=1, bias=use_bias),\n ]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = 
BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip", "def __init__(self, rng, input, layer_sizes, dropout_rates,\r\n activations=None, use_bias=True, prob_constraint_on=True):\r\n # Set up all the hidden layers\r\n weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))\r\n # we build two parallel layers\r\n # - training_layers for training with/without dropout\r\n # - testing_layers for testing the performance\r\n self.training_layers = []\r\n self.testing_layers = []\r\n \r\n # dropout the input\r\n next_training_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])\r\n next_testing_layer_input = input\r\n \r\n layer_counter = 0\r\n for n_in, n_out in weight_matrix_sizes[:-1]:\r\n \r\n # setup the training layer\r\n next_training_layer = DropoutHiddenLayer(rng=rng,\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n dropout_rate=dropout_rates[layer_counter])\r\n self.training_layers.append(next_training_layer)\r\n next_training_layer_input = next_training_layer.output\r\n\r\n # setup the testing layer\r\n # Reuse the paramters from the dropout layer here, in a different\r\n # path through the graph.\r\n next_testing_layer = HiddenLayer(rng=rng,\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n 
activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=next_training_layer.W * (1 - dropout_rates[layer_counter]),\r\n b=next_training_layer.b)\r\n self.testing_layers.append(next_testing_layer)\r\n next_testing_layer_input = next_testing_layer.output\r\n \r\n layer_counter += 1\r\n \r\n # Set up the output layer for training layers\r\n n_in, n_out = weight_matrix_sizes[-1]\r\n training_output_layer = LogisticRegression(\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n prob_constraint_on=prob_constraint_on)\r\n self.training_layers.append(training_output_layer)\r\n\r\n # Set up the output layer for testing layers\r\n # Again, reuse paramters in the dropout output.\r\n testing_output_layer = LogisticRegression(\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=training_output_layer.W * (1 - dropout_rates[-1]),\r\n b=training_output_layer.b,\r\n prob_constraint_on=prob_constraint_on)\r\n self.testing_layers.append(testing_output_layer)\r\n\r\n # Use the MSE of the logistic regression layer as the objective\r\n # In training phase, we use the MSE of the logistic regression layer\r\n # which is on top of the dropout_layers\r\n self.training_MSE = self.training_layers[-1].MSE\r\n # In validation/testing phase, we use the MSE of the logistic regression layer\r\n # which is on top of the normal_layers\r\n self.testing_MSE = self.testing_layers[-1].MSE\r\n \r\n # NOTE: for prediction, we use all the weights, thus we should use\r\n # the normal layers instead of the dropout layers\r\n self.y_pred = self.testing_layers[-1].y_pred\r\n \r\n # Grab all the parameters together.\r\n self.params = [ param for layer in self.training_layers for param in layer.params ]\r\n # The above is Double Iteration in List Comprehension\r\n # See the discussion in\r\n # http://stackoverflow.com/questions/17657720/python-list-comprehension-double-for\r\n # In regular for-loop format, we have\r\n # for layer in self.dropout_layers:\r\n # for param in layer.params:\r\n # put param in the resulting list\r", "def train(args,train_loader, model, criterion, optimizer, epoch, pruner, writer):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n \n\n model.train()\n total =0 \n correct = 0\n reg_loss = 0.0\n train_loss = 0.0\n end = time.time()\n\n for i, (inputs, target) in enumerate(train_loader):\n\n target = target.cuda()\n inputs = inputs.cuda()\n \n inputs, targets_a, targets_b, lam = mixup_data(inputs, target, args.alpha, True)\n inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))\n\n ##input_var = torch.autograd.Variable(input)\n ##target_var = torch.autograd.Variable(target)\n\n\n outputs = model(inputs)\n ##outputs, Qs, Ys = model(inputs)\n ##loss = criterion(output, target_var)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n## print(\"loss:\")\n## print(loss)\n## print(loss.item())\n## train_loss += loss.data[0]\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n## prec1 = accuracy(output.data, target, topk=(1,))[0]\n## losses.update(loss.data.item(), input.size(0))\n## top1.update(prec1.item(), input.size(0))\n\n optimizer.zero_grad()\n\n\n\n## for y in Ys:\n## 
y.retain_grad()\n\n\n\n loss.backward()\n\n\n optimizer.step()\n\n\n\n\n if pruner is not None:\n pruner.prune(update_state=False)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if 0:\n kwalt = epoch*len(train_loader)+i\n if writer is not None:\n for j,q in enumerate(Qs):\n writer.add_scalar(\"variances %d\" % j, q.cpu().numpy(), kwalt)\n\n for l,y in enumerate(Ys):\n if y.grad is not None:\n writer.add_scalar(\"grad %d\" % (l-j), getQ(y.grad).cpu().numpy(), kwalt)\n\n## writer.add_scalars(\"variancess\", { \"%d\"% j : q.cpu().numpy() for j,q in enumerate(Qs)}, i)\n\n\n\n if 0:\n if i % args.print_freq == 0:\n print(\n f\"Epoch: [{epoch}][{i}/{len(train_loader)}]\\t\"\n f\"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t\"\n f\"Loss {losses.val:.4f} ({losses.avg:.4f})\\t\"\n f\"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\"\n )\n niter = epoch*len(train_loader)+i\n\n batch_idx = i\n if writer is not None:\n writer.add_scalar('Train/Loss', train_loss/batch_idx, epoch)\n writer.add_scalar('Train/Prec@1', 100.*correct/total, epoch)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def __init__(self, X_train, y_train, input_shape, filters, kernel_size,\n maxpool, loss_function='categorical_crossentropy', nb_classes= 2, droput_iteration=20, dropout = 0.05):\n\n # We normalize the training data to have zero mean and unit 
standard\n # deviation in the training set if necessary\n\n # if normalize:\n # self.std_X_train = np.std(X_train, 0)\n # self.std_X_train[ self.std_X_train == 0 ] = 1\n # self.mean_X_train = np.mean(X_train, 0)\n # else:\n # self.std_X_train = np.ones(X_train.shape[ 1 ])\n # self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n self.droput_iteration = droput_iteration\n self.nb_classes = nb_classes\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n\n\n # model = Sequential()\n # model.add(Conv2D(filters, (kernel_size, kernel_size), padding='same',\n # input_shape=input_shape))\n # model.add(Activation('relu'))\n # model.add(Conv2D(filters, (kernel_size, kernel_size)))\n # model.add(Activation('relu'))\n # model.add(MaxPooling2D(pool_size=(maxpool, maxpool)))\n # model.add(Dropout(dropout))\n # c = 3.5\n # Weight_Decay = c / float(X_train.shape[0])\n # model.add(Flatten())\n # model.add(Dense(128, W_regularizer=l2(Weight_Decay)))\n # model.add(Activation('relu'))\n # model.add(Dropout(dropout))\n # model.add(Dense(nb_classes))\n # model.add(Activation('softmax'))\n\n # model.compile(loss=loss_function, optimizer='adam')\n\n c = 3.5\n Weight_Decay = c / float(X_train.shape[0])\n\n model = Sequential()\n model.add(Dense(256, input_shape =input_shape))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Dense(256, W_regularizer=l2(Weight_Decay)))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n\n model.compile(loss=loss_function, optimizer='adam')\n\n\n self.model = model\n # # We iterate the learning process\n # model.fit(X_train, y_train, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=0)\n\n # #function for bayesian inference using dropouts\n # self.f = K.function([model.layers[0].input, K.learning_phase()],\n # [model.layers[-1].output])", "def xlarge_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n #model.add(Dense(18, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def remove_tracking(model, norm_type, norm_power=0.2):\n 
normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. 
#\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerTFDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.encoder = nn.Sequential(*sequence)\n dim = ndf * nf_mult\n self.transformer_enc = transformer.TransformerDecoders(dim, nhead=4, num_encoder_layers=4, dim_feedforward=dim*2, dropout=0.0)\n\n self.query_embed = nn.Embedding(1, dim)\n self.classifier = nn.Sequential(\n nn.Linear(dim, dim//2),\n nn.LayerNorm(dim//2),\n nn.ReLU(),\n nn.Linear(dim//2, dim//4),\n nn.LayerNorm(dim//4),\n nn.ReLU(),\n nn.Linear(dim//4, 1),\n nn.Sigmoid()\n )", "def linf1(parameter, bias=None, reg=0.01, lr=0.1):\n\n Norm = reg*lr\n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n sorted_w_and_b, indices = torch.sort(torch.abs(w_and_b), descending=True)\n\n # CUDA or CPU\n devicetype=\"cuda\"\n if w_and_b.is_cuda:\n devicetype=\"cuda\"\n else:\n devicetype=\"cpu\"\n\n\n #SLOW\n rows, cols = sorted_w_and_b.size()\n\n sorted_z = torch.cat((sorted_w_and_b, torch.zeros(rows,1, device=torch.device(devicetype))),1)\n subtracted = torch.clamp(sorted_w_and_b - sorted_z[:,1:],max=Norm) #Max=Norm important\n\n scale_indices = torch.cumsum(torch.ones(rows,cols, device=torch.device(devicetype)),1)\n scaled_subtracted = subtracted * scale_indices\n max_mass = torch.cumsum(scaled_subtracted,1)\n nonzero = torch.clamp(-1*(max_mass - Norm),0)\n\n oneN = 1.0/scale_indices\n\n # Algorithm described in paper, but these are all efficient GPU operation steps)\n # First we subtract every value from the cell next to it\n nonzero_ones = torch.clamp(nonzero * 1000000, max=1) #Hacky, but efficient\n shifted_ones = torch.cat((torch.ones(rows,1, 
device=torch.device(devicetype)),nonzero_ones[:,:(cols-1)]),1)\n over_one = -1*(nonzero_ones - shifted_ones)\n last_one = torch.cat((over_one,torch.zeros(rows,1, device=torch.device(devicetype))),1)[:,1:]\n max_remain = last_one * nonzero\n shift_max = torch.cat((torch.zeros(rows,1, device=torch.device(devicetype)),max_remain[:,:(cols-1)]),1)\n first_col_nonzero_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,1:]),1) #Edge case for only first column\n tosub = first_col_nonzero_ones * subtracted + shift_max * oneN\n\n # We flip the tensor so that we can get a cumulative sum for the value to subtract, then flip back\n nastyflipS = torch.flip(torch.flip(tosub,[0,1]),[0])\n aggsubS = torch.cumsum(nastyflipS,1)\n nastyflipagainS = torch.flip(torch.flip(aggsubS,[0,1]),[0])\n\n # The proximal gradient step is equal to subtracting the sorted cumulative sum\n updated_weights = sorted_w_and_b - nastyflipagainS\n unsorted = torch.zeros(rows,cols, device=torch.device(devicetype)).scatter_(1,indices,updated_weights)\n final_w_and_b = torch.sign(w_and_b) * unsorted\n\n # Actually update parameters and bias\n if bias is not None:\n update = final_w_and_b[:,:cols-1]\n parameter.data = update\n update_b = final_w_and_b[:,-1]\n bias.data = update_b\n else:\n parameter.data = final_w_and_b", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)", "def batch_norm_conv(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with 
a tanh activation function connected to the\r\n # LogisticRegression layer; the activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. 
#\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def __init__(self, mode, dim, epsilon=1e-12, name='l2Normalize'):\n super(L2Normalization, self).__init__(mode, name)\n self.dim = dim\n self.epsilon = epsilon", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## 
final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def l2_normalization(\n inputs,\n name,\n scaling=False,\n scale_initializer=init_ops.ones_initializer(),\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n data_format='NHWC',\n trainable=True,\n scope=None):\n\n with variable_scope.variable_scope(\n scope, 'L2Normalization_'+name, [inputs], reuse=reuse) as sc:\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n dtype = inputs.dtype.base_dtype\n if data_format == 'NHWC':\n # norm_dim = tf.range(1, inputs_rank-1)\n norm_dim = tf.range(inputs_rank-1, inputs_rank)\n params_shape = inputs_shape[-1:]\n elif data_format == 'NCHW':\n # norm_dim = tf.range(2, inputs_rank)\n norm_dim = tf.range(1, 2)\n params_shape = (inputs_shape[1])\n\n # Normalize along spatial dimensions.\n outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)\n # Additional scaling.\n if scaling:\n scale_collections = utils.get_variable_collections(\n variables_collections, 'scale')\n scale = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=scale_initializer,\n collections=scale_collections,\n trainable=trainable)\n if data_format == 'NHWC':\n outputs = tf.multiply(outputs, scale)\n elif data_format == 'NCHW':\n scale = tf.expand_dims(scale, axis=-1)\n scale = tf.expand_dims(scale, axis=-1)\n outputs = tf.multiply(outputs, scale)\n # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))\n\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. 
#\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. 
#\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def compute_loss(model, loader, loss_fn, optimizer=None):\n\n total_loss = 0.0\n count_batches = 0\n for x_, y_, qm_, db_mask, blink_mask in loader:\n batch_loss_list = []\n xbatch_list = []\n for mask in [db_mask, blink_mask]:\n idxes = get_idxes_from_mask(mask)\n x_pick, y_pick, qm_pick = x_[idxes], y_[idxes], qm_[idxes]\n y_pos_idxes = torch.nonzero(y_pick.squeeze(), as_tuple=False).reshape(1, -1)[0]\n y_neg_idxes = torch.nonzero(~y_pick.squeeze().bool(), as_tuple=False).reshape(1, -1)[0]\n\n if (len(y_pos_idxes) == 0) or (len(y_neg_idxes) == 0):\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(x_pick) <= 1:\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(y_pos_idxes) == 1:\n y_pos_idx = y_pos_idxes[0]\n else: # len(y_pos_idxes) > 1:\n # TODO: I am just always using the first positive example for now\n # rand_idx = random.choice(list(range(len(y_pos_idxes))))\n # print(y_pos_idxes)\n rand_idx = 0\n y_pos_idx = y_pos_idxes[rand_idx]\n\n batch_length = 1 + len(y_neg_idxes)\n batch_feature_len = x_.shape[1]\n x_batch = torch.zeros(batch_length, batch_feature_len)\n x_batch[:-1:, :] = x_pick[y_neg_idxes]\n x_batch[-1, :] = x_pick[y_pos_idx] # put positive to the end\n xbatch_list.append(x_batch)\n # print(y_pos_idx, len(y_neg_idxes))\n # print(\"batch\", x_batch.shape)\n\n if (len(xbatch_list[0]) == 0) and (len(xbatch_list[1]) == 0):\n # skip if both batches are []\n # print(\"hitting cases without any examples [SHOULD BE WRONG]\")\n continue\n elif (len(xbatch_list[0]) == 0) or (len(xbatch_list[1]) == 0):\n # continue # TODO: testing whether improvements made if we only use cases where there are sources from both\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 1\n yhat_neg = yhat[:-1]\n yhat_pos = yhat[-1].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones((len(yhat) - 1), 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n else:\n # get yhats for both BLINK and DB batches\n # print(len(xbatch_list[0]), len(xbatch_list[1]))\n # print((xbatch_list[0], xbatch_list[1]))\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 2\n yhat_neg = torch.zeros(extended_batch_length, 1)\n yhat_neg[:len(xbatch_list[0])-1] = yhat[:len(xbatch_list[0])-1]\n yhat_neg[len(xbatch_list[0])-1:] = 
yhat[len(xbatch_list[0]):-1]\n for idx in [len(xbatch_list[0]), -1]:\n yhat_pos = yhat[idx].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones(extended_batch_length, 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n\n # update every question-mention\n if batch_loss_list and optimizer:\n (sum(batch_loss_list)/len(batch_loss_list)).backward()\n optimizer.step()\n\n avg_loss = total_loss / count_batches\n\n return avg_loss, batch_length", "def train_net(epoch, data, net, opti, batch_graph):\n global num_batches, batch_size\n # train the network\n for num in range(epoch):\n # run each batch through each round\n for batch_id in range(num_batches):\n # calculate the neighborhood for the graph\n batch = torch.from_numpy(data[batch_id]).float()\n batch = batch.view(batch_size, -1)\n batch_distances = pairwise_distances(batch)\n nbr_graph_tensor = torch.from_numpy(batch_graph[batch_id]).float()\n batch_distances_masked = batch_distances * nbr_graph_tensor.float()\n global lbda\n out = net(batch, False) # run the batch through the network\n svd_loss, out = implement_svd(out) # calculate the SVD L2,1 loss and SVD representation\n output_distances = pairwise_distances(out)\n # Multiply the distances between each pair of points with the neighbor mask\n output_distances_masked = output_distances * nbr_graph_tensor.float()\n # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n nbr_distance = nbr_diff.norm()\n svd_loss *= lbda_svd # multiply SVD loss by its scaling factor\n # find variance in all directions\n var = 0\n for i in range(out.size()[0]):\n var += lbda_var / out[i].var()\n loss = nbr_distance + svd_loss + var # loss contains all three terms\n opti.zero_grad()\n loss.backward()\n opti.step()\n print('Epoch: %f, Step: %f, Loss: %.2f' % (num, batch_id + 1, loss.data.cpu().numpy()))\n\n # find the ideal number of dimensions\n global final_dim\n batch = torch.from_numpy(data[0]).float()\n batch = batch.view(batch_size, -1)\n out = net(batch, False)\n u, s, v = torch.svd(out)\n final_dim = calc_dim(s)", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n 
self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)" ]
[ "0.6197579", "0.60561115", "0.60153174", "0.5969007", "0.5949365", "0.58943415", "0.58943415", "0.58114785", "0.5737982", "0.56550455", "0.5645784", "0.5630686", "0.56221515", "0.5616789", "0.560882", "0.56057966", "0.5596632", "0.55800354", "0.55554694", "0.5553616", "0.55404234", "0.55348885", "0.5515137", "0.54819775", "0.5480998", "0.5471492", "0.5471175", "0.54526573", "0.5446201", "0.5444185", "0.5443679", "0.5440715", "0.54362315", "0.5435729", "0.5432077", "0.5426908", "0.5414537", "0.54085386", "0.5403075", "0.54007167", "0.5393908", "0.5386882", "0.53829986", "0.53827435", "0.53787476", "0.5378375", "0.53741467", "0.5371197", "0.53709996", "0.5369706", "0.5367214", "0.53639436", "0.5363786", "0.5360341", "0.5358296", "0.53546065", "0.5349536", "0.5346626", "0.5342665", "0.53425694", "0.53417367", "0.5341117", "0.53298694", "0.53252625", "0.53203535", "0.53126216", "0.5310054", "0.53093", "0.5308642", "0.53085566", "0.53063136", "0.5303918", "0.52968645", "0.5290463", "0.5287995", "0.528491", "0.52816653", "0.5276854", "0.52762324", "0.52722526", "0.5269062", "0.5266669", "0.5266129", "0.52631813", "0.52603745", "0.52594364", "0.5259121", "0.5257585", "0.52545387", "0.52544713", "0.52541715", "0.5251661", "0.5250683", "0.5246145", "0.52447414", "0.52433527", "0.5242091", "0.5238574", "0.5238041", "0.52362114" ]
0.55448663
20
dropout + batch norm + l1_l2
def architecture_CONV_FC_batch_norm_dropout_L1_l2_LEAKY_ReLU(
    X, nbclasses, nb_conv=1, nb_fc=1
):
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.0e-3
    dropout_rate = 0.5
    conv_kernel = 3
    conv_filters = 64
    nbunits_fc = 128

    model = Sequential(
        name=f"""{str(nb_conv)}_CONV_k_{str(conv_kernel)}_
        {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}
        _LEAKY_ReLU"""
    )
    model.add(
        Conv1D(
            input_shape=input_shape,
            kernel_regularizer=l1_l2(l1_l2_rate),
            kernel_size=conv_kernel,
            filters=conv_filters,
        )
    )
    model.add(LeakyReLU())
    model.add(BatchNormalization())

    # if more covolutional layers are defined in parameters
    if nb_conv > 1:
        for _layer in range(nb_conv):
            model.add(
                Conv1D(
                    kernel_size=conv_kernel,
                    filters=conv_filters,
                    kernel_regularizer=l1_l2(l1_l2_rate),
                )
            )
            model.add(LeakyReLU())
            model.add(BatchNormalization())

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(Dense(nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate)))
        model.add(LeakyReLU())
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def architecture_CONV_FC_batch_norm_dropout_L1_l2(\n X, nbclasses, nb_conv=1, nb_fc=1, kernel_initializer=\"random_normal\"\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = relu\n kernel_initializer = kernel_initializer\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}__CONV_k{str(conv_kernel)}_\n {str(nb_fc)}_initializer_{kernel_initializer}_\n _FC128_bn_d_{str(dropout_rate)}\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_TANH(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = tanh\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_\n CONV_k_{str(conv_kernel)}_\n {str(nb_fc)}_FC128_bn_d_{str(dropout_rate)}\n _TANH\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n \n dims = [input_dim] + hidden_dims + [num_classes]\n\n # initialise all parameters (weight, bias, gamma, beta)\n for i in range(len(dims)-1):\n w = 'W' + str(i+1)\n b = 'b' + str(i+1)\n self.params[w] = np.random.randn(dims[i], dims[i+1])*weight_scale\n self.params[b] = np.zeros(dims[i+1])\n \n if self.use_batchnorm:\n for i in range(len(dims)-2):\n #no gamma and beta for last layer\n gamma = 'gamma' + str(i+1)\n beta = 'beta' + str(i+1)\n self.params[gamma] = np.ones(dims[i+1])\n self.params[beta] = np.zeros(dims[i+1])\n \n \n\n # 
When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i 
in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n 
delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, 
nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)", "def build_generator(latent_dim=100):\n # The weight initialization and the slope are chosen to accord with the\n # Parameters in the paper. I only change padding when it seems neccesary to\n # to mantain adequate dimensons. 
\n weight_initializer = tf.keras.initializers.RandomNormal(stddev=0.02)\n slope = 0.3\n \n inputs = keras.Input(shape=(1,1,100))\n # First convolutional layer\n x = Conv2DTranspose(\n 1024, \n kernel_size=(4,4), \n strides=1, \n kernel_initializer=weight_initializer,\n padding='valid',\n use_bias=False\n )(inputs)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Second convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 512,\n kernel_size = 4,\n strides = (2,2),\n padding = 'same',\n use_bias = False\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n \n # Third convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 256,\n kernel_size = 5,\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fourth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 128,\n kernel_size = (5,5),\n strides = (2,2),\n use_bias=False,\n padding = 'same',\n )(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=slope)(x)\n\n # Fifth convolutional layer\n x = Conv2DTranspose(\n kernel_initializer=weight_initializer,\n filters = 3,\n kernel_size = (5,5),\n use_bias=False,\n strides = (2,2),\n padding = 'same',\n activation='tanh'\n )(x)\n model = keras.Model(inputs=inputs, outputs=x)\n return model", "def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss", "def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n if type(hidden_dims) != list:\n raise ValueError('hidden_dim has to be a list')\n\n self.L = len(hidden_dims) + 1\n self.N = input_dim\n self.C = num_classes\n dims = [self.N] + hidden_dims + [self.C]\n Ws = {'W' + str(i + 1):\n weight_scale * np.random.randn(dims[i], dims[i + 1]) for i in range(len(dims) - 1)}\n b = {'b' + str(i + 1): np.zeros(dims[i + 1])\n for i in range(len(dims) - 1)}\n\n self.params.update(b)\n self.params.update(Ws)\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). 
You can pass the same dropout_param to each dropout layer.\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def norm2d(w_in):\n return nn.BatchNorm2d(num_features=w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)", "def masked_l2(preds, actuals, mask):\n loss = tf.nn.l2(preds, actuals)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def loss_false(code_batch, k=1):\n\n _, n_latent = code_batch.get_shape()\n\n # changing these parameters is equivalent to changing the strength of the\n # regularizer, so we keep these fixed (these values correspond to the\n # original values used in Kennel et al 1992).\n rtol = 20.0\n atol = 2.0\n # k_frac = 0.01\n # n_batch = tf.cast(tf.keras.backend.shape(code_batch)[0], tf.float32)\n # assert False, n_batch\n # k = max(1, int(k_frac * n_batch))\n\n ## Vectorized version of distance matrix calculation\n tri_mask = tf.linalg.band_part(tf.ones((n_latent, n_latent), tf.float32), -1, 0)\n batch_masked = tf.multiply(tri_mask[:, tf.newaxis, :], code_batch[tf.newaxis, ...])\n X_sq = tf.reduce_sum(batch_masked * batch_masked, axis=2, 
keepdims=True)\n pdist_vector = (\n X_sq\n + tf.transpose(X_sq, [0, 2, 1])\n - 2 * tf.matmul(batch_masked, tf.transpose(batch_masked, [0, 2, 1]))\n )\n all_dists = pdist_vector\n all_ra = tf.sqrt(\n (1 / (tf.range(1, 1 + n_latent, dtype=tf.float32)))\n * tf.squeeze(\n tf.reduce_sum(\n tf.square(tf.math.reduce_std(batch_masked, axis=1, keepdims=True)),\n axis=2,\n )\n )\n )\n\n # Avoid singularity in the case of zeros\n all_dists = tf.clip_by_value(all_dists, 1e-14, tf.reduce_max(all_dists))\n\n # inds = tf.argsort(all_dists, axis=-1)\n _, inds = tf.math.top_k(-all_dists, int(k + 1))\n # top_k currently faster than argsort because it truncates matrix\n\n neighbor_dists_d = tf.gather(all_dists, inds, batch_dims=-1)\n neighbor_new_dists = tf.gather(all_dists[1:], inds[:-1], batch_dims=-1)\n\n # Eq. 4 of Kennel et al.\n scaled_dist = tf.sqrt(\n (neighbor_new_dists - neighbor_dists_d[:-1]) / neighbor_dists_d[:-1]\n )\n\n # Kennel condition #1\n is_false_change = scaled_dist > rtol\n # Kennel condition 2\n is_large_jump = neighbor_new_dists > atol * all_ra[:-1, tf.newaxis, tf.newaxis]\n\n is_false_neighbor = tf.math.logical_or(is_false_change, is_large_jump)\n total_false_neighbors = tf.cast(is_false_neighbor, tf.int32)[..., 1 : (k + 1)]\n\n # Pad zero to match dimensionality of latent space\n reg_weights = 1 - tf.reduce_mean(\n tf.cast(total_false_neighbors, tf.float64), axis=(1, 2)\n )\n reg_weights = tf.pad(reg_weights, [[1, 0]])\n\n # Find average batch activity\n activations_batch_averaged = tf.sqrt(tf.reduce_mean(tf.square(code_batch), axis=0))\n\n # L2 Activity regularization\n activations_batch_averaged = tf.cast(activations_batch_averaged, tf.float64)\n loss = tf.reduce_sum(tf.multiply(reg_weights, activations_batch_averaged))\n\n return tf.cast(loss, tf.float32)", "def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, dropout_name=None):\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob, dropout_name=dropout_name)\n return output_tensor", "def architecture_CONV_FC_batch_norm_dropout_L1_l2_SIGMOID(\n X, nbclasses, nb_conv=1, nb_fc=1\n):\n # input size\n width, height, depth = X.shape\n input_shape = (height, depth)\n\n # parameters of the architecture\n l1_l2_rate = 1.0e-3\n dropout_rate = 0.5\n conv_kernel = 3\n conv_filters = 64\n nbunits_fc = 128\n activation = sigmoid\n\n model = Sequential(\n name=f\"\"\"{str(nb_conv)}_CONV_k_\n {str(conv_kernel)}_{str(nb_fc)}\n _FC128_bn_d_{str(dropout_rate)}\n _SIGMOID\"\"\"\n )\n model.add(\n Conv1D(\n input_shape=input_shape,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n kernel_size=conv_kernel,\n filters=conv_filters,\n )\n )\n model.add(BatchNormalization())\n\n # if more covolutional layers are defined in parameters\n if nb_conv > 1:\n for _layer in range(nb_conv):\n model.add(\n Conv1D(\n kernel_size=conv_kernel,\n filters=conv_filters,\n activation=activation,\n kernel_regularizer=l1_l2(l1_l2_rate),\n )\n )\n model.add(BatchNormalization())\n\n # Flatten + FC layers\n model.add(Flatten())\n for _layer in range(nb_fc):\n model.add(\n Dense(\n nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate), activation=activation\n )\n )\n model.add(Dropout(dropout_rate))\n\n model.add(Dense(nbclasses, activation=softmax))\n\n return model", "def L2_norm(x, axis=-1):\n return keras.backend.l2_normalize(x, axis=axis)", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, 
seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #\n # initialized from a normal distribution centered at 0 with standard #\n # deviation equal to weight_scale. Biases should be initialized to zero. #\n # #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n dimension = [input_dim] + hidden_dims + [num_classes]\n for i in range(1, self.num_layers+1):\n self.params['W{0}'.format(i)] = weight_scale * np.random.randn(dimension[i-1], dimension[i])\n self.params['b{0}'.format(i)] = np.zeros(dimension[i])\n\n if self.normalization in ['batchnorm', 'layernorm']:\n self._batchnormInit()\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def batchnorm_init(m, kernelsize=3):\r\n n = kernelsize**2 * m.num_features\r\n m.weight.data.normal_(0, math.sqrt(2. / (n)))\r\n m.bias.data.zero_()", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=1, normalization=None, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # TODO: Initialize the parameters of the network, storing all values in #\n # the self.params dictionary. Store weights and biases for the first layer #\n # in W1 and b1; for the second layer use W2 and b2, etc. #\n # When using batch normalization, store scale and shift parameters for the #\n # first layer in gamma1 and beta1; for the second layer use gamma2 and #\n # beta2, etc. 
Scale parameters should be initialized to ones and shift #\n # parameters should be initialized to zeros. #\n ############################################################################\n input_size = input_dim\n for i in range(len(hidden_dims)):\n output_size = hidden_dims[i]\n self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n self.params['b' + str(i+1)] = np.zeros(output_size)\n if self.normalization:\n self.params['gamma' + str(i+1)] = np.ones(output_size)\n self.params['beta' + str(i+1)] = np.zeros(output_size)\n input_size = output_size # 下一层的输入\n # 输出层,没有BN操作\n self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization=='batchnorm':\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n if self.normalization=='layernorm':\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def _optimization(dataset1, dataset2, nb_epochs=3000):\n\n x1_mean = dataset1['data'].mean()\n x1_std = dataset1['data'].std()\n x1 = (dataset1['data'] - x1_mean) / (x1_std)\n y1 = dataset1['labels']\n Y1 = dataset1['hot_labels']\n\n x2_mean = dataset2['data'].mean()\n x2_std = dataset2['data'].std()\n x2 = (dataset2['data'] - x2_mean) / (x2_std)\n\n x_model1 = Input(x1.shape[1:])\n y_model1 = Dropout(0.1)(x_model1)\n y_model1 = Dense(50, activation='relu')(x_model1)\n y_model1 = Dropout(0.2)(y_model1)\n y_model1 = Dense(50, activation='relu')(y_model1)\n out_model1 = Dense(len(np.unique(y1)), activation='softmax')(y_model1)\n\n model1 = Model(input=x_model1, output=out_model1)\n\n optimizer = keras.optimizers.Adadelta()\n model1.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=200, min_lr=0.1)\n\n hist = model1.fit(x1, Y1, batch_size=x1.shape[0], nb_epoch=nb_epochs, verbose=1, shuffle=True, callbacks=[reduce_lr])\n\n dataset2_new_labels = []\n\n for i in range(x2.shape[0]):\n xTrain = x2[i,:].reshape((1,x2.shape[1]))\n dataset2_new_labels.append(np.argmax(model1.predict(xTrain, batch_size=1)))\n\n # Print the testing results which has the l in range(x_train.shape[0]):\n # for i in range(len(x_test1)):\n # xTest = x_test1[i,:].reshape((1,2048))\n # print((np.argmax(model.predict(xTest, batch_size=1)), y_test1[i]))\n # log = pd.DataFrame(hist.history)\n # print(\"saving results for 100 nodes\" + _MODE + fname)\n # log.to_json('accuracies/accuracy_100_' + _MODE + fname + '.json')\n\n # with open('Text_Files/' + fname + '_results.txt', 'w') 
as text_file:\n # text_file.write(fname + '<<<=====>>>' + str(max(log.val_acc.values)))\n\n # assert 2==1\n\n x_model1 = []\n y_model1 = []\n out_model1 = []\n model1 = []\n\n return dataset2_new_labels", "def _fit_apgl(x, mask, lmbd,\n max_iter=100, L=1e-3, beta=0.5,\n tol=1e-3, print_loss=False):\n # init\n n1, n2 = x.shape\n rdm = RandomState(123)\n theta = rdm.randn(n1, n2) # natural parameter\n thetaOld = theta\n alpha = 1\n alphaOld = 0\n\n # main loop\n loss = _cross_entropy(x, mask, theta) + lmbd * \\\n np.linalg.norm(theta, ord='nuc')\n iteration = []\n for i in range(int(max_iter)):\n if print_loss:\n print(f'Epoch {i}, loss {loss:.3f}')\n iteration.append(loss)\n lossOld = loss\n # nesterov extropolation\n A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)\n for _ in range(50):\n S = A - L * _gradient(x, mask, A)\n thetaNew = svt(S, lmbd * L)\n ce = _cross_entropy(x, mask, thetaNew)\n if ce < _bound(x, mask, thetaNew, theta, L):\n break\n else:\n L = beta * L\n thetaOld = theta\n theta = thetaNew\n alphaOld = alpha\n alpha = (1 + np.sqrt(4 + alpha ** 2)) / 2\n loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')\n if i == max_iter - 1:\n print(f'Reach max iteration {i+1}')\n if np.abs(lossOld - loss) < tol:\n break\n\n return theta, np.array(iteration)", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. 
\n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel_old, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n # norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.LeakyReLU(alpha=0.1))", "def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n 
print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. 
#\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def validation_dubo(latent_dim, covar_module0, covar_module1, likelihood, train_xt, m, log_v, z, P, T, eps):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n v = torch.exp(log_v)\n torch_dtype = torch.double\n x_st = torch.reshape(train_xt, [P, T, train_xt.shape[1]]).to(device)\n stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)\n K0xz = covar_module0(train_xt, z).evaluate().to(device)\n K0zz = (covar_module0(z, z).evaluate() + eps * torch.eye(z.shape[1], dtype=torch_dtype).to(device)).to(device)\n LK0zz = torch.cholesky(K0zz).to(device)\n iK0zz = 
torch.cholesky_solve(torch.eye(z.shape[1], dtype=torch_dtype).to(device), LK0zz).to(device)\n K0_st = covar_module0(stacked_x_st, stacked_x_st).evaluate().transpose(0,1)\n B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + torch.eye(T, dtype=torch.double).to(device) * likelihood.noise_covar.noise.unsqueeze(dim=2)).transpose(0,1)\n LB_st = torch.cholesky(B_st).to(device)\n iB_st = torch.cholesky_solve(torch.eye(T, dtype=torch_dtype).to(device), LB_st)\n\n dubo_sum = torch.tensor([0.0]).double().to(device)\n for i in range(latent_dim):\n m_st = torch.reshape(m[:, i], [P, T, 1]).to(device)\n v_st = torch.reshape(v[:, i], [P, T]).to(device)\n K0xz_st = torch.reshape(K0xz[i], [P, T, K0xz.shape[2]]).to(device)\n iB_K0xz = torch.matmul(iB_st[i], K0xz_st).to(device)\n K0zx_iB_K0xz = torch.matmul(torch.transpose(K0xz[i], 0, 1), torch.reshape(iB_K0xz, [P*T, K0xz.shape[2]])).to(device)\n W = K0zz[i] + K0zx_iB_K0xz\n W = (W + W.T) / 2\n LW = torch.cholesky(W).to(device)\n logDetK0zz = 2 * torch.sum(torch.log(torch.diagonal(LK0zz[i]))).to(device)\n logDetB = 2 * torch.sum(torch.log(torch.diagonal(LB_st[i], dim1=-2, dim2=-1))).to(device)\n logDetW = 2 * torch.sum(torch.log(torch.diagonal(LW))).to(device)\n logDetSigma = -logDetK0zz + logDetB + logDetW\n iB_m_st = torch.solve(m_st, B_st[i])[0].to(device)\n qF1 = torch.sum(m_st*iB_m_st).to(device)\n p = torch.matmul(K0xz[i].T, torch.reshape(iB_m_st, [P * T])).to(device)\n qF2 = torch.sum(torch.triangular_solve(p[:,None], LW, upper=False)[0] ** 2).to(device)\n qF = qF1 - qF2\n tr = torch.sum(iB_st[i] * K0_st[i]) - torch.sum(K0zx_iB_K0xz * iK0zz[i])\n logDetD = torch.sum(torch.log(v[:, i])).to(device)\n tr_iB_D = torch.sum(torch.diagonal(iB_st[i], dim1=-2, dim2=-1)*v_st).to(device)\n D05_iB_K0xz = torch.reshape(iB_K0xz*torch.sqrt(v_st)[:,:,None], [P*T, K0xz.shape[2]])\n K0zx_iB_D_iB_K0zx = torch.matmul(torch.transpose(D05_iB_K0xz,0,1), D05_iB_K0xz).to(device)\n tr_iB_K0xz_iW_K0zx_iB_D = torch.sum(torch.diagonal(torch.cholesky_solve(K0zx_iB_D_iB_K0zx, LW))).to(device)\n tr_iSigma_D = tr_iB_D - tr_iB_K0xz_iW_K0zx_iB_D\n dubo = 0.5*(tr_iSigma_D + qF - P*T + logDetSigma - logDetD + tr)\n dubo_sum = dubo_sum + dubo\n return dubo_sum", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. 
#\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def calculate2_simpleKL_norm(pred, truth, rnd=0.01):\n return 1 - calculate2_simpleKL(pred, truth, rnd=rnd) / 4000", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):\n sess = tf.get_default_session()\n\n idxes = np.random.permutation(len(D))\n train_idxes = idxes[:int(len(D)*0.8)]\n valid_idxes = idxes[int(len(D)*0.8):]\n\n def _batch(idx_list,add_noise):\n batch = []\n\n if len(idx_list) > batch_size:\n idxes = np.random.choice(idx_list,batch_size,replace=False)\n else:\n idxes = idx_list\n\n for i in idxes:\n batch.append(D[i])\n\n b_x,b_y,b_l = zip(*batch)\n x_split = np.array([len(x) for x in b_x])\n y_split = np.array([len(y) for y in b_y])\n b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)\n\n if add_noise:\n b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1\n\n return b_x,b_y,x_split,y_split,b_l\n\n for it in tqdm(range(iter),dynamic_ncols=True):\n b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)\n\n loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l,\n self.l2_reg:l2_reg,\n })\n\n if debug:\n if it % 100 == 0 or it < 10:\n b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)\n valid_acc = sess.run(self.acc,feed_dict={\n self.x:b_x,\n self.y:b_y,\n self.x_split:x_split,\n self.y_split:y_split,\n self.l:b_l\n })\n tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))\n\n #if valid_acc >= 0.95:\n # print('loss: %f 
(l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))\n # print('early termination@%08d'%it)\n # break", "def spatial_batchnorm_backward(dout, cache):\r\n \tN, C, H, W = dout.shape\r\n dout_new = dout.transpose(0, 2, 3, 1).reshape(N*H*W, C)\r\n dx, dgamma, dbeta = batchnorm_backward(dout_new, cache)\r\n dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2)\r\n\r\n return dx, dgamma, dbeta", "def configure_batchnorm(x, model):\n bs = x.size(0)\n # train mode, because dent optimizes the model to minimize entropy\n model.train()\n # disable grad, to (re-)enable only what dent updates\n model.requires_grad_(False)\n # configure norm for dent updates:\n # enable grad + keep statisics + repeat affine params\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.weight = nn.Parameter(m.ckpt_weight.unsqueeze(0).repeat(bs, 1))\n m.bias = nn.Parameter(m.ckpt_bias.unsqueeze(0).repeat(bs, 1))\n m.requires_grad_(True)\n return model", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1))", "def scipy_minus_gradient(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._gradient_iter += 1\n g = None\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n first_term = vector_graphs[correct_rows,:].sum(axis=0)\n all_scores = vector_graphs * w\n all_probs = []\n for batch in batches:\n batch_scores = all_scores[batch]\n S = logsumexp(batch_scores)\n all_probs.append(np.exp(batch_scores - S))\n all_probs = numpy.hstack(all_probs)\n second_term = all_probs * vector_graphs\n if g is None:\n g = second_term - first_term\n else:\n g = g + second_term - first_term\n index += 1\n if index % 100 == 0:\n print('Gradient '+str(index)+' processed')\n g = numpy.ndarray.flatten(numpy.asarray(g)) / len(all_vector_graphs)\n if sigma != None:\n g = g + sigma * w\n print('Gradient norm:'+str(scipy.linalg.norm(g)))\n sys.stdout.flush()\n if perceptron and perceptron._model_pickle:\n if perceptron._gradient_iter % 5 == 0:\n perceptron._weights = numpy.reshape(w,(1,perceptron._num_features))\n perceptron.save(perceptron._model_pickle+'_'+str(perceptron._gradient_iter))\n return g", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n CustomBatchNormalization(),\n LeakyReLU(alpha=0.1))", "def neg_sampling_loss_and_gradient(\n center_word_vec,\n outside_word_idx,\n outside_vectors,\n dataset,\n K=10\n):\n\n # Negative sampling of words is done for you. 
Do not modify this if you\n # wish to match the autograder and receive points!\n neg_sample_word_indices = get_negative_samples(outside_word_idx, dataset, K)\n indices = [outside_word_idx] + neg_sample_word_indices\n\n ### YOUR CODE HERE\n \n outside_word_vector = outside_vectors[outside_word_idx]\n outside_words_dot_center_word = outside_word_vector.dot(center_word_vec)\n \n neg_samples_vector = outside_vectors[neg_sample_word_indices]\n neg_samples_dot_center_word = neg_samples_vector.dot(center_word_vec)\n \n sigmoid_outside_dot = sigmoid(outside_words_dot_center_word)\n sigmoid_negative_dot = sigmoid(-neg_samples_dot_center_word)\n\n loss = -np.log(sigmoid_outside_dot) -np.sum(np.log(sigmoid_negative_dot))\n \n grad_center_vec = \\\n (sigmoid_outside_dot - 1) * outside_word_vector + \\\n np.sum((1 - sigmoid_negative_dot)[:, np.newaxis] * neg_samples_vector, axis = 0)\n \n grad_outside_vecs = np.zeros_like(outside_vectors)\n grad_outside_vecs[outside_word_idx] = (sigmoid_outside_dot - 1) * center_word_vec\n \n for i, neg_index in enumerate(neg_sample_word_indices):\n grad_outside_vecs[neg_index] += \\\n (1 - sigmoid_negative_dot[i]) * center_word_vec\n\n ### END YOUR CODE\n\n return loss, grad_center_vec, grad_outside_vecs", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. \n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. 
It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def l2_normalization(inputs, scaling=True):\n with tf.variable_scope('L2Normalization'):\n inputs_shape = inputs.get_shape()\n channel_shape = inputs_shape[-1:]\n # cal l2_norm on channel\n outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)\n # scalling\n if scaling:\n # scale.shape == channel.shape\n scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))\n outputs = tf.multiply(outputs, scale)\n\n return outputs", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n ###########################################################################\n gamma, x_hat, num, denom, eps, sample_variance = cache\n N, D = dout.shape\n \n dbeta = np.sum(dout, axis=0)\n dyx_hat = dout\n dgamma = np.sum(dyx_hat*x_hat, axis=0)\n dx_hat = gamma*dyx_hat\n ddenom = np.sum(num*dx_hat, axis=0)\n dmu1 = (1/denom)*dx_hat\n dsqvar = ddenom*(-1)*(1/(denom**2))\n dvar = 0.5*((sample_variance+eps)**(-0.5))*dsqvar\n dsq = (1/N)*np.ones((N,D))*dvar\n dmu2 = 2*num*dsq\n dmu = (-1)*np.sum(dmu1+dmu2, axis=0)\n dx1 = dmu1 + dmu2\n dx2 = (1/N)*np.ones((N,D))*dmu\n dx = dx1+dx2\n\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def batch_norm(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def test_keras_unsafe_l2_norm():\n model, X, y, Xval, yval = make_small_model()\n\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n\n isDP, msg = safekeras.check_optimizer_is_DP(model.optimizer)\n assert isDP, \"failed check that optimizer is dP\"\n\n model.l2_norm_clip = 0.9\n\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n\n DPused, msg = safekeras.check_DP_used(model.optimizer)\n assert (\n DPused\n ), \"Failed check that DP version of optimiser was actually used in training\"\n\n loss, acc = model.evaluate(X, y)\n expected_accuracy = UNSAFE_ACC\n assert round(acc, 6) == round(\n expected_accuracy, 6\n ), \"failed check that accuracy is as expected\"\n\n msg, disclosive = model.preliminary_check()\n correct_msg = (\n \"WARNING: model parameters may present a disclosure risk:\"\n \"\\n- parameter l2_norm_clip = 0.9 identified as less than the recommended \"\n \"min value of 1.0.\"\n )\n assert msg == correct_msg, \"failed check correct warning message\"\n assert disclosive is True, \"failed check disclosive is True\"", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def darknet_CBL(*args, **kwargs):\n\n no_bias_kwargs = {'use_bias': False} # 没懂为啥用 no_bias\n no_bias_kwargs.update(kwargs)\n return compose(\n darknet_Conv2D(*args, **no_bias_kwargs),\n custom_batchnormalization(),\n LeakyReLU(alpha=0.1)\n )", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n 
self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def normF2(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return LA.norm(X, 'fro')**2", "def l2_norm(input_x, epsilon=1e-12):\n input_x_norm = input_x/(tf.reduce_sum(input_x**2)**0.5 + epsilon)\n return input_x_norm", "def batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n ###########################################################################\n x, mu, sigma, gamma, beta = cache\n N = dout.shape[0]\n X_mu = x - mu\n var_inv = 1./sigma\n \n dX_norm = dout * gamma\n dvar = np.sum(dX_norm * X_mu,axis=0) * -0.5 * sigma**(-3)\n dmu = np.sum(dX_norm * -var_inv ,axis=0) + dvar * 1/N * np.sum(-2.* X_mu, axis=0)\n\n dx = (dX_norm * var_inv) + (dmu / N) + (dvar * 2/N * X_mu)\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum(dout * X_mu/sigma, axis=0)\n \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):\n sigma2 = sigma * sigma\n\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n\n return outside_mul", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def relaxed_ba_bias(Xinput, L, lamb, beta, max_iter=300):\n X = Xinput.T # X: n_samples x n_dim\n D, m = X.shape\n B = np.sign(np.random.rand(L, m))\n c1 = np.random.rand(L,1)\n c2 = np.random.rand(D,1)\n\n for i in range(max_iter):\n # given B, compute W1\n W1 = lamb*np.matmul(np.matmul((B - c1), X.T), \\\n np.linalg.inv(lamb*np.matmul(X,X.T) + beta*np.eye(D)))\n\n # given B, compute W2\n W2 = np.matmul( np.matmul((X-c2), B.T), \\\n np.linalg.inv(np.matmul(B,B.T) + beta*np.eye(L)))\n\n # compute c1\n c1 = (1.0/m)*np.matmul(B - np.matmul(W1, X), 
np.ones((m,1)))\n # compute c2\n c2 = (1.0/m)*np.matmul(X - np.matmul(W2, B), np.ones((m,1)))\n\n # given W1, W2, c1, c2, compute B\n Xtmp = X - c2\n H = np.matmul(W1, X) + c1\n B = learn_B_new(Xtmp.T, W2.T, B.T, H.T, lamb);\n\n B = B.T\n\n # X_reconst = np.matmul(W2, np.sign(np.matmul(W1, X) + c1)) + c2\n # mse = np.mean(np.square(X_reconst - X))\n # print('mse {}'.format(mse))\n return W2, W1, c2, c1, B", "def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect'):\n assert (n_blocks >= 0)\n super(DenseGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n dense_features = ngf * mult\n dense_features = dense_features + 6 * 32\n for i in range(n_blocks):\n model += [DenseBlock(num_layers=6, num_input_features=ngf * mult, bn_size=4, growth_rate=32, drop_rate=0,\n norm_layer=norm_layer)]\n model += [norm_layer(dense_features), nn.ReLU(inplace=True),\n nn.Conv2d(dense_features, ngf * mult, kernel_size=1, stride=1, bias=use_bias),\n ]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Sigmoid()]\n\n self.model = nn.Sequential(*model)", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = 
Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip", "def __init__(self, rng, input, layer_sizes, dropout_rates,\r\n activations=None, use_bias=True, prob_constraint_on=True):\r\n # Set up all the hidden layers\r\n weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))\r\n # we build two parallel layers\r\n # - training_layers for training with/without dropout\r\n # - testing_layers for testing the performance\r\n self.training_layers = []\r\n self.testing_layers = []\r\n \r\n # dropout the input\r\n next_training_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])\r\n next_testing_layer_input = input\r\n \r\n layer_counter = 0\r\n for n_in, n_out in weight_matrix_sizes[:-1]:\r\n \r\n # setup the training layer\r\n next_training_layer = DropoutHiddenLayer(rng=rng,\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n dropout_rate=dropout_rates[layer_counter])\r\n self.training_layers.append(next_training_layer)\r\n next_training_layer_input = next_training_layer.output\r\n\r\n # setup the testing layer\r\n # Reuse the paramters from the dropout layer here, in a different\r\n # path through the graph.\r\n next_testing_layer = HiddenLayer(rng=rng,\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n activation=activations[layer_counter],\r\n use_bias=use_bias,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=next_training_layer.W * (1 - dropout_rates[layer_counter]),\r\n b=next_training_layer.b)\r\n self.testing_layers.append(next_testing_layer)\r\n next_testing_layer_input = next_testing_layer.output\r\n \r\n layer_counter += 1\r\n \r\n # Set up the output layer for training layers\r\n n_in, n_out = weight_matrix_sizes[-1]\r\n training_output_layer = LogisticRegression(\r\n input=next_training_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n prob_constraint_on=prob_constraint_on)\r\n self.training_layers.append(training_output_layer)\r\n\r\n # Set up the output layer for testing layers\r\n # Again, reuse paramters in the dropout output.\r\n 
testing_output_layer = LogisticRegression(\r\n input=next_testing_layer_input,\r\n n_in=n_in, n_out=n_out,\r\n # for testing, we SHOULD scale the weight matrix W with (1-p)\r\n W=training_output_layer.W * (1 - dropout_rates[-1]),\r\n b=training_output_layer.b,\r\n prob_constraint_on=prob_constraint_on)\r\n self.testing_layers.append(testing_output_layer)\r\n\r\n # Use the MSE of the logistic regression layer as the objective\r\n # In training phase, we use the MSE of the logistic regression layer\r\n # which is on top of the dropout_layers\r\n self.training_MSE = self.training_layers[-1].MSE\r\n # In validation/testing phase, we use the MSE of the logistic regression layer\r\n # which is on top of the normal_layers\r\n self.testing_MSE = self.testing_layers[-1].MSE\r\n \r\n # NOTE: for prediction, we use all the weights, thus we should use\r\n # the normal layers instead of the dropout layers\r\n self.y_pred = self.testing_layers[-1].y_pred\r\n \r\n # Grab all the parameters together.\r\n self.params = [ param for layer in self.training_layers for param in layer.params ]\r\n # The above is Double Iteration in List Comprehension\r\n # See the discussion in\r\n # http://stackoverflow.com/questions/17657720/python-list-comprehension-double-for\r\n # In regular for-loop format, we have\r\n # for layer in self.dropout_layers:\r\n # for param in layer.params:\r\n # put param in the resulting list\r", "def train(args,train_loader, model, criterion, optimizer, epoch, pruner, writer):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n \n\n model.train()\n total =0 \n correct = 0\n reg_loss = 0.0\n train_loss = 0.0\n end = time.time()\n\n for i, (inputs, target) in enumerate(train_loader):\n\n target = target.cuda()\n inputs = inputs.cuda()\n \n inputs, targets_a, targets_b, lam = mixup_data(inputs, target, args.alpha, True)\n inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))\n\n ##input_var = torch.autograd.Variable(input)\n ##target_var = torch.autograd.Variable(target)\n\n\n outputs = model(inputs)\n ##outputs, Qs, Ys = model(inputs)\n ##loss = criterion(output, target_var)\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n## print(\"loss:\")\n## print(loss)\n## print(loss.item())\n## train_loss += loss.data[0]\n train_loss += loss.item()\n _, predicted = torch.max(outputs.data, 1)\n total += target.size(0)\n correct += (lam * predicted.eq(targets_a.data).cpu().sum().float()\n + (1 - lam) * predicted.eq(targets_b.data).cpu().sum().float())\n\n## prec1 = accuracy(output.data, target, topk=(1,))[0]\n## losses.update(loss.data.item(), input.size(0))\n## top1.update(prec1.item(), input.size(0))\n\n optimizer.zero_grad()\n\n\n\n## for y in Ys:\n## y.retain_grad()\n\n\n\n loss.backward()\n\n\n optimizer.step()\n\n\n\n\n if pruner is not None:\n pruner.prune(update_state=False)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if 0:\n kwalt = epoch*len(train_loader)+i\n if writer is not None:\n for j,q in enumerate(Qs):\n writer.add_scalar(\"variances %d\" % j, q.cpu().numpy(), kwalt)\n\n for l,y in enumerate(Ys):\n if y.grad is not None:\n writer.add_scalar(\"grad %d\" % (l-j), getQ(y.grad).cpu().numpy(), kwalt)\n\n## writer.add_scalars(\"variancess\", { \"%d\"% j : q.cpu().numpy() for j,q in enumerate(Qs)}, i)\n\n\n\n if 0:\n if i % args.print_freq == 0:\n print(\n f\"Epoch: [{epoch}][{i}/{len(train_loader)}]\\t\"\n f\"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t\"\n f\"Loss {losses.val:.4f} 
({losses.avg:.4f})\\t\"\n f\"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\"\n )\n niter = epoch*len(train_loader)+i\n\n batch_idx = i\n if writer is not None:\n writer.add_scalar('Train/Loss', train_loss/batch_idx, epoch)\n writer.add_scalar('Train/Prec@1', 100.*correct/total, epoch)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def __init__(self, X_train, y_train, input_shape, filters, kernel_size,\n maxpool, loss_function='categorical_crossentropy', nb_classes= 2, droput_iteration=20, dropout = 0.05):\n\n # We normalize the training data to have zero mean and unit standard\n # deviation in the training set if necessary\n\n # if normalize:\n # self.std_X_train = np.std(X_train, 0)\n # self.std_X_train[ self.std_X_train == 0 ] = 1\n # self.mean_X_train = np.mean(X_train, 0)\n # else:\n # self.std_X_train = np.ones(X_train.shape[ 1 ])\n # self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n self.droput_iteration = droput_iteration\n self.nb_classes = nb_classes\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n\n\n # model = Sequential()\n # model.add(Conv2D(filters, (kernel_size, kernel_size), padding='same',\n # input_shape=input_shape))\n # model.add(Activation('relu'))\n # model.add(Conv2D(filters, (kernel_size, kernel_size)))\n # model.add(Activation('relu'))\n # 
model.add(MaxPooling2D(pool_size=(maxpool, maxpool)))\n # model.add(Dropout(dropout))\n # c = 3.5\n # Weight_Decay = c / float(X_train.shape[0])\n # model.add(Flatten())\n # model.add(Dense(128, W_regularizer=l2(Weight_Decay)))\n # model.add(Activation('relu'))\n # model.add(Dropout(dropout))\n # model.add(Dense(nb_classes))\n # model.add(Activation('softmax'))\n\n # model.compile(loss=loss_function, optimizer='adam')\n\n c = 3.5\n Weight_Decay = c / float(X_train.shape[0])\n\n model = Sequential()\n model.add(Dense(256, input_shape =input_shape))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Dense(256, W_regularizer=l2(Weight_Decay)))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n\n model.compile(loss=loss_function, optimizer='adam')\n\n\n self.model = model\n # # We iterate the learning process\n # model.fit(X_train, y_train, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=0)\n\n # #function for bayesian inference using dropouts\n # self.f = K.function([model.layers[0].input, K.learning_phase()],\n # [model.layers[-1].output])", "def xlarge_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n #model.add(Dense(18, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(16, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def l2_normalize(data, eps, axis=None):\n return cpp.nn.l2_normalize(data, eps, axis)", "def build_nn(dropout: float=0.3,verbosity: int=0):\n model = Sequential()\n model.add(Dense(1024, input_shape=(1024,), activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(dropout))\n\n model.add(Dense(1, activation='sigmoid'))\n \n if verbosity > 0:\n model.summary()\n return model", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = 
child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def convert_norm(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n axis_l = [axis]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.l2_normalize(x, epsilon, axis_l)\n g.add_node(op.output(\"Out\")[0], out)", "def spatial_batchnorm_backward(dout, cache):\n dx, dgamma, dbeta = None, None, None\n\n ###########################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should#\n # be very short; ours is less than five lines. #\n ###########################################################################\n s_cache,shape_x = cache\n reshaped_dout = np.reshape(dout,(-1,dout.shape[1]))\n dx_reshaped,dgamma,dbeta = batchnorm_backward_alt(reshaped_dout,s_cache)\n dx = np.reshape(dx_reshaped,shape_x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=True):\n super(NLayerTFDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if(no_antialias):\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if(no_antialias):\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)]\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n self.encoder = nn.Sequential(*sequence)\n dim = ndf * nf_mult\n self.transformer_enc = transformer.TransformerDecoders(dim, nhead=4, num_encoder_layers=4, dim_feedforward=dim*2, dropout=0.0)\n\n self.query_embed = nn.Embedding(1, dim)\n 
self.classifier = nn.Sequential(\n nn.Linear(dim, dim//2),\n nn.LayerNorm(dim//2),\n nn.ReLU(),\n nn.Linear(dim//2, dim//4),\n nn.LayerNorm(dim//4),\n nn.ReLU(),\n nn.Linear(dim//4, 1),\n nn.Sigmoid()\n )", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def linf1(parameter, bias=None, reg=0.01, lr=0.1):\n\n Norm = reg*lr\n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n sorted_w_and_b, indices = torch.sort(torch.abs(w_and_b), descending=True)\n\n # CUDA or CPU\n devicetype=\"cuda\"\n if w_and_b.is_cuda:\n devicetype=\"cuda\"\n else:\n devicetype=\"cpu\"\n\n\n #SLOW\n rows, cols = sorted_w_and_b.size()\n\n sorted_z = torch.cat((sorted_w_and_b, torch.zeros(rows,1, device=torch.device(devicetype))),1)\n subtracted = torch.clamp(sorted_w_and_b - sorted_z[:,1:],max=Norm) #Max=Norm important\n\n scale_indices = torch.cumsum(torch.ones(rows,cols, device=torch.device(devicetype)),1)\n scaled_subtracted = subtracted * scale_indices\n max_mass = torch.cumsum(scaled_subtracted,1)\n nonzero = torch.clamp(-1*(max_mass - Norm),0)\n\n oneN = 1.0/scale_indices\n\n # Algorithm described in paper, but these are all efficient GPU operation steps)\n # First we subtract every value from the cell next to it\n nonzero_ones = torch.clamp(nonzero * 1000000, max=1) #Hacky, but efficient\n shifted_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,:(cols-1)]),1)\n over_one = -1*(nonzero_ones - shifted_ones)\n last_one = torch.cat((over_one,torch.zeros(rows,1, device=torch.device(devicetype))),1)[:,1:]\n max_remain = last_one * nonzero\n shift_max = torch.cat((torch.zeros(rows,1, device=torch.device(devicetype)),max_remain[:,:(cols-1)]),1)\n first_col_nonzero_ones = torch.cat((torch.ones(rows,1, device=torch.device(devicetype)),nonzero_ones[:,1:]),1) #Edge case for only first column\n tosub = first_col_nonzero_ones * subtracted + shift_max * oneN\n\n # We flip the tensor so that we can get a cumulative sum for the value to subtract, then flip back\n nastyflipS = torch.flip(torch.flip(tosub,[0,1]),[0])\n aggsubS = torch.cumsum(nastyflipS,1)\n nastyflipagainS = torch.flip(torch.flip(aggsubS,[0,1]),[0])\n\n # The proximal gradient step is equal to subtracting the sorted cumulative sum\n updated_weights = sorted_w_and_b - nastyflipagainS\n unsorted = torch.zeros(rows,cols, device=torch.device(devicetype)).scatter_(1,indices,updated_weights)\n final_w_and_b = torch.sign(w_and_b) * unsorted\n\n # Actually update parameters and bias\n if bias is not None:\n update = final_w_and_b[:,:cols-1]\n parameter.data = update\n update_b = final_w_and_b[:,-1]\n bias.data = update_b\n else:\n parameter.data = final_w_and_b", "def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n 
self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)", "def batch_norm_conv(x, n_out, phase_train):\n with tf.variable_scope('bn'):\n beta = tf.Variable(tf.constant(0.0, shape=[n_out]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 
- self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def __init__(self, mode, dim, epsilon=1e-12, name='l2Normalize'):\n super(L2Normalization, self).__init__(mode, name)\n self.dim = dim\n self.epsilon = epsilon", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))", "def l2_normalization(\n inputs,\n name,\n scaling=False,\n scale_initializer=init_ops.ones_initializer(),\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n data_format='NHWC',\n trainable=True,\n scope=None):\n\n with variable_scope.variable_scope(\n scope, 'L2Normalization_'+name, [inputs], reuse=reuse) as sc:\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n dtype = inputs.dtype.base_dtype\n if data_format == 'NHWC':\n # norm_dim = tf.range(1, inputs_rank-1)\n norm_dim = tf.range(inputs_rank-1, inputs_rank)\n params_shape = inputs_shape[-1:]\n elif data_format == 'NCHW':\n # norm_dim = tf.range(2, inputs_rank)\n norm_dim = 
tf.range(1, 2)\n params_shape = (inputs_shape[1])\n\n # Normalize along spatial dimensions.\n outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)\n # Additional scaling.\n if scaling:\n scale_collections = utils.get_variable_collections(\n variables_collections, 'scale')\n scale = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=scale_initializer,\n collections=scale_collections,\n trainable=trainable)\n if data_format == 'NHWC':\n outputs = tf.multiply(outputs, scale)\n elif data_format == 'NCHW':\n scale = tf.expand_dims(scale, axis=-1)\n scale = tf.expand_dims(scale, axis=-1)\n outputs = tf.multiply(outputs, scale)\n # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))\n\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n Xshape = x.shape\n\n if len(Xshape) > 2: #deal with 2d inputs\n N,C,H,W = x.shape\n x = np.swapaxes(x,1,3)\n D = C\n x = np.reshape(x,[N*H*W,C])\n else:\n N = x.shape[0]\n x = np.reshape(x,[N,-1])\n _, D = x.shape\n\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. #\n #######################################################################\n mu = np.mean(x,axis=0)\n var = np.var(x, axis=0)\n x_norm = (x - mu)/np.sqrt(var + eps)\n out = gamma * x_norm + beta\n running_mean = momentum*running_mean + (1-momentum)*mu\n running_var = momentum*running_var + (1-momentum)*var\n cache = (x_norm, gamma, np.sqrt(var + eps))\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. 
#\n #######################################################################\n x_norm = (x - running_mean)/np.sqrt(running_var + eps)\n out = gamma * x_norm + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n if len(Xshape) > 2:\n out = np.reshape(out,[N,W,H,C])\n out = np.swapaxes(out,1,3)\n else:\n out = np.reshape(out,Xshape)\n return out, cache", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n ###########################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line.#\n ###########################################################################\n N = dout.shape[0]\n x_norm,inv_var,gamma = cache\n dgamma = np.sum(dout * x_norm,axis = 0)\n dbeta = np.sum(dout,axis = 0)\n #Simplified calculation of dx.\n dx_normalized = dout * gamma\n dx = (1 / N) * inv_var * (N * dx_normalized - np.sum(dx_normalized,axis = 0) \\\n - x_norm * np.sum(dx_normalized * x_norm,axis = 0)) \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return dx, dgamma, dbeta", "def train_net(epoch, data, net, opti, batch_graph):\n global num_batches, batch_size\n # train the network\n for num in range(epoch):\n # run each batch through each round\n for batch_id in range(num_batches):\n # calculate the neighborhood for the graph\n batch = torch.from_numpy(data[batch_id]).float()\n batch = batch.view(batch_size, -1)\n batch_distances = pairwise_distances(batch)\n nbr_graph_tensor = torch.from_numpy(batch_graph[batch_id]).float()\n batch_distances_masked = batch_distances * nbr_graph_tensor.float()\n global lbda\n out = net(batch, False) # run the batch through the network\n svd_loss, out = implement_svd(out) # calculate the SVD L2,1 loss and SVD representation\n output_distances = pairwise_distances(out)\n # Multiply the distances between each pair of points with the neighbor mask\n output_distances_masked = output_distances * nbr_graph_tensor.float()\n # Find the difference between |img_i - img_j|^2 and |output_i - output_j|^2\n nbr_diff = torch.abs((output_distances_masked - batch_distances_masked))\n nbr_distance = nbr_diff.norm()\n svd_loss *= lbda_svd # multiply SVD loss by its scaling factor\n # find variance in all directions\n var = 0\n for i in range(out.size()[0]):\n var += lbda_var / out[i].var()\n loss = nbr_distance + svd_loss + var # loss contains all three terms\n opti.zero_grad()\n loss.backward()\n opti.step()\n print('Epoch: %f, Step: %f, Loss: %.2f' % (num, batch_id + 1, 
loss.data.cpu().numpy()))\n\n # find the ideal number of dimensions\n global final_dim\n batch = torch.from_numpy(data[0]).float()\n batch = batch.view(batch_size, -1)\n out = net(batch, False)\n u, s, v = torch.svd(out)\n final_dim = calc_dim(s)", "def compute_loss(model, loader, loss_fn, optimizer=None):\n\n total_loss = 0.0\n count_batches = 0\n for x_, y_, qm_, db_mask, blink_mask in loader:\n batch_loss_list = []\n xbatch_list = []\n for mask in [db_mask, blink_mask]:\n idxes = get_idxes_from_mask(mask)\n x_pick, y_pick, qm_pick = x_[idxes], y_[idxes], qm_[idxes]\n y_pos_idxes = torch.nonzero(y_pick.squeeze(), as_tuple=False).reshape(1, -1)[0]\n y_neg_idxes = torch.nonzero(~y_pick.squeeze().bool(), as_tuple=False).reshape(1, -1)[0]\n\n if (len(y_pos_idxes) == 0) or (len(y_neg_idxes) == 0):\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(x_pick) <= 1:\n xbatch_list.append(torch.tensor([]))\n continue\n elif len(y_pos_idxes) == 1:\n y_pos_idx = y_pos_idxes[0]\n else: # len(y_pos_idxes) > 1:\n # TODO: I am just always using the first positive example for now\n # rand_idx = random.choice(list(range(len(y_pos_idxes))))\n # print(y_pos_idxes)\n rand_idx = 0\n y_pos_idx = y_pos_idxes[rand_idx]\n\n batch_length = 1 + len(y_neg_idxes)\n batch_feature_len = x_.shape[1]\n x_batch = torch.zeros(batch_length, batch_feature_len)\n x_batch[:-1:, :] = x_pick[y_neg_idxes]\n x_batch[-1, :] = x_pick[y_pos_idx] # put positive to the end\n xbatch_list.append(x_batch)\n # print(y_pos_idx, len(y_neg_idxes))\n # print(\"batch\", x_batch.shape)\n\n if (len(xbatch_list[0]) == 0) and (len(xbatch_list[1]) == 0):\n # skip if both batches are []\n # print(\"hitting cases without any examples [SHOULD BE WRONG]\")\n continue\n elif (len(xbatch_list[0]) == 0) or (len(xbatch_list[1]) == 0):\n # continue # TODO: testing whether improvements made if we only use cases where there are sources from both\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 1\n yhat_neg = yhat[:-1]\n yhat_pos = yhat[-1].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones((len(yhat) - 1), 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n else:\n # get yhats for both BLINK and DB batches\n # print(len(xbatch_list[0]), len(xbatch_list[1]))\n # print((xbatch_list[0], xbatch_list[1]))\n yhat = model(xbatch_list[0], xbatch_list[1])\n extended_batch_length = len(yhat) - 2\n yhat_neg = torch.zeros(extended_batch_length, 1)\n yhat_neg[:len(xbatch_list[0])-1] = yhat[:len(xbatch_list[0])-1]\n yhat_neg[len(xbatch_list[0])-1:] = yhat[len(xbatch_list[0]):-1]\n for idx in [len(xbatch_list[0]), -1]:\n yhat_pos = yhat[idx].repeat(extended_batch_length, 1)\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones(extended_batch_length, 1).to(device))\n batch_loss_list.append(loss)\n total_loss += loss.item() * extended_batch_length\n count_batches += 1\n\n # update every question-mention\n if batch_loss_list and optimizer:\n (sum(batch_loss_list)/len(batch_loss_list)).backward()\n optimizer.step()\n\n avg_loss = total_loss / count_batches\n\n return avg_loss, batch_length", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation 
function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)" ]
[ "0.6198059", "0.6015281", "0.596931", "0.5949474", "0.58950055", "0.58950055", "0.5811575", "0.5737953", "0.56548023", "0.5646338", "0.563125", "0.5623065", "0.56162375", "0.5609015", "0.5605381", "0.5597299", "0.5579035", "0.55545306", "0.5553643", "0.5545299", "0.5540341", "0.5535407", "0.5515462", "0.54829633", "0.54822296", "0.54721457", "0.54714465", "0.54513067", "0.5445833", "0.54440796", "0.54438937", "0.54410386", "0.54368514", "0.5435608", "0.54316634", "0.5427213", "0.54140234", "0.5408871", "0.5403867", "0.54000425", "0.53938586", "0.53867435", "0.5382787", "0.5382499", "0.5379223", "0.5378991", "0.5374265", "0.53716004", "0.5370554", "0.53692496", "0.5366847", "0.5364064", "0.5363727", "0.5360618", "0.53578925", "0.5355598", "0.5349586", "0.5346617", "0.5343644", "0.5342756", "0.53417397", "0.5340065", "0.5329678", "0.5324891", "0.532156", "0.5312524", "0.5310327", "0.5309088", "0.530873", "0.53077424", "0.5306497", "0.53038675", "0.5296198", "0.5291257", "0.5288853", "0.5284629", "0.5281967", "0.52765024", "0.527627", "0.52717", "0.5270032", "0.5266901", "0.5266495", "0.5263451", "0.52611005", "0.5259601", "0.5258436", "0.52583617", "0.52553266", "0.5255199", "0.52551407", "0.5251535", "0.5251468", "0.52457523", "0.5245629", "0.5242619", "0.52421576", "0.52384233", "0.52382934", "0.523669" ]
0.60564125
1
This first section contains the functions that make the program convert the different number systems
def calculator (menuWindow):
    #This procedure accepts the parameter subProgram which will tell it which conversion function
    #to call. These functions will then return a value to outputUpdate and
    #set resultText to the appropriate message"""

    def popUP(message):
        pop = tk.Tk()
        pop.title("Error")
        #resultText.set(message)
        tk.Label(pop, text=message).pack()
        pop.mainloop()

    def outputUpdate(subProgram):
        #Selection block that will run the appropriate function based upon
        #the button the user pushes
        #It first obtains the entered value in the input box
        number = baseNumber.get()
        if subProgram == 1:
            value = hex_bin()
            if value != "Must only contain numbers and letters in the Hex set\n" \
                        "0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f":
                resultText.set("The binary for this number is: " + str(value)[2:].upper())
            else:
                popUP(value)
        elif subProgram == 2:
            #The function is run within a variable to that the returned
            #value is stored and usable
            value = hex_dec()
            if value != "Must only contain numbers and letters in the Hex set\n" \
                        "0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f":
                resultText.set("The decimal for this number is: " + str(value).upper())
            else:
                popUP(value)
        elif subProgram == 3:
            value = dec_hex()
            #using the is digit method to see if the returned value is a number.
            #If the value is a number the user has entered a valid decimal value
            if value != "Must only enter whole numbers e.g. 1, 10, 14":
                resultText.set("The decimal for this number is: " + str(value).upper())
            else:
                #If the user did not enter a valid decimal value
                #The function will have returned an appropriate error message
                popUP(value)
        elif subProgram == 4:
            value = dec_bin()
            test = value.replace(" ","")
            if test.isalpha():
                popUP(value)
            else:
                #string slicing used to remove the leading 0b from the binary value
                resultText.set("The binary value of " + str(number) + " is " + str(value)[2:])
        elif subProgram == 5:
            value = bin_dec()
            if value != "Must enter a valid binary number i.e. only containint 1 or 0":
                resultText.set("The decimal value of " + str(number) + " is " + str(value))
            else:
                popUP(value)
        else:
            value = bin_hex()
            if value != "Must enter a valid binary number i.e. only containint 1 or 0":
                resultText.set("The hexadecimal value of " + str(number) + " is " + str(value)[2:].upper())
            else:
                popUP(value)

    def hex_bin():
        #This makes use of the hex_dec function to get the decimal value of the hex number
        #This means I don't have to re-write code
        number = hex_dec()
        try:
            binValue = bin(number)
            #Returning the value to the output function
            return binValue
        except:
            return "Must only contain numbers and letters in the Hex set\n" \
                   "0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f"

    def hex_dec():
        #Establish a dictionary to store the hex value of each position
        number = baseNumber.get()
        try:
            value = int(number,16)
            return value
        except:
            value = "Must only contain numbers and letters in the Hex set\n" \
                    "0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f"
            return value

    def dec_hex():
        #As before this is getting the entered value
        number = baseNumber.get()
        if number.isdigit():
            #Converting the input to an integer so that we can use it in calculations
            number = int(number)
            #Making use of the inbuilt hex function that returns the hex value of a decimal
            hexConvert = hex(number)
            #hex() returns this with a leading 0x
            #I have used string slicing to remove the elements I do not want
            hexConvert = hexConvert[2:]
            #As with the other functions this returns the numerical value
        else:
            hexConvert = "Must only enter whole numbers e.g. 1, 10, 14"
        return hexConvert

    '''Completed Not Commented'''
    def dec_bin():
        #Retrieving the value entered by the user to the GUI
        number = baseNumber.get()
        #Selection statement testing if the value etered was a digit
        if number.isdigit():
            #If a digit is entered the conversion is carried out
            number = bin(int(number))
        else:
            #If the user enters a non-digit, the error message is returned
            number = "Must enter a valid digit"
        return number

    def bin_hex():
        #the bin_dec() function is called to obtain a decimal value for conversion
        decValue = bin_dec()
        #Error checking takes place in an attempt to carry out the conversion
        try:
            #the hex and int functions are used to convert the returned decValue
            #If no error is caused the conversion is carried out and returned
            hexVal = hex(int(decValue))
            return hexVal
        except:
            #Any errors are caught and returned to the output procedure
            return "Must enter a valid binary number i.e. only containint 1 or 0"

    def bin_dec():
        #The entered number is retrieved and stored in a variable for use
        number = baseNumber.get()
        #Error checking to stop the program crashing
        try:
            #Attempt to convert the entered value into an int with base 2
            #If no error is caused the value is returned
            value = int(number , 2)
            return value
        except:
            #If an error occurs the error is caught and the appropriate message
            #returned to the output function
            return "Must enter a valid binary number i.e. only containint 1 or 0"

    #Procedure to convert the text the user enters in the entry box to upper case
    def caps(event):
        entryText.set(entryText.get().upper())

    def close():
        root.destroy()
        menu()

    #Setting the tk environment to start the GUI
    menuWindow.destroy()
    root = tk.Tk()

    '''I have set up different frames to allow for different grid layouts'''
    #Setting the title that will appear at the top of the window
    root.title("BinHexDec Calculator")
    #Creating a frame that will hold the top text of the window
    titleFrame = tk.Frame(root, width=400, height=50)
    titleFrame.pack()
    #Creating a frame that will hold the entry widget
    entryFrame = tk.Frame(root, width=400, height=200)
    entryFrame.pack()
    resultFrame = tk.Frame(root, width=400, height=200)
    resultFrame.pack()
    buttonFrame = tk.Frame(root, width=400, height=200)
    buttonFrame.pack()
    menuFrame = tk.Frame(root, width=400, height=200)
    menuFrame.pack()
    #Creating a label to display text on the screen
    title = tk.Label(titleFrame, text="BinHexDec Converter").pack()
    entryText = tk.Label(entryFrame, text="Enter the number to convert and select the conversion below").grid(row=0, columnspan=3)
    #Creatingan entry widget that will allow the user to enter a value
    entryText = tk.StringVar()
    baseNumber = tk.Entry(entryFrame, textvariable=entryText)
    baseNumber.grid(row=1, column=1)
    baseNumber.bind("<KeyRelease>",caps)
    #Initialising a variable as a "string variable" this allows me
    #to change this value dynamically within the program
    resultText = tk.StringVar()
    #This creates a label that will display whatever is in resultText
    #To create this dynamic label I don't set it with a text, it has a textVariable
    displayResult = tk.Label(resultFrame, textvariable=resultText).grid(row=0, column=1)
    resultText.set("The result of the calculation will appear here")
    #Here I am creating a series of buttons.
    #These will all run the outputUpdate procedure
    #So that the correct function is run a value is passed into outputUpdate
    hexBinBtn = tk.Button(buttonFrame, text="Hex to Bin", command= lambda: outputUpdate(1)).grid(row=0,column=0)
    hexDecBtn = tk.Button(buttonFrame, text="Hex to Dec", command= lambda: outputUpdate(2)).grid(row=0,column=1)
    decHexBtn = tk.Button(buttonFrame, text="Dec to Hex", command= lambda: outputUpdate(3)).grid(row=0,column=2)
    decBinBtn = tk.Button(buttonFrame, text="Dec to Bin", command= lambda: outputUpdate(4)).grid(row=0,column=3)
    binDecBtn = tk.Button(buttonFrame, text="Bin to Dec", command= lambda: outputUpdate(5)).grid(row=1,column=1)
    binHexBtn = tk.Button(buttonFrame, text="Bin to Hex", command = lambda: outputUpdate(6)).grid(row=1,column=2)
    closeBtn = tk.Button(menuFrame, text = "Return to Menu", command = close).grid(row=2,column=2)
    #This initialises the window and keeps it running constantly
    root.mainloop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pettifor_numbers():\n return { \"Li\": 0.45,\n \"Be\": 1.5,\n \"B\": 2.0,\n \"C\": 2.5,\n \"N\": 3.0, \n \"O\": 3.5,\n \"F\": 4.0,\n \n \"Na\": 0.4,\n \"Mg\": 1.28,\n \"Al\": 1.66,\n \"Si\": 1.92,\n \"P\": 2.18,\n \"S\": 2.44,\n \"Cl\": 2.70,\n \n \"K\": 0.35,\n \"Ca\": 0.60,\n \"Sc\": 0.74,\n \"Ti\": 0.79,\n \"V\": 0.84,\n \"Cr\": 0.89,\n \"Mn\": 0.94,\n \"Fe\": 0.99,\n \"Co\": 1.04,\n \"Ni\": 1.09,\n \"Cu\": 1.20,\n \"Zn\": 1.44,\n \"Ga\": 1.68,\n \"Ge\": 1.92,\n \"As\": 2.16,\n \"Se\": 2.40,\n \"Br\": 2.64,\n\n \"Rb\": 0.30,\n \"Sr\": 0.55,\n \"Y\": 0.70,\n \"Zr\": 0.76,\n \"Nb\": 0.82,\n \"Mo\": 0.88,\n \"Tc\": 0.94,\n \"Ru\": 1.00,\n \"Rh\": 1.06,\n \"Pd\": 1.12,\n \"Ag\": 1.18,\n \"Cd\": 1.36,\n \"In\": 1.60,\n \"Sn\": 1.84,\n \"Sb\": 2.08,\n \"Te\": 2.32,\n \"I\": 2.56,\n \n \"Cs\": 0.25,\n \"Ba\": 0.50,\n \"La\": 0.748,\n \"Hf\": 0.775,\n \"Ta\": 0.83,\n \"W\": 0.885,\n \"Re\": 0.94,\n \"Os\": 0.995,\n \"Ir\": 1.05,\n \"Pt\": 1.105,\n \"Au\": 1.16,\n \"Hg\": 1.32,\n \"Tl\": 1.56,\n \"Pb\": 1.80,\n \"Bi\": 2.04,\n \"Po\": 2.28, \n \"At\": 2.52 }", "def nintl(self):", "def ftb_impl(numstr, from_base='10', to_base='16'):\n ENONALNUM = list(numstr + ' has a non alpha-numeric character')\n EFBDEC = list(from_base + ' is not decimal')\n ETBDEC = list(to_base + ' is not decimal')\n ENOTINFB = list(numstr + ' is not in base ' + from_base)\n E2TO36 = list('supported bases are >= 2 and <= 36')\n MAXBASE = 36\n MINBASE = 2\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G',\n 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n try:\n # handle numstr sign\n numstrsign = 0\n if numstr[0] == '+':\n numstrsign = 1\n elif numstr[0] == '-':\n numstrsign = -1\n\n if numstrsign in (1, -1):\n numstr = numstr[1:]\n # end of handle numstr sign\n\n if from_base[0] == '+':\n from_base = from_base[1:]\n elif from_base[0] == '-':\n return E2TO36\n for char in from_base:\n if not str.isdigit(char):\n return EFBDEC\n from_base = int(from_base)\n\n for char in numstr:\n if not (str.isalnum(char) and char != '.'):\n return ENONALNUM\n if int(char, MAXBASE) >= from_base:\n return ENOTINFB\n\n if to_base[0] == '+':\n to_base = to_base[1:]\n elif to_base[0] == '-':\n return E2TO36\n for char in to_base:\n if not str.isdigit(char):\n return ETBDEC\n to_base = int(to_base)\n\n if from_base < MINBASE or from_base > MAXBASE \\\n or to_base < MINBASE or to_base > MAXBASE:\n return E2TO36\n\n numdec = int(numstr, from_base)\n\n result = []\n while numdec:\n result = [numdec % to_base] + result\n numdec = numdec // to_base\n\n for i in range(len(result)):\n char_idx = result[i]\n result[i] = numbers[result[i]]\n\n if numstrsign != 0:\n result = [str(numstrsign)] + result\n return result\n except UnicodeEncodeError as err:\n return list(str(err))", "def numerize():\n pass", "def _num(s, unit=1):\r\n if s[-2:]=='cm':\r\n unit=cm\r\n s = s[:-2]\r\n if s[-2:]=='in':\r\n unit=inch\r\n s = s[:-2]\r\n if s[-2:]=='pt':\r\n unit=1\r\n s = s[:-2]\r\n if s[-1:]=='i':\r\n unit=inch\r\n s = s[:-1]\r\n if s[-2:]=='mm':\r\n unit=mm\r\n s = s[:-2]\r\n if s[-4:]=='pica':\r\n unit=pica\r\n s = s[:-4]\r\n return _convnum(s,unit)", "def test_convertsent(self):\n convert6 = cnv()\n\n convert6.setnum(\"also haben wir hundertunddrei nein hundert 4 tausend\")\n self.assertEqual(convert6.getnum(), 104000)\n\n convert6.setnum(\"also ein haben wir hundertunddrei nein tausend\")\n self.assertEqual(convert6.getnum(), 1000)\n\n convert6.setnum(\" \")\n 
self.assertEqual(convert6.getnum(), 0)\n\n convert6.setnum(\"fünfundzwanzig\")\n self.assertEqual(convert6.getnum(), 25)\n\n convert6.setnum(\"albert ein\")\n self.assertEqual(convert6.getnum(), 1)", "def main():\r\n num = enter_num()\r\n if num is not None:\r\n num_lst = mk_num_lst(num)\r\n dec = convert(num_lst)\r\n print(\"decimal value of BASE 36 number\", num, \"is\", dec)\r\n else:\r\n print(\"user terminated program\")", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def num2str(numval,formstr=None,decrep='p',negrep='n',nsig=3,trimzero=True):\n import copy\n from string import maketrans\n # Set default\n if formstr is None: formstr='.{}g'.format(nsig)\n # Round numval if within precision\n if trimzero and not isinstance(numval,(int,long)):\n precval=10.**(-(nsig-1))\n if numval == 0:\n numval=int(numval)\n else:\n if int(numval) == 0:\n if abs(numval) < precval:\n numval=int(numval)\n else:\n if abs(numval % int(numval)) < precval: \n numval=int(numval)\n # Set default format for integers/longs\n if isinstance(numval,(int,long)): formstr='d'\n # Create string\n decstr=(r'{:'+formstr+'}').format(numval)\n # Trim zeros\n if trimzero and '.' 
in decstr:\n if 'e' in decstr or 'E' in decstr:\n if 'e' in decstr: decbas,decexp=decstr.split('e')\n if 'E' in decstr: decbas,decexp=decstr.split('E')\n if 'e' and 'E' in decstr:\n raise Exception('Both e and E are in the string: {}'.format(decstr))\n else:\n decbas=copy.deepcopy(decstr)\n decexp=''\n decbas.rstrip('0')\n decbas.rstrip('.')\n decstr=decbas+decexp\n # Create tables to replace .- characters and remove + .\n rtab='+ '\n itab='.-'\n ftab=decrep+negrep\n trantab=maketrans(itab,ftab)\n # Return translated string\n return decstr.translate(trantab,rtab)", "def convert(num_lst):\r\n dec = 0\r\n for i in range(0, len(num_lst)):\r\n print(\"position right to left is >\", i+1,\r\n \"value is \", BASE[(num_lst[i])],\r\n \"decimal value is\",\r\n (36**i) * BASE[(num_lst[i])])\r\n dec += (36**i) * BASE[(num_lst[i])]\r\n return dec", "def TransformBase(base:int, number:list, digts:int) -> int :\n i = 0\n res = 0\n while ( i < digts):\n index = digts - i - 1\n number[index] = int(number[index]) * (base ** i) \n res += number[index]\n i += 1\n return res", "def convert(temp_in_c):\n \n return temp_in_c * (9/5) + 32", "def numeric(*args):", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def number_conversion(number):\n units = ['', 'K', 'M', 'G', 'T', 'P']\n k = 1000.0\n magnitude = int(floor(log(number, k)))\n return '%.2f%s' % (number / k**magnitude, units[magnitude])", "def convert(number):\n out = \"\"\n if number % 3 == 0:\n out = \"Pling\"\n if number % 5 == 0:\n out = out + \"Plang\"\n if number % 7 == 0:\n out = out + \"Plong\"\n if out == \"\":\n out = str(number)\n return out", "def multiply_str(num1,num2):\r\n \r\n max_num_decs = max_num_decims(num1, num2)\r\n \r\n #total_num_decs = num_decims(num1) + num_decims(num2)\r\n \r\n num1, num2 = decim_modif_str(num1, num2)\r\n \r\n # Calculate the steps.\r\n \r\n steps = []\r\n for i in range(0,len(num2)): \r\n \r\n if (int(num1) * int(num2[::-1][i]) != 0):\r\n steps.append( str(int(num1)*int(num2[::-1][i])) + \"0\"*i )\r\n \r\n else:\r\n steps.append( \"0\"*len(num1) + \"0\"*i )\r\n \r\n # Add the steps.\r\n \r\n sum_steps = \"0\"\r\n for i in range(0,len(num2)):\r\n sum_steps = sum_str(sum_steps,steps[i])\r\n \r\n if len(sum_steps) != len(steps[-1]):\r\n \r\n n_zeros = abs(len(sum_steps)-len(steps[-1]))\r\n sum_steps = \"0\"*n_zeros + sum_steps\r\n \r\n # Put the decimal point.\r\n \r\n if max_num_decs != 0:\r\n \r\n result = sum_steps[:-2*max_num_decs] + \".\" + sum_steps[-2*max_num_decs:]\r\n \r\n elif max_num_decs == 0:\r\n \r\n result = sum_steps \r\n \r\n result = del_decim_zeros(result)\r\n \r\n return result", "def standard_form(num: Decimal):\n num = Decimal(num)\n\n if num >= 1:\n power = 0\n while num > 1:\n power += 1\n num /= 10\n\n else:\n power = 0\n while num < 1:\n power -= 1\n num *= 10\n if num > 1:\n break\n power += 1\n\n return num, power", "def getNumber():", "def _numeric_system(self):\n if self.__numeric_system is None:\n self.__numeric_system = self._lambdify(self._symbolic_system)\n return self.__numeric_system", "def test_convert(self):\n height = 1.6 * self.meter\n foot = .305 * self.meter\n inch = 1 / 12 * foot\n\n self.assertTrue(abs(height / foot - 5.246) < .001)\n self.assertTrue(abs(height / inch - 62.951) < .001)\n\n newton = self.kgram * 
self.meter / (self.second ** 2)\n pound = 4.448222 * newton\n accel = 9.8 * self.meter / (self.second ** 2)\n\n weight = 150 * pound\n mass = weight / accel\n self.assertTrue(abs(mass / self.kgram - 68.085) < .001)", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def describe_a_library_of_units_converters_that():\n def blows_smoke():\n assert True\n\n def can_convert_psi_to_kpa():\n assert psi2kpa(32) == 220.631712 # 32 PSI == 220.631712 KPa; average car tire pressure\n assert psi2kpa(8.5) == 58.6052985 # 8.5 PSI == 58.6052985 KPa; basketball pressure\n\n # def can_convert_kpa_to_psi():\n # assert kpa2psi(101.325) == 14.695952495133 # KPa => PSI; average air pressure at sea level\n # assert kpa2psi(220.631712) == 31.999932479367043 # KPa => PSI; average car tire pressure\n\n # def can_convert_mpg_to_lp100k():\n # assert mpg2lp100k(40) == 5.8803694563 # miles-per-gallon => liters per 100km\n # assert mpg2lp100k(25) == 9.408591130080001 # miles-per-gallon => liters per 100km\n\n # def can_convert_lp100k_to_mpg():\n # assert lp100k2mpg(9.4) == 25.022895167663442 # liters per 100km => mpg\n # assert lp100k2mpg(5.1) == 46.12063030902673 # liters per 100km => mpg", "def euroadolaramerica(x):\r\n\tconversion = x * 1.35750\r\n\treturn conversion", "def to_baseN(self, value, base, other_base = False):\r\n numeral = self.numeral\r\n \r\n if other_base: #If value is not in base 10\r\n conv_to_x = self.to_base_ten(value, other_base) #Use the above function to first convert to base 10.\r\n return self.to_baseN(conv_to_x, base) # Recursively convert from base 10 to the new base.\r\n\r\n else: # Since value supplied to this part is in decimal, we can work in base 10\r\n int_part = int(value) #Remove fractional part\r\n frac_part = value - int_part #Keep fractional part\r\n\r\n if value == 0:\r\n return \"0\"\r\n\r\n if int_part < 0:\r\n return '-' + self.to_baseN(abs(int_part), base, other_base) #for number < 0, work with its absolute form before adding -\r\n\r\n if not 2 <= base <= len(numeral):\r\n raise ValueError(f'Base must be between 2 and {len(numeral)}')\r\n \r\n int_result = \"-\" if int_part < 0 else \"\" #add - to negatiive numbers\r\n frac_result = \"\"\r\n\r\n while int_part != 0:\r\n int_result += numeral[int_part % base]\r\n int_part //= base\r\n\r\n while frac_part != 0:\r\n frac_result += numeral[int(frac_part * base)]\r\n frac_part = (frac_part * base) - int(frac_part * base)\r\n result = (int_result[::-1] + \".\" + frac_result[::1]) if frac_result else int_result[::-1]\r\n \r\n if result.startswith('.'):\r\n return \"0\" + result\r\n else:\r\n return result", "def main():\n while True:\n # ysb\n vals = input().split(\" \")\n print (convert(vals[0], vals[1], float(vals[2])))", "def test_converter_number_system(self):\n \n input_values = [0,0,2,'97']\n\n output = []\n expected_result = \"Result: 97 Dec equals to 61 Hex\"\n\n def mock_input(s):\n output.append(s)\n return input_values.pop(0)\n\n mp2.input = mock_input\n mp2.print = lambda s:output.append(s)\n mp2.main()\n self.assertEqual(output[-1],expected_result)", "def test_numbers(self):\n \n result = gen_expansion(sym.pi, 2)\n self.assertEqual(result, '14')\n result = gen_expansion(sym.exp(1), 2)\n self.assertEqual(result, '72')", "def standardization(a, p):\r\n return a * 10 / 100 * p * p", "def cvt_to_readable(num):\n\n # Find the degree of the number like if it is in thousands or millions, etc.\n index = int(math.log(num) / math.log(1000))\n\n # Converts the number to the human readable format and returns 
it.\n newNum = round(num / (1000 ** index), 3)\n degree = UNITS[index]\n\n return (newNum, degree)", "def baseconvert(number,fromdigits,todigits):\r\n base_length = len(todigits)\r\n\r\n if str(number)[0]=='-':\r\n number = str(number)[1:]\r\n neg=1\r\n else:\r\n neg=0\r\n\r\n # make an integer out of the number\r\n x=0\r\n for digit in str(number):\r\n x = x*len(fromdigits) + fromdigits.index(digit)\r\n\r\n # create the result in base 'len(todigits)'\r\n if x is 0:\r\n res = todigits[0]\r\n else:\r\n res=\"\"\r\n while x>0:\r\n digit = x % base_length\r\n res = todigits[digit] + res\r\n x = int(x / base_length)\r\n if neg:\r\n res = \"-\"+res\r\n\r\n return res", "def _to_cn(number):\n\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n ('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n\n reversed_n_string = reversed(str(number))\n\n result_lst = []\n unit = 0\n\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n\n # clean convert result, make it more natural\n if result_lst[-1] is '零':\n result_lst.pop()\n\n result_lst = list(''.join(result_lst))\n\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n\n '''\n length = len(str(number))\n if 4 < length <= 8:\n flag = result_lst.count('万')\n while flag > 1:\n result_lst.pop(result_lst.index('万'))\n flag -= 1\n elif 8 < length <= 12:\n flag = result_lst.count('亿')\n while flag > 1:\n result_lst.pop(result_lst.index('亿'))\n flag -= 1\n elif 12 < length <= 16:\n flag = result_lst.count('兆')\n while flag > 1:\n result_lst.pop(result_lst.index('兆'))\n flag -= 1\n elif 16 < length <= 20:\n flag = result_lst.count('吉')\n while flag > 1:\n result_lst.pop(result_lst.index('吉'))\n flag -= 1\n '''\n\n return ''.join(result_lst)", "def _conversion_factor_internal(unit: str):\n return globals()[unit]", "def pow2num(x, y):\n return x**y", "def micros() -> int:", "def _convertType(self, s):\n for func in (int, float):\n try:\n n = func(s)\n return n\n except:\n pass\n return s", "def spell_number(num):\n tens, units = num / 10, num % 10\n tens_str = NUMBERS_10[tens]\n units_str = NUMBERS_1[units]\n if tens == 1:\n return NUMBERS_TEEN[units]\n elif tens:\n if units:\n return \"{t} {u}\".format(t=tens_str, u=units_str)\n return \"{t}\".format(t=tens_str)\n else:\n return units_str", "def to_digit(s: str) -> Union[float, str, int]:\n out = s.strip()\n f_twin = r'\\d+[,.]\\d{2,} {0,}- {0,}\\d+[.,]\\d{2,}'\n f_rank = r'\\d/10'\n f_score = r'[ ]{0,}\\d+[ ]{0,}'\n f_date = r'\\d\\d\\.\\d\\d\\.\\d\\d\\d\\d'\n f_main = r'(-?\\d*\\,?\\d+\\.?\\d*)[%BM]?'\n\n if isinstance(s, str) and re.findall(f_date, s) == [] and len(s) < 50 and s != '-':\n try: # begin from big one, because bigs consist small re\n\n if re.search(f_main, s) is not None:\n res = re.search(f_main, s.strip()).groups()[0]\n if res == '-':\n return '-'\n k = 1\n mul = 1\n after_point = res.split('.')\n if len(after_point) == 2:\n k = 10 ** len(after_point[1].replace(',', ''))\n\n mul = 1000000000 if s.find('B') > 0 else mul # 
found Billions\n mul = 1000000 if s.find('M') > 0 else mul # found Millions\n mul = 0.01 if s.find('%') > 0 else mul # found Percent format\n mul = mul * -1 if s.find(')') > 0 else mul # financial format to show minus : -192.34 = (192.34)\n\n return round(float(res.replace('.', '').replace(',', '')), 2) * mul / k if k > 1 else \\\n int(res.replace('.', '').replace(',', '')) * mul\n\n if len(re.findall(f_twin, s)) > 0: # format range xxx.xx - xxx.xx\n return float(re.findall(f_twin, s)[0]\n .replace(' ', '')\n .split('-')[0]\n .replace(',', '')\n .replace('.', '')) / 100\n\n if len(re.findall(f_rank, s)) > 0: # format score like 9/10 -> 9\n return int(re.findall(f_rank, s)[0].split('/')[0])\n\n if len(re.findall(f_score, s)) > 0: # format one digit score like ' 5 ' -> 5\n return int(re.findall(f_score, s)[0].replace(' ', ''))\n\n except Exception as e:\n\n logging.error(f\"Error in to_digit(). Input {s}, Out \")\n return out", "def convert_to_numerals(number):\n assert number < 4000\n expression = factorize(int(number), ROMAN_NUMERALS)\n result = list(numeral(f) * c for c, f in normalize(expression))\n return \"\".join(result)", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def convertebasenbase10(baseorig, numero):\n base10 = 0\n for i in range(len(numero)-1, -1, -1):\n base10 += DIGITOS.index(numero[i]) * baseorig**(len(numero)-i-1)\n\n return base10", "def fnum(num, sf = 0):\n\n\ts = []\n\tnf = 0\n\tppos = -1\n\tfor x in str(num):\n#\t\tprint((x, s))\n\t\tif x == '.':\n\t\t\tppos = len(s)\n\t\t\tcontinue\n\t\tif nf == 0 and ppos < 0 and x == '0':\n\t\t\tcontinue\n\t\ts.append(x)\n\t\tif x != '-' and (x != '0' or nf > 0):\n\t\t\tnf += 1\n\t\tif ppos >= 0 and sf > 0 and nf > sf:\n\t\t\tif int(s[-1]) >= 5:\n\t\t\t\ts[-2] = str(int(s[-2]) + 1)\n\t\t\ts = s[:-1]\n\t\t\tbreak\n\tif len(s) == 0:\n\t\ts = ['0']\n\tif ppos >= 0:\n\t\ts.insert(ppos, '.')\n\t\tif s[0] == '.':\n\t\t\ts.insert(0, '0')\n\t\treturn(''.join(s).rstrip('0').rstrip('.'))\n\telse:\n\t\treturn(''.join(s))", "def numify(vu):\n a, b = vu.split('/', 1)\n\n # For composite variant units, we'll lose everything after the comma\n b = b.split(',')[0]\n\n if '-' in b:\n bits = [int(x) for x in b.split('-')]\n b = float(\"{}.{}\".format(*bits))\n else:\n b = int(b)\n\n if re_vref.match(a):\n # This is a full ref\n bits = [int(x) for x in re_vref.match(a).groups()]\n a = 100000 * bits[0] + 1000 * bits[1] + bits[2]\n else:\n # Assume it's a simple verse number\n a = int(a)\n\n return [a, b]", "def main():\n for i in range(11):\n i = i / 10\n print('L * 1000', i, L(i) * 1000)", "def get_num(digits):\r\n\r\n number = 0\r\n sign = 1\r\n digit_number = 0\r\n decimal = 0\r\n number_of_digits = len(digits)\r\n \r\n for digit in digits:\r\n if digit == \"-\":\r\n sign = -1\r\n digit_number += 1\r\n elif digit == \".\":\r\n decimal = number_of_digits - digit_number\r\n else:\r\n number += int(digit)*(10**(number_of_digits-digit_number-1))\r\n digit_number += 1\r\n \r\n return float(number*sign)/(10**decimal)", "def base_conversion(s, b_src, b_dst):\n if not s: return None\n if s == '0': return s\n res = ''\n number = 0\n sign = '-' if s[0] == '-' else ''\n start = 1 if sign == '-' else 0\n table = {\n 'A': 10,\n 'B': 11,\n 'C': 12,\n 'D': 13,\n 'E': 14,\n 'F': 15,\n 10: 'A',\n 11: 'B',\n 12: 'C',\n 13: 'D',\n 14: 'E',\n 15: 'F',\n }\n\n for i in xrange(start, len(s)):\n digit = table[s[i]] if s[i] in table else ord(s[i]) - ord('0')\n number = 
number * b_src + digit\n\n while number != 0:\n \"\"\"\n The time complexity of this loop is math.floor(math.log(number, b_dst))+1 because\n the number of digits is the total loop time.\n If you want to know how many digits of a number(in decimal, that is base 10) under some base,\n just take log with the base you want.\n Ex: Let's consider a number 1024 in decimal(base 10), then\n 1. Under base 10, the number of digits of 1024 is math.floor(math.log(1024, 10)) + 1 = 4\n 2. Under base 2, the number of digits of 1024 is math.floor(math.log(1024, 2)) + 1 = 11\n \"\"\"\n r = number % b_dst\n res = (table[r] if r in table else chr(r + ord('0')))+ res\n number /= b_dst\n\n return sign + res", "def _format_numbers(smth: any) -> any:\n if isinstance(smth, int):\n return float(smth)\n elif smth == 'N.V.':\n return 0.0 # meaning, wine is of type 'non-vintage' and is made of grapes from more than one harvest\n else:\n return smth", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def morph_numeric(lex, ord_or_card, value, digit):\n raise NotImplementedError", "def base_conversion(destination_base, decimal_number):\n remainder_4 = decimal_number % destination_base\n remainder_3 = (decimal_number // destination_base) % destination_base\n remainder_2 = (decimal_number // destination_base // destination_base) % destination_base\n remainder_1 = (decimal_number // destination_base // destination_base // destination_base) % destination_base\n\n converted_number = str(remainder_1)+str(remainder_2)+str(remainder_3)+str(remainder_4)\n return converted_number", "def reformat(number):\n if number.find('E') == -1:\n exponent = \"-101\"\n mantissa = number.split(exponent)\n return float(mantissa[0])*10**float(exponent)\n else:\n mantissa, exponent = number.split('E')\n\n return float(mantissa)*10**float(exponent)", "def convertbase(number, base=10):\n\n integer = number\n if not integer:\n return '0'\n sign = 1 if integer > 0 else -1\n alphanum = string.digits + string.ascii_lowercase\n nums = alphanum[:base]\n res = ''\n integer *= sign\n while integer:\n integer, mod = divmod(integer, base)\n res += nums[mod]\n return ('' if sign == 1 else '-') + res[::-1]", "def baseconvert(num, base):\n\n digits = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\n try:\n num = int(num)\n base = int(base)\n except ValueError:\n return \"\"\n\n if num < 0 or base < 2 or base > 36:\n return \"\"\n\n num_string = \"\"\n while 1:\n remainder = num % base\n num_string = digits[remainder] + num_string\n num = num / base\n if num == 0:\n break\n\n return num_string", "def test_convert_amounts(self):\n pass", "def number(self):", "def test_getnumber(self):\n convert = cnv()\n\n convert.setnum('einhundertdreiundzwanzig')\n self.assertEqual(convert.getnum(), 123)", "def preprocess(inpString):\n\n figure = re.compile(r'\\([^\\W\\d_]+ [0-9]+\\.[0-9]+\\)') # i.e (Tablo 3.2)\n out_string = figure.sub('', inpString)\n\n digit_dot = re.compile(r'([0-9]+)\\.([0-9]{3})') # i.e 19.000 --> 19000\n out_string = digit_dot.sub(r'\\1\\2', out_string)\n out_string = digit_dot.sub(r'\\1\\2', out_string)\n\n centigrade = re.compile(r'(°C)|(°c)|(0C)') # °C --> santigrat\n out_string = centigrade.sub(r' santigrat', out_string)\n\n out_string = re.sub(r'°', ' derece', out_string) # ° --> derece\n\n digit_space = re.compile(r'([0-9]+) ([0-9]+)') # 19 000 --> 19000\n out_string = digit_space.sub(r'\\1\\2', out_string)\n\n out_string = re.sub(r'â', 'a', out_string) # Elâzig --> Elazig\n\n spec_hyphen = 
re.compile(r'([A-Za-z])-([0-9]+)') # G-20 --> G20\n out_string = spec_hyphen.sub(r'\\1\\2', out_string)\n\n out_string = re.sub(r'-', ' ', out_string) # replace hyphen with space\n\n out_string = re.sub(r'%|‰', 'yüzde ', out_string) # % --> yuzde\n\n year = re.compile(\"([0-9]{4})(’|')([a-z]+)\") # 1815'te --> 1815 yilinda\n out_string = year.sub(r'\\1 yılında', out_string)\n\n out_string = re.sub(r' km2', ' kilometrekare', out_string) # converting km2, m, km\n out_string = re.sub(r' m ', ' metre ', out_string)\n out_string = re.sub(r' km ', ' kilometre ', out_string)\n\n out_string = re.sub(r\"(’|')([a-züşöıç]+)\", '', out_string) # turkiye'de --> turkiye\n\n out_string = re.sub(r'([0-9]+),([0-9]+)', r'\\1CBN\\2', out_string) # replacing comma between\n # digits with a placeholder\n\n puncs = string.punctuation + '”' + '“' + '’' + '‘'\n translator = str.maketrans('', '', puncs)\n out_string = out_string.translate(translator) # removing pucntuations\n\n out_string = re.sub(r'CBN', ',', out_string) # bringing back the comma between numbers\n # out_string= out_string.split(' ') #[s.split(' ') for s in out_string.split('#')] #splitting from end of sentences\n # end sentence into words\n\n return out_string", "def test_number_input(self):\r\n easy_eval = lambda x: calc.evaluator({}, {}, x)\r\n\r\n self.assertEqual(easy_eval(\"13\"), 13)\r\n self.assertEqual(easy_eval(\"3.14\"), 3.14)\r\n self.assertEqual(easy_eval(\".618033989\"), 0.618033989)\r\n\r\n self.assertEqual(easy_eval(\"-13\"), -13)\r\n self.assertEqual(easy_eval(\"-3.14\"), -3.14)\r\n self.assertEqual(easy_eval(\"-.618033989\"), -0.618033989)", "def convert(x, unit1, unit2):\r\n return conversions[unit1][unit2](x)", "def spice_unit_convert(valuet, restrict=[]):\n # valuet is a tuple of (unit, value), where \"value\" is numeric\n # and \"unit\" is a string. \"restrict\" may be used to require that\n # the value be of a specific class like \"time\" or \"resistance\". 
\n\n # Recursive handling of '/' and multiplicatioon dot in expressions\n if '/' in valuet[0]:\n parts = valuet[0].split('/', 1)\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result /= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u22c5' in valuet[0]:\t# multiplication dot\n parts = valuet[0].split('\\u22c5')\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result *= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u00b2' in valuet[0]:\t# squared\n part = valuet[0].split('\\u00b2')[0]\n result = numeric(spice_unit_unconvert([part, valuet[1]], restrict))\n result *= numeric(spice_unit_unconvert([part, \"1.0\"], restrict))\n return str(result)\n\n if valuet[0] == \"\":\t\t# null case, no units\n return valuet[1]\n\n for unitrec in unittypes:\t# case of no prefix\n if re.match('^' + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n return valuet[1]\n else:\n return valuet[1]\n\n for prerec in prefixtypes:\n for unitrec in unittypes:\n if re.match('^' + prerec + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n else:\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n\n # Check for \"%\", which can apply to anything.\n if valuet[0][0] == '%':\n newvalue = numeric(valuet[1]) * 0.01\n return str(newvalue)\n \n if restrict:\n raise ValueError('units ' + valuet[0] + ' cannot be parsed as ' + restrict.lower())\n else:\n # raise ValueError('units ' + valuet[0] + ' cannot be parsed')\n # (Assume value is not in SI units and will be passed back as-is)\n return valuet[1]", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def ttrans(cdmtype, kinds=kinds): \n nptype=numpy.float32\n try:\n nptype=kinds[cdmtype.strip()]\n except:\n pass\n #print(cdmtype,'not found, using numpy.float32') \n return nptype", "def convert_int_to_form(num: int, form_num: int) -> int:\n output = 0\n bin_digits = []\n\n while num > 0:\n num, r = divmod(num , form_num)\n bin_digits.insert(0, r)\n\n num_digits = len(bin_digits) - 1\n for i in range(num_digits + 1):\n digit = bin_digits[i] * 10 ** (num_digits - i)\n output += digit\n return str(output)", "def _initialize_number_data_nl(short_scale):\n multiplies = _MULTIPLIES_SHORT_SCALE_NL if short_scale \\\n else _MULTIPLIES_LONG_SCALE_NL\n\n string_num_ordinal_nl = _STRING_SHORT_ORDINAL_NL if short_scale \\\n else _STRING_LONG_ORDINAL_NL\n\n string_num_scale_nl = _SHORT_SCALE_NL if short_scale else _LONG_SCALE_NL\n string_num_scale_nl = invert_dict(string_num_scale_nl)\n\n return multiplies, string_num_ordinal_nl, string_num_scale_nl", "def test_reference_conversion_factors():\n assert constants.eV == pytest.approx(1.602176565e-19)\n assert constants.eV * constants.N_A / constants.kcal == pytest.approx(23.06, 3e-5)\n assert constants.hartree * constants.N_A / constants.kcal == pytest.approx(627.5095)\n assert constants.hartree / constants.eV == pytest.approx(27.2114)\n assert constants.hartree * constants.centi / (\n constants.h * constants.c\n ) == pytest.approx(219474.63)", "def temp_converter():\n degreeC = input(\"What degree in C do you want to convert 
to F? \")\n degreeF = int(degreeC) * 9 / 5 + 32\n print(\"\\nRobbie says:\\n\")\n print(\"I converted %s C in to %s F!\" % (degreeC, degreeF))", "def test_setnumber(self):\n convert1 = cnv()\n\n convert1.setnum('einhundertdreiundzwanzig')\n self.assertEqual(convert1.numstring, 'einhundertdreiundzwanzig')", "def friendly_number(number, base=1000, decimals=0, suffix='',\n\t\t\t\t\tpowers=['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']):\n\tfrom math import log, pow\n\textend = pow(10, decimals)\n\tpower_index = int(log(number * extend, base))\n\tpower = powers[power_index]\n\tif decimals:\n\t\tcut_off_length = base * power_index - decimals\n\t\tstr_num = str(number)[:-cut_off_length]\n\t\tif number[-cut_off_length] >= '5':\n\t\t\tstr_num = str(int(str_num)+1)\n\n\treal = number / power\n\treturn str(number)", "def r2v(digits, base):\n b = len(base)\n n = 0\n for d in digits:\n n = b * n + base[:b].index(d)\n return n", "def numeral(number):\n return ROMAN_NUMERALS[number]", "def calc_soma(n):\n \n # Comecamos por percorrer os caracteres de n, e juntamos a cada caracter o que estava à sua direira, do lado esquerdo, invertendo o numero. Caso um dos caracteres nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Seguidamente, percorremos a cadeia recem criada. OS caracteres nas posicoes impares da cadeia anterior (indices 0,2,4,..) vao ser multiplicados por 2. Se a multiplicacao der um resultado superior a 9, subtrai-se 9. Os caracteres nas posicoes pares vao para a nova cadeia sem qualquer alteracao.\n # Finalmente percorremos os elementos da cadeia e somamos, convertidos a inteiros.\n \n \n comp = len(n)\n num_invertido , num_invertido2 = '' , ''\n soma_luhn = 0\n \n for e in n:\n \n if '0' <= e <= '9': \n num_invertido = e + num_invertido\n \n else:\n raise ValueError ('function calc_soma(): O string recebido apenas pode conter digitos')\n \n \n for i in range(comp):\n \n if i%2 == 0:\n resultado = eval(num_invertido[i]) * 2\n \n if resultado > 9:\n num_invertido2 = num_invertido2 + str(resultado - 9)\n \n else:\n num_invertido2 = num_invertido2 + str(resultado)\n \n else:\n num_invertido2 = num_invertido2 + (num_invertido[i])\n \n\n for e in num_invertido2:\n soma_luhn = soma_luhn + eval(e)\n \n return soma_luhn", "def base_number(number, count, dict_cardinal_num):\n special_numeral = [\"trăm\", \"mười\", \"mươi\", \"linh\", \"lăm\", \"mốt\"]\n list_cardinal_numeral = []\n # Divide number (abc) and follow place's number\n a = number // 100 # hundreds\n b = (number % 100) // 10 # Tens\n c = number % 10 # Ones\n # check a\n if a > 0:\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n elif a == 0:\n if count > 1 and (b > 0 or c > 0):\n list_cardinal_numeral.append(dict_cardinal_num[a])\n list_cardinal_numeral.append(special_numeral[0])\n # check b\n if b == 0:\n if c > 0:\n if a > 0 or count > 1:\n list_cardinal_numeral.append(special_numeral[3])\n elif b > 0:\n if b == 1:\n list_cardinal_numeral.append(special_numeral[1])\n elif b > 1:\n list_cardinal_numeral.append(dict_cardinal_num[b])\n list_cardinal_numeral.append(special_numeral[2])\n # check c\n if c == 0:\n if count == 1 and a == 0 and b == 0:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n elif c > 0:\n if b >= 1 and c == 5:\n list_cardinal_numeral.append(special_numeral[4])\n elif b >= 2 and c == 1:\n list_cardinal_numeral.append(special_numeral[5])\n else:\n list_cardinal_numeral.append(dict_cardinal_num[c])\n\n return \" \".join(list_cardinal_numeral)", 
"def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num", "def spelled_num_to_digits(spelled_num):\n words = re.split(r\",?\\s+|-\", spelled_num.lower())\n major = 0\n units = 0\n for w in words:\n x = SMALL.get(w, None)\n if x is not None:\n units += x\n elif w == \"hundred\":\n units *= 100\n elif w == \"and\":\n continue\n else:\n x = MAGNITUDE.get(w, None)\n if x is not None:\n major += units * x\n units = 0\n else:\n raise NumberException(\"Unknown number: %s\" % w)\n return major + units", "def main(num1, num2, text):\n return print(\"%30i\"%num1), print(\"%030i\"%num1), print(\"%.2f\"%num2), print(\"%.12f\"%num2), \\\n print(\"%40s\"%text)", "def _math_transform(fqdn, value):\n import math\n return _package_transform(math, fqdn, value)", "def int_to_base(num, base):\n if base<=0: return '0' \n digits = []\n if (num <0):\n \tcur= -num\n else: cur = num\n while(cur>0):\n\t\tdigits.append(str(cur%base))\n\t\tcur/=base\n if (num <0): digits.append('-')\n digits.reverse()\n\n \n \n return ''.join(digits)", "def convert_qty (qty,unit,ing) :\n portion_presence = False\n try :\n div = re.search(r\"[^ \\w]\", qty).start()\n portion = float(qty[div-1]) / float(qty[div+1])\n qty_float=portion\n portion_presence = True\n qty = qty[:div-1]\n except :\n try : \n qty_float = float(qty)\n except :\n qty_float = 10\n\n if portion_presence == True :\n if len(qty) > 0 :\n qty_float += float(qty[:div-2])\n \n #use the unit to have in ml\n #qty_float*=conversion_unit[unit]\n \n #convert in grammes with the database of density\n #qty_float*=density[ing]\n \n return qty_float", "def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n currentnum = num\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n if not num:\n return '0'\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n return converted_string", "def test_number_sci_notation(self):\r\n self.assertEquals(\r\n preview.latex_preview('6.0221413E+23'),\r\n r'6.0221413\\!\\times\\!10^{+23}'\r\n )\r\n self.assertEquals(\r\n preview.latex_preview('-6.0221413E+23'),\r\n r'-6.0221413\\!\\times\\!10^{+23}'\r\n )", "def mpc2internal(self,Code):\n if (Code.isdigit()):\n internal_code=int(Code)\n else:\n internal_code=(ord(Code[0])-55)*100+int(Code[1:])\n internal_code = -internal_code\n return (internal_code)", "def main(destination_base, max_number, decimal_number):\n if 2 <= destination_base <= 9:\n if 0 <= decimal_number <= max_number:\n converted_number = base_conversion(destination_base, decimal_number)\n print(f\"the converted number is: {converted_number}\")\n else:\n print(\"invalid input for base 10 number\")\n else:\n print(\"invalid input for destination base\")", "def sixteen():\r\n \r\n number = str(pow(2, 1000))\r\n sum = 0\r\n \r\n for i in number:\r\n sum += int(i)\r\n \r\n return sum", "def main():\n print()\n number = input(\"Enter the number to be converted (whole numbers only, < 4000): 
\")\n\n if float(number) >= 4000 or float(number) <= 0:\n print(\"That number is out of range!\")\n exit()\n print()\n print(\"{} is the same as {}\".format(number, convert_to_numerals(int(number))))\n print()", "def parseNum(num):\n num = str(num).strip()\n base = 10\n if (num[0] == '0') & (len(num) > 1):\n if num[1] == 'x':\n base = 16\n elif num[1] == 'b':\n base = 2\n else:\n base = 8\n return int(num, base)", "def test_to_celcius():\n\tassert to_celcius(32) == 0\n\tpass", "def test_get_as_num():\n\n def test(input, output):\n eq_(_get_as_num(input), output)\n\n yield test, 1, 1\n yield test, 1.0, 1.0\n yield test, \"1\", 1\n yield test, \"1.0\", 1.0\n yield test, None, 0\n yield test, \"0xF\", 15\n yield test, True, 1\n yield test, False, 0\n\n yield test, JSWrapper(3), 3\n yield test, JSWrapper(None), 0", "def read_number(stream):\n number_chars = []\n\n if stream.eof():\n raise VeryUnexpectedEndException(stream, \"Encountered EOF while scanning number\")\n\n while not stream.eof() and stream.peek() in NUMBER_CHARS:\n number_chars.append(stream.consume())\n\n number = \"\".join(number_chars).lower()\n\n if not re.match(r\"^-?(0|[1-7][0-7]*)(\\.[0-7]+|[0-7]*)((very|VERY)(\\+|-)?[0-7]+)?\", number):\n raise ManyParseException(stream, \"Invalid number {!r}\".format(number))\n\n negative = False\n if '-' == number[0]:\n negative = True\n number = number[1:] # strip off negative sign\n\n int_part, dot, frac_part = number.partition(\".\")\n\n # Format is [int . frac very exponent]\n if \".\" == dot:\n int_value = int(int_part, 8)\n frac_part, _, exponent = frac_part.partition(\"very\")\n frac_value = octal_frac_to_decimal(frac_part)\n\n # Format is [int . frac], unless very exponent\n result = (int_value + frac_value)\n\n else:\n # Need to further break out int part\n int_part, _, exponent = int_part.partition(\"very\")\n # Format is [int], unless very exponent\n result = int(int_part, 8)\n\n # Calculate exponent, if applicable\n if exponent:\n result *= (8.0 ** int(exponent, 8))\n\n # Negate, if applicable\n if negative:\n result = -result\n\n return result", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def gon2dec(gon):\n return 9/10 * gon", "def dec2FactString(nb):\n num_str = '0'\n if nb <= 0:\n return num_str\n \n # find largest factorial base\n largest_base = 0\n while nb >= factorial(largest_base):\n largest_base += 1\n largest_base -= 1\n \n digit = ['0'] * largest_base\n digit[0] = str(nb / factorial(largest_base))\n remainder = nb % factorial(largest_base)\n for i in range(largest_base - 1, 0, -1):\n digit[largest_base - i] = str(remainder / factorial(i))\n remainder = remainder % factorial(i)\n for i in range(0, len(digit)):\n if int(digit[i]) > 9:\n digit[i] = chr(int(digit[i]) + 55)\n 
return \"\".join(digit) + '0", "def convert_base(num, n):\r\n new_num_string = ''\r\n current = num\r\n while current != 0:\r\n remainder = current % n\r\n if remainder > 9:\r\n remainder_string = HEX_CHARS[remainder]\r\n elif remainder >= 36:\r\n remainder_string = '('+str(remainder)+')'\r\n else:\r\n remainder_string = str(remainder)\r\n new_num_string = remainder_string+new_num_string\r\n current = current//n\r\n return new_num_string", "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def test_getNumbers():\n assert formatter.getNumbers(\"some chars and $10.00\") == 10.0\n assert formatter.getNumbers(\n \"some chars and $10.99 some other chars\") == 10.99\n assert formatter.getNumbers(\"\") == -math.inf", "def autohard(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[1])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[1])\n\n # If the lowercase version of the operation equals 'log'\n if equation.split(\" \")[0].lower() == \"log\":\n # Return the answer\n return math.log(num1)\n\n # If the lowercase version of the operation equals 'acos'\n elif equation.split(\" \")[0].lower() == \"acos\":\n # Return the answer\n return math.acos(num1)\n\n # If the lowercase version of the operation equals 'asin'\n elif equation.split(\" \")[0].lower() == \"asin\":\n # Return the answer\n return math.asin(num1)\n\n # If the lowercase version of the operation equals 'atan'\n elif equation.split(\" \")[0].lower() == \"atan\":\n # Return the answer\n return math.atan(num1)\n\n # If the lowercase version of the operation equals 'cos'\n elif equation.split(\" \")[0].lower() == \"cos\":\n # Return the answer\n return math.cos(num1)\n\n # If the lowercase version of the operation equals 'hypot'\n elif equation.split(\" \")[0].lower() == \"hypot\":\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to an decimal\n num2 = float(equation.split(\" \")[2])\n\n # Return the answer\n return math.hypot(num1, num2)\n\n # If the lowercase version of the operation equals 'sin'\n elif equation.split(\" \")[0].lower() == \"sin\":\n # Return the answer\n return math.sin(num1)\n\n # If the lowercase version of the operation equals 'tan'\n elif equation.split(\" \")[0].lower() == \"tan\":\n # Return the answer\n return math.tan(num1)\n\n # Raise a warning\n raise ValueError(\"Invalid operation entered.\")", "def SM2m(sm):\n return sm * 1609.344", "def float_through(num36: str, mask: str, mem: dict):\n ones = [m.start() for m in re.finditer('1', mask)]\n quantums = [m.start() for m in re.finditer('X', mask)]\n \n fixed = \"\"\n for i, digit in enumerate(num36):\n if i in ones:\n c = \"1\"\n elif i in quantums:\n c = \"X\"\n else:\n c = digit\n fixed += c\n \n \n return mem", "def drawNumber(n,ndig,x,y, ucoords=1, fmt='float'): \n fdict = {'float':'FLOAT','exp':'EXP','fexp':'FEXP','log':'LOG'}\n dislin.numfmt(fdict[fmt])\n if ucoords:\n dislin.rlnumb(n,ndig,x,y)\n else:\n dislin.number(n,ndig,x,y)", "def convert_dec(integer, base):\n digits = '0123456789ABCDEFGHIJKLMNOP'\n s = Stack()\n while integer:\n s.push(digits[integer%base])\n integer //= base\n b = ''\n while not s.is_empty():\n b += str(s.pop())\n return b" ]
[ "0.6049695", "0.57279676", "0.56754863", "0.56605375", "0.56527585", "0.56470346", "0.56375796", "0.5623967", "0.56085277", "0.56013507", "0.56012034", "0.55787975", "0.5558308", "0.5548418", "0.5497168", "0.5491952", "0.54788685", "0.54778224", "0.54226166", "0.54081273", "0.5386832", "0.5382071", "0.53733635", "0.535948", "0.53213686", "0.5307975", "0.53067803", "0.5289851", "0.5288242", "0.5286399", "0.5282467", "0.5259171", "0.5253679", "0.5245844", "0.5243313", "0.52179956", "0.5213452", "0.5213008", "0.5212546", "0.5205244", "0.5204962", "0.5196441", "0.5195695", "0.5194449", "0.5193408", "0.51884794", "0.51884204", "0.5183483", "0.518261", "0.51771337", "0.5173058", "0.51615316", "0.5159858", "0.5152271", "0.5151584", "0.51091325", "0.51044405", "0.5101886", "0.509963", "0.50978816", "0.50949913", "0.50881404", "0.5080385", "0.50665075", "0.5065596", "0.5060545", "0.50527114", "0.5051483", "0.50496554", "0.50299907", "0.50211006", "0.50191057", "0.50172704", "0.50001496", "0.49988392", "0.49900818", "0.49815795", "0.49808723", "0.49799764", "0.49777478", "0.49684173", "0.49610707", "0.495867", "0.49561226", "0.49553245", "0.49535742", "0.4950785", "0.49502188", "0.4948221", "0.49411368", "0.49408916", "0.4940567", "0.4937475", "0.49367753", "0.49357587", "0.4934806", "0.49337816", "0.4931915", "0.49308485", "0.49281314", "0.4928116" ]
0.0
-1
Generates a trajectory of the Markov jump process.
def Execute(self,settings,IsStatusBar=False): if settings.IsSeed: np.random.seed(5) self._IsInitial = True self.settings = settings self.sim_t = copy.copy(settings.starttime) # does not have to start at zero if we perform sequential simulations self.X_matrix = copy.deepcopy(settings.X_matrix) self.fixed_species_amount = copy.deepcopy(self.parse.fixed_species_amount) try: self.volume_code = settings.volume_code except AttributeError: # No volume_code present in settings self.volume_code = "self._current_volume = 1" #self.species_to_update = [s for s in range(self.n_species)] # ensure that the first run updates all species self.Propensities() if not self.sim_t: self.timestep = 1 self.sim_output = [] self.propensities_output = [] self.V_output = [] self._IsTrackPropensities = copy.copy(settings.IsTrackPropensities) self.SpeciesSelection() self.RateSelection() self.SetEvents() # April 15, moved into here, because otherwise each new cell division cycle starts with a time event, if specified if not settings.IsOnlyLastTimepoint: self.Initial_Conditions() nstep_counter = 1 t1 = time.time() while (self.sim_t < settings.endtime) and (self.timestep < settings.timesteps): if self.sim_a_0 <= 0: # All reactants got exhausted settings.endtime = 10**50 break self.RunExactTimestep() # Run direct SSA self.HandleEvents() # Update Propensities selectively if self.sim_t < settings.endtime: if not self._IsPerformEvent: self.species_to_update = self.parse.reaction_affects[self.reaction_index] # Determine vars to update else: self.species_to_update = [s for s in range(self.n_species)] self.Propensities() if not settings.IsOnlyLastTimepoint: # Store Output self.GenerateOutput() self._IsPerformEvent = False # set to false (or just to make sure). t2 = time.time() if IsStatusBar and t2-t1> 1: t1 = time.time() sys.stdout.write('\rsimulating {0:s}\r'.format('.'*nstep_counter) ) sys.stdout.flush() nstep_counter+=1 if nstep_counter > 10: nstep_counter = 1 sys.stdout.write('\rsimulating {0:s} '.format('.'*nstep_counter)) sys.stdout.flush() if settings.IsOnlyLastTimepoint or settings.endtime != 10**50: self.GenerateOutput() if IsStatusBar and t1 and not settings.quiet: sys.stdout.write('\rsimulation done! \n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_trajectory(self, current_pos, velocity, goal, dt, prediction_time):\n\n history = list(np.copy(self._reached_goals))\n\n\n\n\n out = []\n out.append(np.copy(current_pos))\n first_goal_idx = np.where(self._goals == goal)[0][0]\n selected_goal = goal\n reached_goal = False\n counter_in_goal = 0\n\n\n\n for _ in range(prediction_time):\n\n # Particle reached selected goal\n # This will continuously chose a next goal, if a particle already reached its predecessor goal\n\n if np.linalg.norm(current_pos - selected_goal) <= 0.1:\n reached_goal = True\n\n\n\n\n\n\n if counter_in_goal > self.curr_stay_duration_goals[self._is_human_in_range_of_goal(selected_goal)[1]] / dt:\n\n selected_goal_idx = np.where(self._goals == selected_goal)[0][0]\n\n if len(history) > 0:\n if not selected_goal_idx == history[-1]:\n history.append(selected_goal_idx)\n else:\n history.append(selected_goal_idx)\n #print \"history:\"\n #print history\n # Select next goal based on the pre-learned goal-change probabilities\n\n\n #print \"selected goal {}\".format(selected_goal_idx)\n probs,available_goals = self.compute_transition_probs_for_goal_from_history(history,selected_goal_idx)\n\n\n for p in probs:\n if p < self._belief_threshold:\n p = 0.0\n\n print \"probs sampling: \"\n print probs / np.sum(np.asarray(probs))\n selected_goal = self._goals[np.random.choice(available_goals, p=probs / np.sum(np.asarray(probs)))]\n\n counter_in_goal = 0.0\n\n #print(\"switching\")\n\n else:\n counter_in_goal += 1\n #print(\"incr counter\")\n\n\n\n\n\n if reached_goal:\n #print self.curr_stay_duration_goals\n #print self.curr_stay_duration_goals[ self._is_human_in_range_of_goal(selected_goal)[1] ]\n\n new_pos = self.transition_human(current_pos, velocity, selected_goal, dt)\n\n out.append(new_pos)\n current_pos = new_pos\n\n\n\n else:\n new_pos = self.transition_human(current_pos, velocity, selected_goal, dt)\n out.append(new_pos)\n current_pos = new_pos\n\n return np.asarray(out)", "def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)", "def generate_trajectory(env, model):\n\n state = env.reset()\n done = False\n cumulative_rwd = 0\n ctr = 0\n \n while not done:\n # TODO:\n # 1) use model to generate probability distribution over next actions\n # 2) sample from this distribution to pick the next action\n action = 0\n if np.random.uniform() < model.epsilon:\n action = env.action_space.sample() \n else:\n q = model.call(tf.expand_dims(state, axis=0))\n action = tf.math.argmax(tf.squeeze(q))\n prev_state = state\n state, rwd, done, _ = env.step(action)\n cumulative_rwd = cumulative_rwd + rwd\n model.buffer.push(prev_state, action, state, rwd)\n train(env, model, ctr)\n model.epsilon = (model.epsilon - model.min_epsilon) * model.epsilon_update + model.min_epsilon\n return cumulative_rwd", "def generate_trajectory(t, v, waypoints, coeff_x, coeff_y, coeff_z):\n global yaw\n global current_heading\n yawdot = 0.0\n pos = np.zeros(3)\n acc = np.zeros(3)\n vel = np.zeros(3)\n jerk = np.zeros(3)\n snap = np.zeros(3)\n yawddot = 0.0\n\n # distance vector array, represents each segment's distance\n distance = waypoints[0:-1] - waypoints[1:]\n # T is now each segment's travel time\n T = (1.0 / v) * np.sqrt(distance[:,0]**2 + distance[:,1]**2 + distance[:,2]**2)\n # accumulated time\n S = np.zeros(len(T) + 1)\n S[1:] = np.cumsum(T)\n\n # find which segment current t belongs to\n t_index = np.where(t >= S)[0][-1]\n\n # prepare the next desired state\n if t == 0:\n pos = waypoints[0]\n 
t0 = get_poly_cc(8, 1, 0)\n\n # get X-Y plane project of velocity vector ( this vector is tangent to curve )\n v_proj = np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)])\n if(LA.norm(v_proj) == 0.0):\n # if velocity vector is of zero magnitude there should be no change in heading!\n pass\n else:\n current_heading = v_proj/LA.norm(v_proj) * (1.0 / T[0])\n \n\n # stay hover at the last waypoint position\n elif t > S[-1]:\n pos = waypoints[-1]\n else:\n # scaled time\n scale = (t - S[t_index]) / T[t_index]\n start = 8 * t_index\n end = 8 * (t_index + 1)\n\n t0 = get_poly_cc(8, 0, scale)\n pos = np.array([coeff_x[start:end].dot(t0), coeff_y[start:end].dot(t0), coeff_z[start:end].dot(t0)])\n\n t1 = get_poly_cc(8, 1, scale)\n # chain rule applied\n vel = np.array([coeff_x[start:end].dot(t1), coeff_y[start:end].dot(t1), coeff_z[start:end].dot(t1)]) * (1.0 / T[t_index])\n\n t2 = get_poly_cc(8, 2, scale)\n # chain rule applied\n acc = np.array([coeff_x[start:end].dot(t2), coeff_y[start:end].dot(t2), coeff_z[start:end].dot(t2)]) * (1.0 / T[t_index]**2)\n\n t3 = get_poly_cc(8, 3, scale)\n # apply chain rule\n jerk = np.array([coeff_x[start:end].dot(t3), coeff_y[start:end].dot(t3), coeff_z[start:end].dot(t3)]) * (1.0 / T[t_index]**3)\n\n t4 = get_poly_cc(8, 4, scale)\n # apply chain rule\n snap = np.array([coeff_x[start:end].dot(t4), coeff_y[start:end].dot(t4), coeff_z[start:end].dot(t4)]) * (1.0 / T[t_index]**4)\n\n # calculate desired yaw and yaw rate\n\n v_proj = np.array([vel[0], vel[1]])\n\n if( LA.norm(v_proj) == 0.0):\n # if velocity vector is zero, again there should be no change in heading\n next_heading = current_heading\n else:\n next_heading = v_proj/LA.norm(v_proj)\n\n \"\"\"\n try :\n #current_heading = v_proj/LA.norm(v_proj) #* (1.0 / T[0]) #np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)]) * (1.0 / T[0])\n next_heading = v_proj/LA.norm(v_proj)\n except ZeroDivisionError:\n # velocity vector magnitude was zero so there should be no change in heading!\n next_heading = current_heading\n \"\"\" \n\n # angle between current vector with the next heading vector\n # from a * b = |a|*|b|cos(angle)\n delta_psi = np.arccos(np.dot(current_heading, next_heading) / (LA.norm(current_heading)*LA.norm(next_heading)))\n # cross product allow us to determine rotating direction\n norm_v = np.cross(current_heading,next_heading)\n\n if norm_v > 0:\n yaw += delta_psi\n elif norm_v < 0:\n yaw -= delta_psi\n else:\n # normv = 0! 
if there is no change in yaw, do not modify it!\n pass\n\n # dirty hack, quadcopter's yaw range represented by quaternion is [-pi, pi]\n while yaw > np.pi:\n yaw = yaw - 2*np.pi\n\n # print next_heading, current_heading, \"yaw\", yaw*180/np.pi, 'pos', pos\n current_heading = next_heading\n #print(current_heading)\n yawdot = delta_psi / 0.005 # dt is control period\n max_yawdot = 5.0 #rad/s\n if(abs(yawdot) > max_yawdot):\n yawdot = (yawdot/abs(yawdot))*max_yawdot # make it 5rad/s with appropriate direction\n \n yaw = np.sin(2*t)*0.0\n yawdot = 2*np.cos(2*t)*0.0\n yawddot = -4*np.sin(2*t)*0.0\n return DesiredState(pos, vel, acc, jerk, snap, yaw, yawdot, yawddot)", "def compute_trajectory():\n pass", "def compute_trajectory_step(self):\r\n if not self.diverged:\r\n # gets the heliocentric distance\r\n dist = self.trajectory(self.t)\r\n\r\n # gets the initial time\r\n inittime = self.t\r\n\r\n # while the distance has changed by less than rtol percent\r\n while np.abs(self.trajectory(self.t)/dist-1) < self.rtol:\r\n\r\n # step over a full rotation each time\r\n self.t += self.nsrot*self.dt\r\n\r\n # check CFL criterion\r\n if self.CFL(self.t-inittime) >= self.Cmax:\r\n # if CFL>1, reverse the time until CFL<1\r\n while self.CFL(self.t-inittime) >= self.Cmax:\r\n self.t -= self.dt\r\n break\r\n\r\n timejump = self.t-inittime # find the total time change\r\n\r\n # set the velocity to the average\r\n self.u.vector()[:] = self.ucycavg.vector()[:]\r\n\r\n # move the mesh over this displacement\r\n self.move_mesh(timejump)\r\n\r\n # save the new output data\r\n self.get_outputs()\r\n\r\n # write updates to the log file\r\n print(\"-------------------------\")\r\n print(\"{}: Trajectory Jump Completed, Stepped {:.3f} s, t={:.3e}, \\\r\n {:.3e}%\".format(self.convert_time(time.time() -\r\n self.start_time), timejump, self.t,\r\n 100*(self.t/self.end_time)))\r\n print(\"------------------------- \\n\")\r\n self.logfile.write(\"{}: --- Trajectory Jump Completed, Stepped \\\r\n {: .3f} s, {:.2f}%, CFL: {:.3e}---\\n\".format(\r\n self.convert_time(time.time()-self.start_time),\r\n timejump, 100*(self.t/self.end_time),\r\n self.CFL(timejump)))", "def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n 
T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv = []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)", "def GenerateTrajectory(self,y0=np.array([0,0.01]),T=3000,N=30000):\n \n # t_run = 
timeit.default_timer()\n t,Y=EM_numba(self.F,self.G,y0,T,N)\n # print('EM numba took '+str(timeit.default_timer() - t_run)+'secs')\n return t,Y", "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n if self.loop is True:\n ref_ind = ref_ind % traj_len\n else:\n if ref_ind > traj_len-1:\n ref_ind = traj_len-1\n if closest_ind == traj_len-1:\n self.at_dest = True\n else:\n ref_ind = closest_ind\n ref_state = self.np_trajectory[:, int(ref_ind)]\n\n # update vehicle state.\n '''if self.class_name == 'TruckVehicle':\n self.update_vehicle_state_qualisys()\n self.UDP_receive()\n if self.data == \"-1.00\":\n self.set_control_commands_pp(ref_state, ref_ind)\n else:\n steer = int(self.data[-6:-3])\n throttle = int(self.data[:-6]) + 5\n hw_port.set_command(throttle,steer,2)\n self.update_truck_hardware()\n else:\n self.set_control_commands(ref_state)\n self.update_vehicle_state()'''\n\n self.set_control_commands(ref_state, ref_ind)\n self.update_vehicle_state()\n\n # publish vehicle state.\n vehicle_state = msgs.VehicleState(self.vehicle_id, self.class_name,\n self.x, self.y, self.yaw, self.v)\n self.pub_state.publish(vehicle_state)\n self.update_current_node()\n\n #The way that the stop light waiting works, this is necessary\n if not self.waiting_at_stop:\n self.check_for_traffic_light()\n self.get_traffic()", "def get_trajectory(self, render=False):\n state = self.reset_env()\n states = []\n actions = []\n rewards = []\n for i in range(self.config['episode_max_length']):\n action = self.choose_action(state)\n states.append(state)\n for _ in range(self.config['repeat_n_actions']):\n state, rew, done, _ = self.step_env(action)\n if done: # Don't continue if episode has already ended\n break\n actions.append(action)\n rewards.append(rew)\n if done:\n break\n if render:\n self.env.render()\n return {\"reward\": np.array(rewards),\n \"state\": np.array(states),\n \"action\": np.array(actions),\n \"done\": done, # Tajectory ended because a terminal state was reached\n \"steps\": i + 1\n }", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def generate_trajectory(env, model, adversary):\n calls = []\n hands = []\n ad_hands = []\n actions = []\n rewards = []\n\n time_step = env.reset()\n cur_agent, next_agent = model, adversary\n model_player_id = 0\n # TODO: add random starting\n\n last_call = None\n\n while not time_step.last():\n # get cur player id and hand\n cur_player_id = int(time_step.observations['current_player'])\n hand_id = time_step.observations['info_state'][cur_player_id]\n ad_hand_id = time_step.observations['info_state'][1-cur_player_id]\n\n # If adversary's turn, make move and update last call\n if cur_player_id != model_player_id:\n action = adversary.step(last_call, hand_id)\n time_step = env.step([action])\n if time_step.last():\n rewards[-1] = max(time_step.rewards[model_player_id],0)\n last_call = action\n cur_agent, next_agent = next_agent, cur_agent\n continue\n\n # get action from agent\n if last_call == None:\n last_call = 1\n last_call_tensor = tf.convert_to_tensor([last_call], dtype=tf.float32)\n hand_id_tensor = tf.convert_to_tensor([hand_id], dtype=tf.float32)\n ad_hand_id_tensor = tf.convert_to_tensor([ad_hand_id], dtype=tf.float32)\n prbs = cur_agent.call(last_call_tensor, hand_id_tensor, ad_hand_id_tensor)[0].numpy()\n\n # 
mask out illegal actions\n legal_actions = time_step.observations['legal_actions'][cur_player_id]\n legal_actions_mask = np.ones(env.num_actions, dtype=bool)\n legal_actions_mask[legal_actions] = False\n prbs[legal_actions_mask] = 0\n\n # renormalize probabilities\n norm = np.sum(prbs)\n # TODO: check for zero norm\n if norm == 0:\n old_prbs = prbs\n prbs = np.zeros(env.num_actions)\n prbs[legal_actions] += (1/len(legal_actions))\n else:\n prbs = prbs / norm\n\n\n # select action weighted by prbs\n action = np.random.choice(list(range(len(prbs))), p=prbs)\n # apply action to env\n time_step = env.step([action])\n\n # update calls, hands, actions, and rewards\n calls.append(last_call)\n hands.append(hand_id)\n ad_hands.append(hand_id)\n actions.append(action)\n rewards.append(max(time_step.rewards[cur_player_id],0))\n\n last_call = action\n cur_agent, next_agent = next_agent, cur_agent\n\n return calls, hands, ad_hands, actions, rewards", "def prediction_step(self,delta_t,v,om,x_k_1,P_k_1,Q,jacobian,motion_model):\n\n\t\t# Motion Model Returns the states [x,y,theta]\n\t\tx_k_1[0],x_k_1[1],x_k_1[2] = motion_model(v,om,x_k_1[0],x_k_1[1],x_k_1[2],delta_t) \n \n\t\t#Jacobian of Motion Model w.r.t last state and Noise \n\t\tF, L = jacobian(v,x_k_1[2],delta_t)\n\n\t\t# Predicted Co-Variance\n\t\tP_k_1 = F.dot((P_k_1).dot(F.T)) + L.dot((Q).dot(L.T))\n\n\t\treturn x_k_1,P_k_1", "def _generate_trajectories(self, velocity, dt, num_trajectories, prediction_time, current_belief):\n\n\n\n\n trajectories = []\n #print(\"len(self._estimated_positions: {}\".format(len(self._estimated_positions)))\n for i in range(num_trajectories):\n # First, sample a position and a goal from the current belief and the estimated positions\n position, goal = self._sample_position(self._estimated_positions, current_belief)\n\n # Generate a full trajectory\n trajectory = self._generate_trajectory(position, velocity, goal, dt, prediction_time)\n\n\n trajectories.append(trajectory)\n return trajectories", "def main():\r\n PathGenerator = TrajectoryGenerator()\r\n \r\n ## coordinate \r\n # Y \r\n # ^ /\r\n # | /\r\n # | / <theta>\r\n # o -- -- -- >X\r\n\r\n x_0 = 0.0 # initial x position\r\n y_0 = 0.0 # initial y position\r\n theta_0 = 0.0 *np.pi/180 # initial heading angle of the vehicle \r\n kappa_0 = 0.0 *np.pi/180 # initial steering angle \r\n initial_state = [x_0, y_0, theta_0, kappa_0] \r\n \r\n x_f = 13.0 # final x position\r\n y_f = 8.0 # final y position\r\n theta_f = 0.0 *np.pi/180 # final heading angle of the vehicle \r\n kappa_f = 0.0 *np.pi/180 # final steering angle \r\n final_state = [x_f, y_f, theta_f, kappa_f] \r\n\r\n traject = PathGenerator.compute_spline(initial_state, final_state)\r\n point_array = np.asarray(traject)\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n sample_resolution = 0.5\r\n temp_goal_list = []\r\n for i in range(-2, 3):\r\n temp_final_state = np.copy(final_state)\r\n temp_final_state[1] = temp_final_state[1] + float(i)*sample_resolution\r\n temp_goal_list.append(temp_final_state)\r\n \r\n start = time.time()\r\n point_list = []\r\n for i in range(0, 5):\r\n temp_goal = temp_goal_list[i]\r\n traject = PathGenerator.compute_spline(initial_state, temp_goal)\r\n point_list.append(traject)\r\n end = time.time()\r\n print('Executed time is %f'%(end - start))\r\n \r\n # pdb.set_trace()\r\n for i in range(0,5):\r\n point_array = np.asarray(point_list[i])\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n plt.axis('equal')\r\n plt.show()", "def find_trajectory(self):\n\n 
translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)", "def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: {self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n 
RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()", "def goto(self,\n goal_positions, duration,\n starting_point='present_position',\n wait=True, interpolation_mode='minjerk'):\n trajs = []\n\n for i, (motor_name, goal_pos) in enumerate(goal_positions.items()):\n last = wait and (i == len(goal_positions) - 1)\n\n motor = getattr(self, motor_name)\n trajs.append(motor.goto(goal_pos, duration, starting_point,\n wait=last, interpolation_mode=interpolation_mode))\n\n return trajs", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def 
run(self, jumps_list = [1, 50, 125, 500, 5000]):\n \n print (\"Jumps family with {0} ms jumps.\".format(jumps_list))\n for j in jumps_list:\n \n #define post-pulse relaxation time\n if j < 1000:\n pfreq = 1 #if jump is shorter than a second, make it 1s\n else: \n pfreq = float(1e3) / (10*j) # make it 10x longer than jump if > 1s\n #print (\"pfreq: \" +str(pfreq))\n t= TrainExpt(self.mech, self.params)\n t.run(1, j, pfreq, mod='_'+str(j))\n \n \"\"\"\n for r in self.rs.rates_set:\n _ra = r[1]\n t = Train(1, j, pfreq, self.param, self.N_states, self.open_states, _ra)\n t.build() #optional argument can cancel prepadding with 100 ms\n t.construct_train()\n\n self.trace_set.append(t.trace)\n\n #get rate constant\n #keys of ra are the rate tuple\n #refer with rate name\n _rate_changed = _ra[r_tuple_from_r_name(_ra, self.param.rate_to_change)][1][0] \n\n self.table += str(r[0]) + '\\t' + str(_rate_changed) + '\\n'\n \n print ('\\n-\\nTrial#\\t' + str(r[0]) + '\\t' + str(self.param.rate_to_change) + '\\t' + str(_rate_changed) + '\\n')\n print (t.printout)\n\n self.curve_save(self.trace_set, )\n \n self.text_out()\n \"\"\"", "def inter_step(self):\n #https://math.stackexchange.com/questions/1918743/how-to-interpolate-points-between-2-points\n c_loc = self.checkpoint_target.get_location()\n \n self.dist_to_checkpoint = self._calc_distance(c_loc)\n new_y = self.current_location[0] + (self.walk_speed / self.dist_to_checkpoint \\\n * (c_loc[0] - self.current_location[0]))\n new_x = self.current_location[1] + (self.walk_speed / self.dist_to_checkpoint \\\n * (c_loc[1] - self.current_location[1]))\n new_location = [float(new_y), float(new_x)]\n self.current_location = new_location\n self.walk_route.append(new_location)", "def step_simulation(self, action):\n # target = np.zeros(6)\n # a = np.copy(action)\n # for i in range(6):\n # target[i] = a[i] + ref_pos[i + 3]\n\n target = action * 1.5\n # target = action + ref_pos[3:9]\n\n joint_angle_4, joint_velocity_4 = self.get_joint_angle_and_velocity(4)\n joint_angle_7, joint_velocity_7 = self.get_joint_angle_and_velocity(7)\n self.joint_history.append(np.asarray([joint_angle_4, joint_velocity_4, joint_angle_7, joint_velocity_7]))\n\n joint_angles = self.robot_skeleton.q[3:]\n joint_velocities = self.robot_skeleton.dq[3:]\n\n tau = np.zeros(self.robot_skeleton.ndofs) # torque to apply at each simulation clock\n tau[3:] = self.P * (target - joint_angles) - self.D * joint_velocities\n tau = np.clip(tau, -150 * self.volume_scaling, 150 * self.volume_scaling)\n self.tau_history.append(tau)\n # print(tau)\n self.do_simulation(tau, 1)", "def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n # define polymer and tracers\n u = sim.u\n polymer = u.select_atoms(polymer_text)\n tracers = u.select_atoms(tracer_text)\n n_polymer = polymer.n_atoms\n n_tracers = tracers.n_atoms\n # initialize jumping matrix and first distance matrix d_prev\n J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)\n ts = u.trajectory [teq]\n d_prev = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_prev = d_prev<threshold\n for ts in u.trajectory [teq::tsample] :\n # get distance matrix at current time step\n d_next = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_next = d_next<threshold\n # get jumps of all tracers and add it to the jumping matrix\n for i in xrange (n_tracers) :\n t_prev = D_prev [:,i]\n t_next = D_next [:,i].reshape ((n_polymer,1))\n t = t_prev * t_next\n J += t\n D_prev = D_next.copy()\n return 
J", "def sample_trajectory(self, env, animate_this_episode, is_evaluation):\n # Using current task with meta inside\n env.reset_task(is_evaluation=is_evaluation)\n stats = []\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n ep_steps = 0\n steps = 0\n\n num_samples = max(self.history, self.max_path_length + 1)\n meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))\n rewards = []\n\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n\n if ep_steps == 0:\n ob = env.reset()\n # first meta ob has only the observation\n # set a, r, d to zero, construct first meta observation in meta_obs\n # YOUR CODE HERE\n ac = np.zeros(self.ac_dim); rew = np.zeros(self.reward_dim); done = np.zeros(self.terminal_dim)\n meta_obs[steps, :] = np.concatenate((ob, ac, rew, done))\n steps += 1\n\n # index into the meta_obs array to get the window that ends with the current timestep\n # please name the windowed observation `in_` for compatibilty with the code that adds to the replay buffer (lines 418, 420)\n # YOUR CODE HERE\n # padding for input obs size\n sample_action_in_ = meta_obs[steps-self.history:steps, :] if steps>=self.history else np.squeeze(np.concatenate(([meta_obs[0,:], ] * (self.history - steps), meta_obs[:steps, :]), axis=0))\n # need to clear hidden size, in order to avoid previous hidden state as it may be generated by the other totally different task (env setting may be changed)\n hidden = np.zeros((1, self.gru_size), dtype=np.float32)\n\n # get action from the policy\n # YOUR CODE HERE\n # Tensor(\"ob:0\", shape=(?, 1, 10), dtype=float32)\n # print(self.sy_ob_no)\n # Tensor(\"hidden:0\", shape=(?, 32), dtype=float32)\n # print(self.sy_hidden)\n ac = self.sess.run(self.sy_sampled_ac, feed_dict={\n self.sy_ob_no: sample_action_in_.reshape(-1, self.history, self.meta_ob_dim),\n self.sy_hidden: hidden,\n })\n assert len(ac) == 1\n ac = ac[0]\n\n # step the environment\n # YOUR CODE HERE\n ob, rew, done, _= env.step(ac)\n\n ep_steps += 1\n\n done = bool(done) or ep_steps == self.max_path_length\n # construct the meta-observation and add it to meta_obs\n # YOUR CODE HERE\n meta_obs[steps, :] = np.concatenate((ob, ac, [rew], [done]))\n\n rewards.append(rew)\n steps += 1\n\n in_ = meta_obs[steps, :]\n # add sample to replay buffer\n if is_evaluation:\n self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n else:\n self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n\n # start new episode\n if done:\n # compute stats over trajectory\n s = dict()\n s['rewards']= rewards[-ep_steps:]\n s['ep_len'] = ep_steps\n stats.append(s)\n ep_steps = 0\n\n if steps >= num_samples:\n break\n\n return steps, stats", "def traverse_scaffold(self):\n\n y, x, theta = self.find_start_pose()\n dy, dx = -np.sin(theta), np.cos(theta)\n straight, instruction_string = 0, ''\n\n while True:\n # Either scaffold infront of us\n if self.mp[y+dy,x+dx] == ord('#') or self.mp[y+dy,x+dx] == ord('O'):\n self.mp[y,x] = ord('O')\n straight += 1\n y, x = y+dy, x+dx\n # Otherwise, it is to our left or right\n else:\n if self.mp[y-1,x] == ord('#'): theta_ = np.pi/2\n elif self.mp[y+1,x] == ord('#'): theta_ = -np.pi/2\n elif self.mp[y,x-1] == ord('#'): theta_ = -np.pi\n elif self.mp[y,x+1] == ord('#'): theta_ = 0\n\n # Reached the end\n else:\n self.mp[y,x] = ord('x')\n if straight > 0:\n instruction_string 
= instruction_string + str(straight)\n\n return instruction_string\n\n if straight > 0:\n instruction_string = instruction_string + str(straight) + ','\n straight = 0\n\n # Find angle between directions\n d = np.arccos(np.dot([np.sin(theta+np.pi/2), np.cos(theta+np.pi/2)],\n [np.sin(theta_), np.cos(theta_)]))\n \n if d == 0:\n instruction_string += 'L,'\n theta = theta+np.pi/2\n else:\n instruction_string += 'R,'\n theta = theta-np.pi/2\n # Update orientation\n \n dy,dx = np.int(-np.sin(theta)),np.int(np.cos(theta))", "def predict_trajectory(x_init, v, yaw_dot):\n x = np.array(x_init)\n trajectory = [x]\n time = 0\n while time <= Config.predict_time:\n x = motion(x, [v, yaw_dot], Config.dt)\n trajectory.append(x)\n time += Config.dt\n\n return np.array(trajectory)", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def goto(self, start, target):\n start_pos = self.arm.arm.forward_kinematics(start[0])\n self.arm.arm.compute_jacobian()\n start_vel = self.arm.arm.get_tool_vel(start[1])\n\n target_pos = target[0]\n target_vel = target[1]\n\n th1, th2, z, th3 = self.arm.goto(start_pos, start_vel, target_pos,\n target_vel)\n return arm_to_joint_traj(th1, th2, z, th3, self.arm.dt)", "def generate_trajectory(scenario: Scenario, planning_problem: PlanningProblem, time_steps: int,\n max_tries: int = 1000) -> Tuple[TrajectoryPrediction, List[VehicleInfo]]:\n shape: Shape = Rectangle(DrawConfig.car_length, DrawConfig.car_width,\n planning_problem.initial_state.position, planning_problem.initial_state.orientation)\n states: List[MyState] = [MyState(planning_problem.initial_state)]\n vehicles: List[VehicleInfo] = [VehicleInfo(MyState(planning_problem.initial_state), None,\n DrawHelp.convert_to_drawable(planning_problem.initial_state))]\n for i in range(1, time_steps):\n last_state_copy: MyState = deepcopy(states[i - 1])\n found_valid_next: bool = False\n tries: int = 0\n while not found_valid_next and tries < max_tries:\n next_state: MyState = GenerationHelp.predict_next_state(scenario, last_state_copy)\n next_vehicle: VehicleInfo = VehicleInfo(next_state, None)\n if is_valid(next_vehicle, scenario):\n states.append(next_state)\n vehicles.append(next_vehicle)\n found_valid_next = True\n else:\n tries += 1\n last_state_copy.orientation \\\n = states[i - 1].state.orientation + uniform(-GenerationConfig.max_yaw, GenerationConfig.max_yaw)\n if not found_valid_next:\n break\n return TrajectoryPrediction(Trajectory(0, list(map(lambda s: s.state, states))), shape), vehicles", "def jump(self):\n\t\tself._is_falling = True\n\t\tself._dy = -5", "def _forward_kinematics_step(self, t_step):\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._x_1[t_step] = self._jnt_lengths[0] * s_1\n self._y_1[t_step] = self._jnt_lengths[0] * c_1\n self._x_2[t_step] = self._x_1[t_step] + self._jnt_lengths[1] * s_12\n self._y_2[t_step] = self._y_1[t_step] + self._jnt_lengths[1] * c_12\n self._x_e[t_step] = self._x_2[t_step] + self._jnt_lengths[2] * s_123\n self._y_e[t_step] = self._y_2[t_step] + self._jnt_lengths[2] * c_123", "def playAGame(t_Param, t_Qtable, t_Movements, t_States, t_Rewards, t_Maze, t_line=None, t_point=None):\n # start from the position next to the entrance of maze.\n pos = np.array([1,1], np.int16)\n \n # a list to memorize history step with maximum memory length of 2\n path = [0,0]\n \n # update plot\n if t_line is not None and t_point is not None:\n xdata = [pos[1],]; ydata = [pos[0],]\n t_line.set_xdata(xdata); 
t_line.set_ydata(ydata)\n t_point.set_xdata([pos[1],]); t_point.set_ydata(pos[0,])\n #t_line.figure.canvas.draw()\n plt.pause(0.01)\n\n for k in range(t_Param[\"nStep_Max\"]):\n # calculate current state index\n state_idx = t_Param[\"ncol\"] * pos[0] + pos[1]\n\n # modify history\n path.append( state_idx ); path.remove( path[0] )\n\n # update current position , and then return choice\n choice = move(t_Qtable[state_idx, :], pos, t_Movements)\n\n # update plot\n if t_line is not None and t_point is not None:\n xdata.append(pos[1]); ydata.append(pos[0])\n t_line.set_xdata(xdata); t_line.set_ydata(ydata)\n t_point.set_xdata([pos[1],]); t_point.set_ydata(pos[0,])\n #t_line.figure.canvas.draw()\n plt.pause(0.01)\n\n # calculate new state index\n state_idx_new = t_Param[\"ncol\"] * pos[0] + pos[1]\n #print(f\"[{pos[0]:>2d}, {pos[1]:2d}]\", end=\" \")\n # get environment; based on the new position, get reward\n env = t_Maze[pos[0], pos[1]]\n \n # if is turning back, punish\n if state_idx_new in path:\n R = -2\n # get reward from the Maze pixel value of the new state\n else:\n R = t_Rewards[ env ]\n\n # update Qtable\n try:\n t_Qtable[state_idx,choice] = (1-Param[\"alpha\"]) * t_Qtable[state_idx,choice] + \\\n Param[\"alpha\"] * (R + Param[\"gamma\"] * t_Qtable[state_idx_new, :].max())\n except IndexError:\n print(pos[0],pos[1])\n break\n\n # whether game over\n if env != 3:\n break\n\n step = k+1\n \n # if reach maximum nStep, set env to 4\n if step == t_Param[\"nStep_Max\"]:\n env = 4\n\n return env, step, tuple(pos)", "def _simulation_step(self, ctrl=None):\n # Default zero torque control\n if ctrl is None:\n ctrl = np.zeros(self.n_joints+1)\n\n # Advance simulation\n self.t += self.dt\n while(abs(self.env.sim.data.time - self.t) > self.t_epsilon):\n env_observation = self.env.step(ctrl)\n\n # Record trace\n self.trace['env_observations'].append(env_observation)\n self.trace['env_actions'].append(ctrl)\n\n # Render\n if self.rendering:\n self.env.render()\n\n self.rendering = False\n\n # Sync dynamic model\n self._sync_dynmodel()\n\n # Return parsed observation\n p, r = self._forward_kinematics(self.end_effector_name)\n return np.concatenate([p, r.elements, env_observation])", "def jump(self):\n self.vy = -9", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def shannon_parry_markov_chain(self):\n from sage.modules.free_module_element import vector\n if not self.is_deterministic():\n raise NotImplementedError(\"Automaton must be deterministic.\")\n if not self.digraph().is_aperiodic():\n raise NotImplementedError(\"Automaton must be aperiodic.\")\n if not self.digraph().is_strongly_connected():\n raise NotImplementedError(\"Automaton must be strongly connected.\")\n if not all(s.is_final for s in self.iter_states()):\n raise NotImplementedError(\"All states must be final.\")\n from sage.rings.integer_ring import ZZ\n M = self.adjacency_matrix().change_ring(ZZ)\n states = {state: i for i, state in enumerate(self.iter_states())}\n w_all = sorted(M.eigenvectors_right(),\n key=lambda x: abs(x[0]),\n reverse=True)\n w = w_all[0][1][0]\n mu = w_all[0][0]\n u_all = sorted(M.eigenvectors_left(),\n key=lambda x: abs(x[0]),\n reverse=True)\n u = u_all[0][1][0]\n u = 1/(u*w) * u\n final = vector(int(s.is_final) for s in self.iter_states())\n ff = u*final\n\n assert u*w == 1\n P = Transducer(initial_states=[s.label() for s in self.iter_initial_states()],\n final_states=[s.label() for s in 
self.iter_final_states()],\n on_duplicate_transition=duplicate_transition_add_input)\n for t in self.iter_transitions():\n P.add_transition(t.from_state.label(),\n t.to_state.label(),\n w[states[t.to_state]]/w[states[t.from_state]]/mu,\n t.word_in)\n for s in self.iter_states():\n P.state(s.label()).color = 1/(w[states[s]] * ff)\n P.state(s.label()).initial_probability = w[states[s]] * u[states[s]]\n return P", "def preview_trajectory(self, state, remain_timestep, vis=False):\n print('in preview trajectory')\n state_origin = copy.deepcopy(state)\n sim_state = [state[0][0].copy(), state[0][1]] \n\n joints = get_joints(self.joint_listener)\n ef_pose = get_ef_pose(self.pose_listener)\n ef_pose_origin = ef_pose.copy()\n joint_plan = [joints]\n ef_pose_plan = [ef_pose]\n\n for episode_steps in range(remain_timestep):\n state[0] = sim_state\n gaddpg_input_state = select_target_point(state)\n step = min(max(remain_timestep - episode_steps, 1), 25)\n action, _, _, aux_pred = agent.select_action(gaddpg_input_state, remain_timestep=step)\n action_pose = unpack_action(action)\n ef_pose = ef_pose.dot(action_pose)\n joints = solve_ik(joints, pack_pose(ef_pose))\n joint_plan.append(joints)\n ef_pose_plan.append(ef_pose)\n sim_next_point_state = se3_transform_pc(se3_inverse(action_pose), sim_state[0]) \n sim_state[0] = sim_next_point_state\n\n if vis:\n # vis entire traj. Might be useful\n poses_ = robot.forward_kinematics_parallel(\n wrap_value(joint_plan[0])[None], offset=True)[0]\n poses = [pack_pose(pose) for pose in poses_]\n line_starts, line_ends = grasp_gripper_lines(np.array(ef_pose_plan))\n points = state_origin[0][0]\n points = se3_transform_pc(ef_pose_origin, points)\n point_color = get_point_color(points)\n rgb = self.planner.planner_scene.renderer.vis(poses, list(range(10)), \n shifted_pose=np.eye(4),\n interact=2,\n V=np.array(V),\n visualize_context={\n \"white_bg\": True,\n \"project_point\": [points],\n \"project_color\": [point_color],\n \"static_buffer\": True,\n \"reset_line_point\": True,\n \"thickness\": [2],\n \"line\": [(line_starts[0], line_ends[0])],\n \"line_color\": [[255, 0, 0]], \n }\n )\n\n num = len(joint_plan)\n traj = np.zeros((num, 9), dtype=np.float32)\n for i in range(num):\n traj[i, :] = joint_plan[i]\n return traj", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n 
label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def trajectory(self):\n traj = np.zeros((2, self.times.size))\n for t, time in enumerate(self.times):\n traj[:, t] = self.center_of_mass(time)\n return traj", "def ik_point(self, start_joints, target_position, n_steps=40, link_name=None):\n link_name = link_name if link_name is not None else self.tool_frame\n \n assert len(start_joints) == len(self.joint_indices)\n self.sim.update()\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n request = {\n \"basic_info\" : {\n \"n_steps\" : n_steps,\n \"manip\" : str(self.manip.GetName()), \n \"start_fixed\" : True \n },\n \"costs\" : [\n {\n \"type\" : \"joint_vel\",\n \"params\": {\"coeffs\" : [1]} \n },\n ],\n \"constraints\" : [\n {\n \"type\" : \"pose\",\n \"name\" : \"target_pose\",\n \"params\" : {\"xyz\" : list(target_position), \n \"wxyz\" : [0,0,0,1],\n \"link\": link_name,\n \"rot_coeffs\" : [0,0,0],\n \"pos_coeffs\" : [1,1,1]\n }\n \n },\n ],\n \"init_info\" : {\n \"type\" : \"stationary\",\n },\n }\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n tool_link = self.robot.GetLink(self.tool_frame)\n def penalize_low_height(x):\n self.robot.SetDOFValues(x, self.joint_indices, False)\n z = tool_link.GetTransform()[2,3]\n return max(0, 10.0 - z)\n\n for t in xrange(n_steps-2):\n prob.AddErrorCost(penalize_low_height, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"PENALIZE_LOW_HEIGHT_%i\"%t)\n \n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n return result.GetTraj()", "def _perform_landing(self):\n self.y += self.settings.mario_jump_speed\n if self.y >= self.settings.mario_y_pos:\n self.y = 
self.settings.mario_y_pos\n self.jumping = 0\n self.is_currently_jumping = False", "def move_me_on_spawn(self):\r\n\t\tif self.points_to_go:\r\n\t\t\tself.start_pos = self.points_to_go[0]\r\n\t\t\tfor point in self.points_to_go[1:]:\r\n\t\t\t\tfor i in range(len(self.points_to_go[1:])):\r\n\t\t\t\t\tself.goal_pos = self.points_to_go[i]\r\n\t\t\t\t\t\r\n\t\t\t\t\tself.move_me()\r\n\t\t\t\t\t#self.start_pos = \r\n\t\t\t\t\t#print(self.goal_pos)\r\n\t\t\t\t\t#if self.move_me():\r\n\t\t\t\t\t#\ti += 1\r\n\t\t\t\t\t#\tprint('switch')\r", "def _prediction_step(self, time_diff_s):\n # x = A * x + B\n heading_r = math.radians(self.estimated_heading())\n from control.telemetry import Telemetry\n x_delta, y_delta = Telemetry.rotate_radians_clockwise(\n (0.0, time_diff_s),\n heading_r\n )\n speed_m_s = self.estimated_speed()\n transition = numpy.matrix([ # A\n [1.0, 0.0, 0.0, x_delta],\n [0.0, 1.0, 0.0, y_delta],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]\n ])\n\n # Update heading estimate based on steering\n new_heading = Telemetry.wrap_degrees(\n self.estimated_heading()\n + self._estimated_turn_rate_d_s * time_diff_s\n )\n self._estimates.itemset(2, new_heading)\n\n # TODO: Add acceleration values\n\n self._estimates = transition * self._estimates\n return transition", "def jump_process(phi, \n dt, \n n, \n m):\n \n if n != m:\n phi_new = np.zeros(len(phi), dtype = complex) \n \n phi_new[n] = np.copy(phi[m]/abs(phi[m]))\n \n else:\n phi_new = np.zeros(len(phi), dtype = complex) \n \n phi_new[n] = np.copy(phi[m]/abs(phi[m]))*np.exp( 1.0j * np.pi * 0.5 )\n\n return phi_new", "def writeJump(self):\n return self._writeMessage(3, [], 'writeJump')", "def jump(self):\n if (self.falling or self.rising) and self.doubleJump:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n self.fallin = False\n self.rising = True\n self.doubleJump = False\n\n if not self.falling and not self.rising:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n self.rising = True", "def cli_markov():\n\n info = 'Creates a transition matrix from a linkograph.'\n\n parser = argparse.ArgumentParser(description=info)\n\n parser.add_argument('linkograph', metavar='LINKOGRAPH.json',\n nargs=1,\n help='The linkograph')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='Prints the result to a file.')\n\n parser.add_argument('-m', '--method',\n help='The method used to create the model.')\n\n parser.add_argument('-f', '--forelinks', action='store_true',\n help='Use forelinks')\n\n parser.add_argument('-b', '--backlinks', action='store_true',\n help='Use backlinks')\n\n parser.add_argument('-d', '--dot', action='store_true',\n help='Create dot file.')\n\n parser.add_argument('-l', '--latex', action='store_true',\n help='Create latex file.')\n\n parser.add_argument('-t', '--transition', action='store_true',\n help='Use transition matrix')\n\n parser.add_argument('-p', '--precision', type=int,\n help='Number of digits retained.')\n\n args = parser.parse_args()\n\n linkNum = 1 # For forelinks.\n\n if args.backlinks:\n linkNum = 0 # For backlinks.\n\n if args.precision is None:\n args.precision = 2\n\n if args.method is None:\n args.method = 'link_predictor'\n\n linko = linkoCreate.readLinkoJson(args.linkograph[0])\n\n markovChain = createMarkov(linko, linkNum=linkNum,\n method = args.method,\n precision=args.precision)\n\n if args.out is not None:\n fh = open(args.out, 'w')\n else:\n fh = sys.stdout\n\n if args.transition:\n 
fh.write(str(linko.labels))\n fh.write('\\n')\n fh.write(str(markovChain))\n fh.write('\\n')\n elif args.latex:\n latexString = markovToLatex(markovChain, linko.labels,\n args.precision)\n fh.write(latexString)\n else:\n # markovToDot(markovChain, linko.labels, fh,\n # args.precision)\n\n dotString = markovToDot(markovChain, linko.labels,\n args.precision)\n fh.write(dotString)\n\n fh.close()", "def step(self):\n if self.iteration % self.print_interval == 0:\n print('Iteration: {:} (Time: {:.4f})'.format(self.iteration, time.time() - self.clock))\n self.clock = time.time()\n\n #if self.iteration % self.save_interval == 0:\n # print('Saving network to {:}'.format(self.save_dir))\n # self.network.save(self.save_dir)\n\n # Choose action based on output neuron spiking.\n # need inserting to spike_record\n a = self.action_function(self, output=self.output)\n # convert number into action_name\n self.action_name = self.env.subject.action_list[a]\n\n # Run a step of the environment.\n events, self.reward, self.done, info = self.env.step(action=self.action_name)\n\n # reward accumulation\n self.accumulated_reward += self.reward\n\n # currently image-based learning is adopted (Future work : spike-based)\n events_img = events_to_image(events, self.env.render_width, self.env.render_height)\n self.obs = torch.from_numpy(cv2.cvtColor(events_img, cv2.COLOR_BGR2GRAY)).float()/255.0\n\n # Encode the observation using given encoding function.\n for inpt in self.encoded:\n self.encoded[inpt] = self.encoding(self.obs, time=self.time, dt=self.network.dt)\n\n # Run the network on the spike train-encoded inputs.\n self.network.run(inpts=self.encoded, time=self.time, reward=self.reward)\n self.set_spike_data() # insert into spike_record\n\n # Plot relevant data.\n if self.iteration % self.plot_interval == 0:\n self.plot_data()\n self.plot_obs()\n\n self.iteration += 1\n\n if self.done:\n self.iteration = 0\n self.episode += 1\n self.reward_list.append(self.accumulated_reward)\n self.accumulated_reward = 0\n self.plot_reward()", "def teleop_step(self):\n # get current state\n state = self.panda.state\n self.step_number += 1\n\n return_state = self.return_state()\n\n # read in from keyboard\n key_input = self.key.get_controller_state()\n dpos, dquat, grasp, reset = (\n key_input[\"dpos\"],\n key_input[\"dquat\"],\n key_input[\"grasp\"],\n key_input[\"reset\"],\n )\n action = dpos\n\n # action in this example is the end-effector velocity\n self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)\n\n # take simulation step\n p.stepSimulation()\n\n # return next_state, reward, done, info\n next_state = self.panda.state\n return_next_state = self.return_state()\n reward, done = self.calculate_reward(next_state, action)\n print(f'step: {self.step_number}\\treward: {reward}\\tdone: {done}')\n if reset:\n done = True\n info = self.panda.state\n\n return return_state, action, reward, return_next_state, done, info", "def __step(self, p):\n action = self.__action(p)\n temp_state = self.state\n\n if self.state == 0:\n if action == 1:\n self.state += 1\n elif self.state == 1:\n if action == 1:\n self.state -= 1\n else:\n self.state += 1\n else:\n if action == 1:\n self.state += 1\n else:\n self.state -= 1\n \n self.trajectory.append([temp_state, action, self.__reward(self.state)])", "def execute_tp(self):\n self.status_message = \"State: Execute TP- Executing Motion Plan with trajectory planner\"\n self.current_state = \"execute_tp\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n 
target=self.write_joint_pos, args=(\"tp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n self.tp.set_initial_wp()\n self.tp.set_final_wp(full_wp)\n\n if self.next_state == \"estop\":\n break\n # TODO: Set the positions and break if estop is needed\n self.tp.go()\n # self.rexarm.set_positions(wp)\n # time.sleep(1.5)", "def traj_pipeline(self, prev_trmat=None):\n # image_seq = [image(frame_idx-2), image(frame_idx-1), image(frame_idx)]\n # egomotion update\n egomo = self.est.get_egomotion(self.image_seq)\n\n # egomotion transformation\n assert self.frame_idx >= 2, 'invalid self.frame_idx'\n if prev_trmat is None:\n assert self.frame_idx == 2, 'invalid self.frame_idx'\n # initialization of ego transformation matrix\n init_trmat = egomo_vec2mat(self.init_egomo_vec)\n prev_trmat = np.matmul(init_trmat, egomo_vec2mat(egomo[0])) # frame 0 to 1\n egomo_trmat = np.matmul(prev_trmat, egomo_vec2mat(egomo[1]))\n\n # tracker list update\n for t in self.t_list:\n # skip lost trackers\n if t.get_status()==False:\n continue\n # bounding box & depth\n bbox, depth = t.get_bbox(), t.get_depth()\n # project to 3d camera coordinate\n p3d_cam = cam_proj(self.k_mat, bbox, depth)\n # transform to world coordinate\n p3d = coord_transform(egomo_trmat, p3d_cam)\n t.add_attr_to_est_dict('traj', p3d)\n \n return egomo_trmat", "def step(self, move):", "def perform_step(self) -> None:\n self.n_it = self.n_it + 1\n self.update_learning_rate()\n observed_gradient = self.get_observed_gradient(self.theta)\n latent_gradient = self.compute_latent_gradient(observed_gradient)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_latent)\n self.omega = self.omega + self.epsilon / 2.0 * latent_gradient + eta\n self.theta = self.gplvm_model.predict(self.omega)[0]\n return", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def plot_trajectory(\n wall,\n reward,\n start,\n agent,\n fig,\n ax,\n arrow_width=0.5,\n EPISODE_LENGTH=35,\n animate=False,\n fname=None,\n):\n from agent_runner import run_agent\n from gridworld.gridworld import GridworldMdp\n from mdp_interface import Mdp\n\n mdp = GridworldMdp.from_numpy_input(wall, reward, start)\n\n agent.set_mdp(mdp)\n env = Mdp(mdp)\n trajectory = run_agent(agent, env, episode_length=EPISODE_LENGTH, determinism=True)\n\n if len(trajectory) <= 1:\n raise ValueError(\"Trajectory rolled out unsuccessfully\")\n\n # Tuples of (state, next) - to be used for plotting\n state_trans = [(info[0], info[2]) for info in trajectory]\n count = 0\n for trans in state_trans:\n if trans[0] == trans[1]:\n count += 1\n if count == len(state_trans):\n print(\n \"Yes, the agent given stayed in the same spot for {} iterations...\".format(\n len(state_trans)\n )\n )\n\n if fig is None or ax is None:\n fig, ax = plt.subplots(1, 1)\n if ax is not None and type(ax) is list:\n raise ValueError(\"Given {} axes, but can only use 1 axis\".format(len(ax)))\n\n # Plot starting point\n plot_pos(start, ax=ax, color=\"k\", marker=\"o\", grid_size=len(wall))\n # Plot ending 
trajectory point\n finish = state_trans[-1][0]\n plot_pos(finish, ax=ax, color=\"k\", marker=\"*\", grid_size=len(wall))\n plot_lines(\n ax,\n fig,\n trans_list=state_trans,\n color=\"black\",\n arrow_width=arrow_width,\n grid_size=len(wall),\n animate=animate,\n fname=fname,\n )\n ax.set_xticks([])\n ax.set_yticks([])\n return fig, ax", "def get_jumps(self):\n if self.jumps is not None:\n return self.jumps\n\n if self.tw is None:\n self.tw = self.get_possible_tw()\n\n assert np.all(self.tw > 0), 'transition frequencies must be greater than zero'\n\n # sort transition frequencies to get a sorted\n # list of jumps later on.\n self.tw = sorted(self.tw)\n\n # create list of jumps and distribute by degeneracy\n # of the jump.\n jumps = [None for _ in range(self.h0.shape[0])]\n for w in self.tw:\n tr = self.get_transitions(w)\n i = len(tr) - 1\n jumps[i] = tr if jumps[i] is None else np.concatenate((jumps[i], tr))\n\n self.jumps = jumps\n return self.jumps", "def teleop_step(self):\n # get current state\n state = self.panda.state\n self.step_number += 1\n\n return_state = self.return_state()\n\n # read in from keyboard\n key_input = self.key.get_controller_state()\n dpos, dquat, grasp, reset = (\n key_input[\"dpos\"],\n key_input[\"dquat\"],\n key_input[\"grasp\"],\n key_input[\"reset\"],\n )\n action = dpos\n self.close_gripper(state)\n # action[0:3] = dpos\n\n # action in this example is the end-effector velocity\n self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)\n\n # take simulation step\n p.stepSimulation()\n\n # return next_state, reward, done, info\n next_state = self.panda.state\n return_next_state = self.return_state()\n reward, done = self.calculate_reward(next_state, action)\n print(f'step: {self.step_number}\\treward: {reward}\\tdone: {done}')\n if reset:\n done = True\n info = self.panda.state\n\n # self.grasp = grasp\n return return_state, action, reward, return_next_state, done, info", "def build(self, trajectory):\n pass", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):\n \n M = particle_poses.shape[0]\n \n # TODO. For each particle calculate its predicted pose plus some\n # additive error to represent the process noise. With this demo\n # code, the particles move in the -y direction with some Gaussian\n # additive noise in the x direction. 
Hint, to start with do not\n # add much noise.\n\n #time is in ns 1e-9\n dt = dt * 1e-9\n \n if dt ==0:\n return particle_poses\n\n for m in range(M):\n\n theta = particle_poses[m, 2]\n\n v = speed_command[0]\n omega = speed_command[1]\n \n if motion_model_velocity: #Velocity\n\n if omega == 0: #straight\n vel_dx = v * cos(theta) * dt\n vel_dy = v * sin(theta) * dt\n vel_dtheta = 0\n\n else:\n vel_dx = -v / omega * sin(theta) + v / omega * sin(theta + omega * dt)\n vel_dy = v / omega * cos(theta) - v / omega * cos(theta + omega * dt)\n vel_dtheta = omega * dt\n \n\n\n if motion_model_odom:\n odom_mov = rev_odm(odom_pose, odom_pose_prev)\n\n #particle_poses[m] = fwd_odm(particle_poses[m], odom_mov)\n\n #odom_dpose = fwd_odm2(particle_poses[m], odom_mov)\n (odom_dx, odom_dy, odom_dtheta) = fwd_odm2(particle_poses[m], odom_mov)\n\n\n\n\n #fusion\n w = motion_weighting\n dx = w * odom_dx + (1-w) * vel_dx\n dy = w * odom_dy + (1-w) * vel_dy\n dtheta = w * odom_dtheta + (1-w) * vel_dtheta\n \n \n\n \n \n #process noise\n if motion_model_noise:\n noise_x= np.random.normal(0, motion_sigma_x)\n noise_y= np.random.normal(0, motion_sigma_y)\n noise_theta= np.random.normal(0, motion_sigma_theta)\n \n #local noise\n if motion_model_noise_alt:\n localnoise_x = np.random.normal(0, motion_sigma_x)\n localnoise_y = np.random.normal(0, motion_sigma_y)\n\n noise_x = localnoise_x * cos(theta) - localnoise_y * sin(theta)\n noise_y = localnoise_y * sin(theta) + localnoise_y * cos(theta)\n noise_theta = np.random.normal(0, motion_sigma_theta)\n\n\n\n particle_poses[m, 0] += dx + noise_x\n particle_poses[m, 1] += dy + noise_y\n particle_poses[m, 2] = wraptopi(theta + dtheta + noise_theta)\n\n \n return particle_poses", "def do_impact(self, car):\r\n\r\n if car is not None:\r\n if self.head_of_jump is None:\r\n self.head_of_jump = Jump()\r\n else:\r\n jj = Jump()\r\n jj.next = self.head_of_jump\r\n self.head_of_jump = jj\r\n\r\n # self.current_jump = self.head_of_jump\r", "def visualizeTrajectory(y, g):\n visualizeObs()\n x = np.linspace(-1.5, 1.5, 13)[1:-1]\n plt.plot(np.concatenate(([-1.5],x,[1.5])), np.concatenate(([0],y,[0])), color='black', marker='+')\n if g is not None:\n for i in range(y.size):\n plt.arrow(x[i], y[i], 0, -0.5*g[i], color='blue', head_width=0.05)", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def get_grasp_joint_trajectory(self, start_joints, target_pose, n_steps=40, ignore_orientation=False, link_name=None):\n link_name = link_name if link_name is not None else self.tool_frame\n \n assert len(start_joints) == len(self.joint_indices)\n assert target_pose.frame.count('base_link') == 1\n self.sim.update()\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n # initialize trajopt inputs\n rave_pose = tfx.pose(self.sim.transform_from_to(target_pose.matrix, target_pose.frame, 'world'))\n quat = rave_pose.orientation\n xyz = rave_pose.position\n quat_target = [quat.w, quat.x, quat.y, quat.z]\n xyz_target = [xyz.x, xyz.y, xyz.z]\n rave_mat = rave.matrixFromPose(np.r_[quat_target, xyz_target])\n \n# 
init_joint_target = None\n init_joint_target = self.sim.ik_for_link(rave_pose.matrix, self.manip, link_name, 0)\n if init_joint_target is not None:\n init_joint_target = self._closer_joint_angles(init_joint_target, start_joints)\n \n init_traj = self.ik_point(start_joints, xyz, n_steps=n_steps, link_name=link_name)\n \n request = self._get_grasp_trajopt_request(xyz_target, quat_target, n_steps,\n ignore_orientation=ignore_orientation, link_name=link_name, init_traj=init_traj)\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n # TODO: worth doing?\n# tool_link = self.robot.GetLink(link_name)\n# def point_at(x):\n# self.robot.SetDOFValues(x, self.joint_indices, False)\n# T = tool_link.GetTransform()\n# local_dir = xyz.array - T[:3,3]\n# return T[1:3,:3].dot(local_dir)\n# \n# for t in xrange(int(0.8*n_steps), n_steps-1):\n# #prob.AddConstraint(point_at, [(t,j) for j in xrange(len(self.joint_indices))], \"EQ\", \"POINT_AT_%i\"%t)\n# prob.AddErrorCost(point_at, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"POINT_AT_%i\"%t)\n\n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n prob.SetRobotActiveDOFs() # set robot DOFs to DOFs in optimization problem\n #num_upsampled_collisions = len(traj_collisions(result.GetTraj(), self.robot, n=100))\n num_upsampled_collisions = self._num_collisions(result.GetTraj())\n print('Number of collisions: {0}'.format(num_upsampled_collisions))\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n if num_upsampled_collisions > 2:\n #if not traj_is_safe(result.GetTraj()[:], self.robot): # Check that trajectory is collision free\n return None\n else:\n return result.GetTraj()", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def generate_motion_patters(self):\n\n\t\t# Motion primimtives for the forward direction.....................\n\t\td_del = 0.08\t\n\t\tdt = self.dt\n\t\tv = 2\t# Assuming a constant 
longitudinal velocity\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns in forward directon: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\t\t\t\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp = np.vstack((p,np.array([x0, y0, theta0])))\n\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)\n\n\t\t\n\t\t# Motion primitives for the backward direction ...................\n\t\td_del = 0.1\n\t\tv = -1.2\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns for the backward direction: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp=np.vstack((p, np.array([x0, y0, theta0])))\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)", "def newmark(self, excitation, dt, exc_vec=None, a_ini=None, v_ini=None, u_ini=None, beta=0.5, gamma=0.25):\n self.dt = dt\n\n \"\"\"If no instructions are given for the initial conditions, they are assumed to be 0\"\"\"\n if exc_vec is None:\n exc_vec = np.diag(self.M)\n if a_ini is None:\n a_ini = np.zeros(len(self.M))\n if v_ini is None:\n v_ini = np.zeros(len(self.M))\n if u_ini is None:\n u_ini = np.zeros(len(self.M))\n\n \"\"\"Setting the initial conditions\"\"\"\n a = []\n v = []\n u = []\n a.append(a_ini)\n v.append(v_ini)\n u.append(u_ini)\n time = [0.]\n\n \"\"\"Inversion of the dynamic stiffness matrix\"\"\"\n dynamic_stiffness = np.linalg.inv((self.M + self.C + self.K * dt ** 2 / 4))\n\n \"\"\"Time integration\"\"\"\n for i in range(max(np.shape(excitation))):\n a.append(\n dynamic_stiffness.dot(- self.C.dot(v[i] + dt * beta * a[i])\n - self.K.dot(u[i] + dt * v[i] + dt ** 2 * gamma * a[i])\n + (exc_vec * excitation[i]))\n )\n v.append(\n v[i] + (a[i] + a[i + 1]) * dt * beta\n )\n u.append(\n u[i] + v[i] * dt + (a[i] + a[i + 1]) * dt ** 2 * gamma\n )\n\n time.append(dt * i)\n\n self.acceleration = list(map(list, zip(*a)))\n self.velocity = list(map(list, zip(*v)))\n self.displacement = list(map(list, zip(*u)))\n self.time = time\n return self.acceleration, self.velocity, self.displacement, self.time", "def write_goto(output_file, command, label, curr_function):\n if command == \"if-goto\":\n output_file.write(\"@SP\" + \"\\n\" +\n \"AM = M - 1\" + \"\\n\" +\n \"D = M\" + \"\\n\" +\n \"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"D; JNE\" + \"\\n\")\n else:\n output_file.write(\"@\" + curr_function[0] + \"$\" + label + \"\\n\" +\n \"0; JMP\" + \"\\n\")", "def seek(self, cutoff):\n while self.step_num < cutoff and self.op_state == Turing_Machine.RUNNING:\n \"\"\"Perform an atomic transition or chain step.\"\"\"\n if self.op_state != Turing_Machine.RUNNING:\n continue\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n continue\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n 
self.num_loops += 1\n\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, self.dir)\n\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n\n # Apply transition\n # Chain move\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n self.tau_arrowheads[link],\n float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def sample_trajectory(self, env):\n obs_s = []\n init_obs = env.reset()\n obs_s.append(init_obs)\n total_re = 0\n\n def padding(seq):\n if len(seq) < self.lookback:\n len_to_pad = self.lookback - len(seq)\n pad = [np.zeros_like(init_obs)] * len_to_pad\n seq = pad + seq\n return seq\n\n while True:\n obs_seq = np.asarray(padding(obs_s[-self.lookback:]))\n ac = self.get_action(obs_seq)\n new_obs, re, done, _ = env.step(ac)\n obs_s.append(new_obs)\n new_obs_seq = np.asarray(padding(obs_s[-self.lookback:]))\n total_re += re\n self.replay_buffer.add(obs_seq, ac, re, new_obs_seq, done)\n if done:\n break\n self.t += 1\n return total_re", "def dynamicMoon(moonPosX, moonPosY, velocityXMoon, velocityYMoon, h):\r\n kPosMoon = [[0 for x in range(4)] for y in range(2)] # initialising the 2x2 k matricies\r\n kVMoon = [[0 for x in range(4)] for y in range(2)]\r\n \r\n kPosMoon[0][0] = velocityXMoon # this value is k1 for the x position. It is just the velocity of the rocket at its current position.\r\n kPosMoon[1][0] = velocityYMoon #this value is k1 for the y position\r\n kVMoon[0][0] = accelerationCalcX(moonPosX, moonPosY) #this value is k1 for the x velocity. 
At its current position what is the acceleration of the projectile\r\n kVMoon[1][0] = accelerationCalcY(moonPosX, moonPosY) # this value is k1 for the y velocity\r\n \r\n #k2s\r\n kPosMoon[0][1] = velocityXMoon + h*kVMoon[0][0]/2 #what would its velocity be if it carried on at its initial acceleration (calculated in k1 for x velocity) for half a time step\r\n kPosMoon[1][1] = velocityYMoon + h*kVMoon[1][0]/2\r\n kVMoon[0][1] = accelerationCalcX(moonPosX + h*kPosMoon[0][0]/2, moonPosY + h*kPosMoon[1][0]/2) # if it continued at the velocity in k2 for x position for half a time step what would the acceleration on the projectile be.\r\n kVMoon[1][1] = accelerationCalcY(moonPosX + h*kPosMoon[0][0]/2, moonPosY + h*kPosMoon[1][0]/2)\r\n \r\n #k3s\r\n kPosMoon[0][2] = velocityXMoon + h*kVMoon[0][1]/2 # if it carried on at the acceleration calculated for k2 in x velocity for half a time step, what would its velocity be\r\n kPosMoon[1][2] = velocityYMoon + h*kVMoon[1][1]/2\r\n kVMoon[0][2] = accelerationCalcX(moonPosX + h*kPosMoon[0][1]/2, moonPosY + h*kPosMoon[1][1]/2) # if carried on at the velocity calculated in k2 for half a time step then what would its accelaration be\r\n kVMoon[1][2] = accelerationCalcY(moonPosX + h*kPosMoon[0][1]/2, moonPosY + h*kPosMoon[1][1]/2)\r\n \r\n #k4s\r\n kPosMoon[0][3] = velocityXMoon + h*kVMoon[0][2] # if it carried on at the acceleration calcualted in k3 fro a whole timestep, then what would its velocity be \r\n kPosMoon[1][3] = velocityYMoon + h*kVMoon[1][2]\r\n kVMoon[0][3] = accelerationCalcX(moonPosX + h*kPosMoon[0][2], moonPosY + h*kPosMoon[1][2]) #if it continued at the velocity calculated in k3 for a whole time step, then what would its accelaration be\r\n kVMoon[1][3] = accelerationCalcY(moonPosX + h*kPosMoon[0][2], moonPosY + h*kPosMoon[1][2])\r\n \r\n velocityXMoon = velocityXMoon+(h/6)*(kVMoon[0][0]+2*kVMoon[0][1]+2*kVMoon[0][2]+kVMoon[0][3]) # the velocity in x is appended, after combining the ks for velocity in x\r\n velocityYMoon = velocityYMoon+(h/6)*(kVMoon[1][0]+2*kVMoon[1][1]+2*kVMoon[1][2]+kVMoon[1][3]) # the velocity in y is appended, after combining the ks for velocity in y\r\n moonPosX = moonPosX+(h/6)*(kPosMoon[0][0]+2*kPosMoon[0][1]+2*kPosMoon[0][2]+kPosMoon[0][3]) # the x position is appended, after combinging the ks for x position\r\n moonPosY = moonPosY+(h/6)*(kPosMoon[1][0]+2*kPosMoon[1][1]+2*kPosMoon[1][2]+kPosMoon[1][3]) # the y position is appended, after combinging the ks for y position\r\n \r\n return moonPosX, moonPosY, velocityXMoon, velocityYMoon # return the position and velocity back to moonPass\r", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def jump(act_line: int, offset: str):\n return act_line + int(offset)", "def deploy(model, env, timesteps=1000):\n\n obs = env.reset()\n reward_sum, length, successes, n_episodes = (0.0, 
0, 0, 0)\n returns, returns_success, trajectories, trajectory = ([], [], [], [])\n\n for _ in range(timesteps):\n action, _ = model.predict(obs, deterministic=False)\n if isinstance(env, CMDP):\n obs, reward, g, done, info = env.step(action)\n else:\n obs, reward, done, info = env.step(action)\n reward_sum += reward\n length += 1\n trajectory.append(env.s)\n if done:\n success = info['next_state_type'] == 'G'\n successes += float(success)\n returns.append(reward_sum)\n if success:\n returns_success.append(reward_sum)\n length = 0\n reward_sum = 0.0\n n_episodes += 1\n obs = env.reset()\n trajectories.append(trajectory)\n trajectory = []\n if trajectory:\n trajectories.append(trajectory)\n if n_episodes == 0:\n n_episodes = 1\n returns.append(reward_sum)\n success_ratio = successes / n_episodes\n avg_return = np.mean(returns)\n avg_return_success = np.mean(returns_success)\n return success_ratio, avg_return, avg_return_success, trajectories", "def simulate_random_walk (G, damping, max_jumps):\n\n results = []\n nodes = [] # keep nodes\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j = 0\n while (j < max_jumps):\n previous_node = current_node\n jump_decision = random.uniform(0, 1)\n\n if jump_decision < damping or G.out_degree(current_node) == 0:\n # make a jump\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j += 1\n try:\n distance = nx.astar_path_length(G, previous_node, \\\n current_node, weight = 'weight')\n # distance intervals 1h traveling\n results.append(distance)\n nodes.append(previous_node)\n except nx.NetworkXNoPath: continue\n\n else:\n # move to neighbor node\n incident = G.out_edges([current_node], data = False)\n distribution = [ G.get_edge_data(e[0], e[1])['transition'] for e in incident ]\n xk = np.arange(len(incident))\n generator = stats.rv_discrete(values = (xk, distribution))\n current_node = incident[generator.rvs()][1]\n\n return results, nodes", "def jump(self):\n\n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(\n self, self.platforms, False)\n self.rect.y -= 2\n\n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= WIN_HEIGHT:\n self.change_y = -10", "def jump_diffusion_process(params: 'ModelParameters') -> 'np.array':\n s_n = time = 0\n small_lamda = -(1.0 / params.lamda)\n jump_sizes = []\n for _ in range(params.all_time):\n jump_sizes.append(0.0)\n while s_n < params.all_time:\n s_n += small_lamda * np.log(np.random.uniform(0, 1))\n for j in range(params.all_time):\n if time * params.all_delta <= s_n * params.all_delta <= (j + 1) * params.all_delta:\n jump_sizes[j] += random.normalvariate(params.jumps_mu, params.jumps_sigma)\n break\n time += 1\n return jump_sizes", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = 
self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def path_ent(tmat, traj):\n traj = array(traj)\n jumps = zip(traj[:-1],traj[1:])\n jump_list = [item for item in (jumps)]\n\n ent = list() \n for jump in jump_list:\n prv = int(jump[0])\n nxt = int(jump[1])\n ent.append(log( tmat[prv, nxt]/tmat[nxt, prv] ))\n return list(ent)", "def build(self, trajectory):\n #TODO Implement?", "def trajectory(self, state, T=1, time_steps=200):\n\n state = state.to(device)\n t = torch.linspace(0, T, time_steps).to(device)\n\n # integrate and remove batch dim\n traj = self.model_of_dyn_system.trajectory(state, t)\n return traj.detach().cpu()[:, 0, :]", "def jump(self, xvel = 0, yvel = 0): #TODO: figure out how a monster's jumping ability is determined.\n self.xvel += xvel\n self.yvel -= yvel\n self.animation.iter()\n self.ai_count = 25 #TEMP\n self.onGround = False", "def gt_command(self):\n self.write(\n \"@SP\\nA=M-1\\nD=M\\n@NEG1\" + str(\n self.__label_num) + \"\\nD;JLT\\n@POS1\" + str(\n self.__label_num) +\n \"\\nD;JGE\\n(NEG1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@POS2\" + str(\n self.__label_num) + \"\\nD;JGT\\n@CONT\"\n + str(self.__label_num) + \"\\n0;JMP\\n(POS1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@NEG2\" +\n str(self.__label_num) + \"\\nD;JLT\\n@CONT\" + str(\n self.__label_num) + \"\\n0;JMP\\n(POS2\" + str(\n self.__label_num) + \")\\n@SP\"\n \"\\nA=M-1\\nA=A-1\\nM=-1\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + \"\\n0;JMP\\n(NEG2\" + str(\n self.__label_num) + \")\\n@SP\" +\n \"\\nA=M-1\\nA=A-1\\nM=0\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + \"\\n0;JMP\\n(CONT\" + str(\n self.__label_num) + \")\\n\"\n \"@SP\\nM=M-1\\nA=M\\nD=M\\n@SP\\nA=M-1\\nD=M-D\\n@TRUE\" + str(\n self.__label_num) + \"\\nD;JGT\\n@SP\\nA=M-1\\nM=0\\n@ENDLABEL\" +\n str(self.__label_num) + \"\\n0;JMP\\n(TRUE\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nM=-1\\n(ENDLABEL\" +\n str(self.__label_num) + \")\\n\")", "def perform_step(self) -> None:\n self.n_it = self.n_it + 1\n self.update_learning_rate()\n observed_gradient = self.get_observed_gradient(self.theta)\n latent_gradient = self.compute_latent_gradient(observed_gradient)\n g_omega = self.gplvm_model.predict_wishart_embedding(self.omega)[0]\n g_inv_omega = np.linalg.inv(g_omega)\n mu = np.dot(g_inv_omega, latent_gradient[0, :])\n epsilon_derivative = 1e-4\n for k in range(self.dim_latent):\n increment = np.copy(self.omega)\n increment[0, k] = increment[0, k] + epsilon_derivative\n g_derivative =\\\n (self.gplvm_model.predict_wishart_embedding(increment)[0] -\n g_omega) / epsilon_derivative\n tmp_mu = np.dot(g_inv_omega, np.dot(g_derivative, g_inv_omega))\n mu = mu - 2.0 * tmp_mu[:, k]\n mu = mu + g_inv_omega[:, k] * np.trace(np.dot(g_inv_omega,\n g_derivative))\n g_inv_sqrt_omega = sqrtm(g_inv_omega)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_latent)\n self.omega = self.omega + self.epsilon / 2.0 * mu +\\\n np.dot(g_inv_sqrt_omega, eta)\n self.theta = self.gplvm_model.predict(self.omega)[0]\n return", "def calculate_trajectory(self, position, 
velocity, acceleration,\n deltaV, deltaD):\n # Initial/End conditions\n initS = (position[0], velocity[0], acceleration[0])\n initD = (position[1], velocity[1], acceleration[1])\n\n # TODO: find proper boundary condition for dS\n dS = ((2 * initS[1] + deltaV) / 2) * self.deltaT\n\n endS = (initS[0] + dS, initS[1] + deltaV, 0)\n endD = (initD[0] + deltaD, 0, 0)\n\n # Calculate coefficients\n coeff_S = self.calculate_coefficients(initS, endS)\n coeff_D = self.calculate_coefficients(initD, endD)\n\n # Calculate trajectory\n trajectory = self._get_trajectory(coeff_S, coeff_D)\n\n return trajectory", "def stepGenerator(self, current, target):\n\n while True:\n target = self.cfg[\"GOAL\"]\n if self.gotscript:\n if self.pathsteps in self.tc:\n terrain, topleft, botright = self.tc.get(self.pathsteps)\n pointlist = p4.getBlock(topleft, botright)\n # change logical map\n self.lmap.setPoints(terrain, pointlist)\n # change in gui, if running\n try:\n self.gui.clearPoints(pointlist)\n except:\n pass\n if self.pathsteps in self.gc:\n target = self.lmap.nearestPassable(self.gc.get(self.pathsteps))\n self.setGoal(target)\n if self.pathsteps in self.ac:\n newpos = p4.addVectors(current, self.ac.get(self.pathsteps))\n current = self.lmap.nearestPassable(newpos)\n yield newpos # scripted move is not costed or counted\n try:\n clockstart = timer() # start timer\n nextreturn = self.agent.getNext(self.lmap, current, target, self.timeremaining)\n logging.debug(nextreturn)\n clockend = timer()\n except:\n raise p4.BadAgentException()\n\n # Only time first step unless operating in 'realtime' mode. If this is realtime, and the step involved no reasoning (took less than FREE_TIME) do not count its time\n if ((not self.cfg.get(\"REALTIME\") and self.pathtime) or (\n (clockend - clockstart) < self.cfg.get(\"FREE_TIME\"))):\n steptime = 0\n else:\n steptime = (clockend - clockstart)\n previous = current\n\n # Agent may have returned single step or step plus sets of coords and colors.\n # Try/except distinguishes between them\n try:\n x = nextreturn[1][0] # fails if nextreturn is coord only\n current, configsets = nextreturn\n except TypeError:\n current = nextreturn\n finally:\n self.pathsteps += 1\n self.pathtime += steptime\n self.timeremaining -= steptime\n\n # We now consider every door open. In fact, we are just computing the final path cost, we are not\n # searching for it. 
So is reasonable to assume that I have all the keys along the path.\n allkeys = [k for k in self.lmap.key_and_doors.keys()]\n cost = self.lmap.getCost(current, previous, allkeys)\n # self.pathcost += self.lmap.getCost(current, previous, allkeys)\n if not self.lmap.isAdjacent(current, previous):\n cost = float('inf')\n # agent has made illegal move:\n if cost == float('inf'):\n self.updateStatus(\"Illegal move at \" + str(current) + \":\" + str(self.lmap.getCost(current)), False)\n if self.cfg[\"STRICT\"]:\n current = previous\n nextreturn = previous\n self.pathsteps -= 1\n cost = 0\n self.pathcost += cost\n yield nextreturn", "def makeSpline(self,\n waypointTrajectory: Trajectory,\n preventOvershoot: bool = True,\n loop: bool = False\n ) -> None:\n if loop and waypointTrajectory.milestones[-1] != waypointTrajectory.milestones[0]:\n raise ValueError(\"Asking for a loop trajectory but the endpoints don't match up\")\n velocities = []\n t = waypointTrajectory\n d = len(t.milestones[0])\n if len(t.milestones)==1:\n velocities.append([0]*d)\n elif len(t.milestones)==2:\n if loop:\n v = [0]*d\n else:\n s = (1.0/(t.times[1]-t.times[0]) if (t.times[1]-t.times[0]) != 0 else 0)\n v = vectorops.mul(vectorops.sub(t.milestones[1],t.milestones[0]),s) \n velocities.append(v)\n velocities.append(v)\n else:\n third = 1.0/3.0\n N = len(waypointTrajectory.milestones)\n if loop:\n timeiter = zip([-2]+list(range(N-1)),range(0,N),list(range(1,N))+[1])\n else:\n timeiter = zip(range(0,N-2),range(1,N-1),range(2,N))\n for p,i,n in timeiter:\n if p < 0:\n dtp = t.times[-1] - t.times[-2]\n else:\n dtp = t.times[i] - t.times[p]\n if n <= i:\n dtn = t.times[1]-t.times[0]\n else:\n dtn = t.times[n]-t.times[i]\n assert dtp >= 0 and dtn >= 0\n s = (1.0/(dtp+dtn) if (dtp+dtn) != 0 else 0)\n v = vectorops.mul(vectorops.sub(t.milestones[n],t.milestones[p]),s)\n if preventOvershoot:\n for j,(x,a,b) in enumerate(zip(t.milestones[i],t.milestones[p],t.milestones[n])):\n if x <= min(a,b):\n v[j] = 0.0\n elif x >= max(a,b):\n v[j] = 0.0\n elif v[j] < 0 and x - v[j]*third*dtp >= a:\n v[j] = 3.0/dtp*(x-a)\n elif v[j] > 0 and x - v[j]*third*dtp <= a:\n v[j] = 3.0/dtp*(x-a)\n elif v[j] < 0 and x + v[j]*third*dtn < b:\n v[j] = 3.0/dtn*(b-x)\n elif v[j] > 0 and x + v[j]*third*dtn > b:\n v[j] = 3.0/dtn*(b-x)\n \n velocities.append(v)\n if not loop:\n #start velocity as quadratic\n x2 = vectorops.madd(t.milestones[1],velocities[0],-third*(t.times[1]-t.times[0]))\n x1 = vectorops.madd(x2,vectorops.sub(t.milestones[1],t.milestones[0]),-third)\n v0 = vectorops.mul(vectorops.sub(x1,t.milestones[0]),3.0/(t.times[1]-t.times[0]))\n #terminal velocity as quadratic\n xn_2 = vectorops.madd(t.milestones[-2],velocities[-1],third*(t.times[-1]-t.times[-2]))\n xn_1 = vectorops.madd(xn_2,vectorops.sub(t.milestones[-1],t.milestones[-2]),third)\n vn = vectorops.mul(vectorops.sub(t.milestones[-1],xn_1),3.0/(t.times[-1]-t.times[-2]))\n velocities = [v0]+velocities+[vn]\n self.__init__(waypointTrajectory.times[:],waypointTrajectory.milestones,velocities)", "def __init__(self,\n init_pose = np.array([0.0,0.0,10.0,0.0,0.0,0.0]),\n init_velocities = np.array([0.0,0.0,0.1]),\n init_angle_velocities = np.array([0.0,0.0,0.0]),\n runtime=5.,\n target_pos=np.array([0.0,0.0,50.0])):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n self.state_size = self.action_repeat * 6\n \n self.action_low = 10\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if 
target_pos is not None else np.array([0., 0., 10.])\n\n # to calc reward\n self.pos_diff_init = None", "def _content_jump(self, target):\n head = self._relative_head_pos()\n similarities = 1 - np.sqrt(np.sum((self.memory - target) ** 2, 1)) / self.memory_unit_size\n pos = int(np.argmax(similarities).item())\n if similarities[pos] > self.min_similarity_to_jump:\n self.head_pos = pos\n else:\n self.head_pos = 0\n if self.history is not None:\n self.history[\"loc\"][-1].append((head, 0.1))", "def write_trajectory(self, environmnent, pdb_filename):\n # TODO\n pass", "def time_step(dt, mol):\n f = mol.get_force() \n a1 = -f / mol.p1.m\n a2 = f / mol.p2.m\n v_prev1 = mol.p1.vel - dt * a1 / 2\n v_prev2 = mol.p2.vel - dt * a2 / 2\n v_next1 = v_prev1 + a1 * dt\n v_next2 = v_prev2 + a2 * dt\n mol.p1.vel = v_next1\n mol.p2.vel = v_next2\n mol.p1.pos = mol.p1.pos + v_next1 * dt\n mol.p2.pos = mol.p2.pos + v_next2 * dt", "def trajectory_plt2(model, inputs, targets, timesteps, highlight_inputs=False,\n include_arrow=False, save_fig=''):\n alpha = 0.5\n color=[]\n for i in range(len(targets)):\n if targets[i]==0:\n color.append('purple')\n elif targets[i] == 1:\n color.append('steelblue')\n elif targets[i] == 2:\n color.append('c')\n elif targets[i] == 3:\n color.append('lime')\n elif targets[i] == 4:\n color.append('yellow')\n\n # Calculate trajectories (timesteps, batch_size, input_dim)\n trajectories = model.odeblock.trajectory(inputs, timesteps).detach()\n # Features are trajectories at the final time\n features = trajectories[-1]\n\n if model.augment_dim > 0:\n aug = torch.zeros(inputs.shape[0], model.odeblock.odefunc.augment_dim)\n inputs_aug = torch.cat([inputs, aug], 1)\n else:\n aug = torch.zeros(inputs.shape[0], model.odeblock.odefunc.hidden_dim)\n inputs_aug = torch.cat([inputs, aug], 1)\n\n if model.augment_dim ==1 :\n input_dim = model.data_dim + model.augment_dim\n elif model.augment_dim == 0 :\n input_dim = model.data_dim\n else:\n input_dim = model.data_dim #+ 2\n\n\n if input_dim == 2:\n # Plot starting and ending points of trajectories\n input_linewidths = 2 if highlight_inputs else 0\n plt.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=input_linewidths, edgecolor='orange')\n plt.scatter(features[:, 0].numpy(), features[:, 1].numpy(), c=color,\n alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n plt.plot(x_traj, y_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, 1 )\n # Optionally add arrow to indicate direction of flow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1]\n plt.arrow(arrow_start[0], arrow_start[1],\n arrow_end[0] - arrow_start[0],\n arrow_end[1] - arrow_start[1], shape='full', lw=0,\n length_includes_head=True, head_width=0.15,\n color=color[i], alpha=alpha)\n\n plt.tick_params(axis='both', which='both', bottom=False, top=False,\n labelbottom=False, right=False, left=False,\n labelleft=False)\n\n ax = plt.gca()\n elif input_dim == 3:\n # Create figure\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Plot starting and ending points of trajectories\n input_linewidths = 1 if highlight_inputs else 0\n ax.scatter(inputs_aug[:, 0].numpy(), inputs_aug[:, 1].numpy(),\n inputs_aug[:, 2].numpy(), c=color, alpha=alpha,\n linewidths=input_linewidths, edgecolor='orange')\n 
ax.scatter(features[:, 0].numpy(), features[:, 1].numpy(),\n features[:, 2].numpy(), c=color, alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n x_traj = trajectory[:, 0].numpy()\n y_traj = trajectory[:, 1].numpy()\n z_traj = trajectory[:, 2].numpy()\n ax.plot(x_traj, y_traj, z_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, z_traj)\n # Optionally add arrow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2], z_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1], z_traj[-1]\n\n arrow = Arrow3D([arrow_start[0], arrow_end[0]],\n [arrow_start[1], arrow_end[1]],\n [arrow_start[2], arrow_end[2]],\n mutation_scale=15,\n lw=0, color=color[i], alpha=alpha)\n ax.add_artist(arrow)\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n elif input_dim == 4:\n # Create figure\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Plot starting and ending points of trajectories\n input_linewidths = 1 if highlight_inputs else 0\n from sklearn.decomposition import PCA\n pca = PCA(n_components=3)\n pca_input = pca.fit_transform(inputs_aug )\n pca_features = pca.fit_transform(features )\n\n ax.scatter(pca_input[:, 0], pca_input[:, 1],\n pca_input[:, 2], c=color, alpha=alpha,\n linewidths=input_linewidths, edgecolor='orange')\n ax.scatter(pca_features[:, 0], pca_features[:, 1],\n pca_features[:, 2], c=color, alpha=alpha, linewidths=0)\n\n # For each point in batch, plot its trajectory\n for i in range(inputs_aug.shape[0]):\n # Plot trajectory\n trajectory = trajectories[:, i, :]\n pca_traje = pca.fit_transform(trajectory )\n x_traj = pca_traje[:, 0]\n y_traj = pca_traje[:, 1]\n z_traj = pca_traje[:, 2]\n ax.plot(x_traj, y_traj, z_traj, c=color[i], alpha=alpha)\n #compute_distance(x_traj, y_traj, z_traj)\n # Optionally add arrow\n if include_arrow:\n arrow_start = x_traj[-2], y_traj[-2], z_traj[-2]\n arrow_end = x_traj[-1], y_traj[-1], z_traj[-1]\n\n arrow = Arrow3D([arrow_start[0], arrow_end[0]],\n [arrow_start[1], arrow_end[1]],\n [arrow_start[2], arrow_end[2]],\n mutation_scale=15,\n lw=0, color=color[i], alpha=alpha)\n ax.add_artist(arrow)\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n else:\n raise RuntimeError(\"Input dimension must be 2 or 3 but was {}\".format(input_dim))\n\n ax.set_aspect(get_square_aspect_ratio(ax))\n\n if len(save_fig):\n plt.savefig(save_fig, format='png', dpi=400, bbox_inches='tight')\n plt.clf()\n plt.close()", "def generate_jump(jmp_name: str, cond: typing.Callable):\n @vm_operation\n def gen(vm_state: VmState, *args, op_bytecode=None, **kwargs) -> VmState:\n op_code, _, arg1, _, _ = op_bytecode\n\n assert VM_OPERATION_TO_BYTECODE[op_code] == jmp_name\n\n label_index = arg1\n\n if label_index not in vm_state.vm_labels:\n raise Exception(f\"Bad label {label_index}\")\n\n if cond(vm_state):\n vm_state.vm_code_pointer = vm_state.vm_labels[label_index]\n\n return vm_state\n\n # Need for easy debugging\n gen.__name__ = f\"vm_{jmp_name.lower()}\"\n\n return gen", "def cross_platform(ramp_obj, agent_obj, target_position=None):\n paths = []\n\n points = []\n\n if ramp_obj is not None:\n dist_p1 = dist_btw_3d_pts([agent_obj.agent_position[\"x\"], agent_obj.agent_position[\"y\"],\n agent_obj.agent_position[\"z\"]],[ ramp_obj.jump_point_1[\"x\"],\n ramp_obj.jump_point_1[\"y\"], ramp_obj.jump_point_1[\"z\"]])\n\n dist_p2 = dist_btw_3d_pts([agent_obj.agent_position[\"x\"], agent_obj.agent_position[\"y\"],\n 
agent_obj.agent_position[\"z\"]], [ramp_obj.jump_point_2[\"x\"],\n ramp_obj.jump_point_2[\"y\"], ramp_obj.jump_point_2[\"z\"]])\n\n dist_p3 = dist_btw_3d_pts([agent_obj.agent_position[\"x\"], agent_obj.agent_position[\"y\"],\n agent_obj.agent_position[\"z\"]], [ramp_obj.jump_point_3[\"x\"],\n ramp_obj.jump_point_3[\"y\"], ramp_obj.jump_point_3[\"z\"]])\n if dist_p1 < dist_p2 and dist_p1 < dist_p3:\n selected_point = ramp_obj.jump_point_1\n selected_point_ = ramp_obj.jump_point_1_\n elif dist_p2 < dist_p1 and dist_p2 < dist_p3:\n selected_point = ramp_obj.jump_point_2\n selected_point_ = ramp_obj.jump_point_2_\n else:\n selected_point = ramp_obj.jump_point_3\n selected_point_ = ramp_obj.jump_point_3_\n points.append((\"goto\", selected_point_[\"x\"], selected_point_[\"z\"], 0.01))\n points.append((\"goto\", selected_point[\"x\"], selected_point[\"z\"], 0.01))\n points.append((\"jump\", ramp_obj.height, 0))\n points.append((\"goto\", ramp_obj.target_position[\"x\"], ramp_obj.target_position[\"z\"], 0.24))\n points.append((\"meta\", \"Platform-jump\", 0, 0))\n paths.append(points)\n else:\n points.append((\"goto\", target_position[\"x\"], target_position[\"z\"], 0.24))\n points.append((\"meta\", \"Straight-Target\", 0, 0))\n paths.append(points)\n\n return paths", "def select_jump(HC, \n HC_row, \n HC_col, \n phi, \n phim1, \n dt, \n eps, \n Pjump):\n \n Psigma_cum = 0\n\n rnd = np.random.rand()\n \n for n,m in zip(HC_row,HC_col):\n\n if n != m:\n Psigma_cum -= dt * eps * HC[n,m] * abs(phi[m])**2 \n \n else:\n Psigma_cum += dt * eps * HC[n,m] * abs(phi[m])**2 \n \n if rnd < Psigma_cum/Pjump:\n phip1Q = jump_process(phi, dt, n, m) \n \n phim1Q = np.copy(phip1Q)\n phiQ = np.copy(phip1Q) \n \n return phim1Q, phiQ", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def goto_point(self,targetx,targety):\n #if point is 0,0, make 0.01,0.01 to avoid divide by 0\n if targetx == 0 and targety == 0:\n targetx = 0.01\n targety = 0.01\n self.targetdistance = math.sqrt((self.currentx-targetx)**2 + (self.currenty-targety)**2)\n self.targetangle = math.atan2(targety-self.currenty,targetx-self.currentx)\n self.angledifference = self.angle_diff(self.targetangle,self.orientation)\n if abs(self.angledifference) < .10:\n self.turnspeed = 0\n else:\n self.turnspeed = math.tanh(self.kturn*self.angledifference)\n self.speed = math.tanh(self.targetdistance*self.kspeed/self.angledifference)\n if self.speed < 0:\n self.speed = 0\n self.linearVector = Vector3(x=self.speed, y=0.0, z=0.0)\n self.angularVector = Vector3(x = 0.0, y = 0.0, z = self.turnspeed)\n # print \"currentx = \" + str(self.currentx)\n # print \"currenty = \" + str(self.currenty)\n # print \"orientation = \" + str(self.orientation)\n # print \"targetangle = \" + str(self.targetangle)\n # print \"angledifference = \" + str(self.angledifference)\n #print \"turnspeed = \" + str(self.turnspeed)\n #print \"speed = \" + str(self.speed)", "def get_return_from_grasp_joint_trajectory(self, start_joints, target_pose, n_steps=40):\n assert len(start_joints) == len(self.joint_indices)\n assert 
target_pose.frame.count('base_link') == 1\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n # initialize trajopt inputs\n rave_pose = tfx.pose(self.sim.transform_from_to(target_pose.matrix, target_pose.frame, 'world'))\n quat = rave_pose.orientation\n xyz = rave_pose.position\n quat_target = [quat.w, quat.x, quat.y, quat.z]\n xyz_target = [xyz.x, xyz.y, xyz.z]\n rave_mat = rave.matrixFromPose(np.r_[quat_target, xyz_target])\n \n request = self._get_return_from_grasp_trajopt_request(xyz_target, quat_target, n_steps)\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n tool_link = self.robot.GetLink(self.tool_frame)\n def penalize_low_height(x):\n self.robot.SetDOFValues(x, self.joint_indices, False)\n z = tool_link.GetTransform()[2,3]\n return max(0, 10.0 - z)\n\n for t in xrange(n_steps-2):\n prob.AddErrorCost(penalize_low_height, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"PENALIZE_LOW_HEIGHT_%i\"%t)\n \n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n self.robot.SetDOFValues(start_joints, self.joint_indices)\n prob.SetRobotActiveDOFs() # set robot DOFs to DOFs in optimization problem\n num_upsampled_collisions = self._num_collisions(result.GetTraj())\n print('Number of collisions: {0}'.format(num_upsampled_collisions))\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n if num_upsampled_collisions > 2:\n return None\n else:\n return result.GetTraj()" ]
[ "0.6040567", "0.6015634", "0.60108757", "0.590324", "0.58615047", "0.58486295", "0.5742071", "0.5694309", "0.5661889", "0.5657638", "0.563613", "0.5598414", "0.5571726", "0.5569394", "0.55562425", "0.5501541", "0.5492893", "0.5480169", "0.54617375", "0.54580027", "0.5438285", "0.54263407", "0.5355802", "0.5347102", "0.53291976", "0.5309144", "0.52895343", "0.52754825", "0.52746654", "0.524352", "0.5234775", "0.5233507", "0.52249193", "0.52232814", "0.5214129", "0.51871145", "0.51749027", "0.5173286", "0.5168738", "0.5168709", "0.51657987", "0.5163044", "0.51607573", "0.5157252", "0.51452136", "0.5136591", "0.51306194", "0.51232606", "0.5121082", "0.51176554", "0.5112847", "0.5112462", "0.50998056", "0.50923914", "0.50895315", "0.50860256", "0.5083545", "0.5076981", "0.50707155", "0.5070686", "0.5069819", "0.50625813", "0.50518155", "0.5049841", "0.504961", "0.5048125", "0.5045186", "0.5044475", "0.5042408", "0.5035006", "0.5034964", "0.5034023", "0.5020907", "0.50184083", "0.50176024", "0.5017599", "0.5015602", "0.5000768", "0.49981907", "0.49917373", "0.49842474", "0.49797592", "0.49716604", "0.49661684", "0.49654457", "0.49621332", "0.49612302", "0.49567288", "0.4953246", "0.49518618", "0.4947372", "0.49431762", "0.49429393", "0.49408782", "0.49346825", "0.49291807", "0.49271792", "0.49218467", "0.4921779", "0.4920298", "0.4911091" ]
0.0
-1
Calculates a time step of the Direct Method
def RunExactTimestep(self): if self.sim_t == 0: randoms = np.random.random(1000) self.randoms_log = np.log(randoms)*-1 self.randoms = np.random.random(1000) self.count = 0 elif self.count == 1000: randoms = np.random.random(1000) self.randoms_log = np.log(randoms)*-1 self.randoms = np.random.random(1000) self.count = 0 self.sim_tau = self.randoms_log[self.count]/float(self.sim_a_0) # reaction time generation self.sim_r2 = self.randoms[self.count] # Draw random number 2 [0-1] self.count +=1 if (self.sim_t + self.sim_tau) < self.settings.endtime: self.sim_t += self.sim_tau # Time update self.reaction_index = 0 sum_of_as = self.sim_a_mu[self.reaction_index] criteria = self.sim_r2*self.sim_a_0 while sum_of_as < criteria: # Use r2 to determine which reaction will occur self.reaction_index += 1 # Index sum_of_as += self.sim_a_mu[self.reaction_index] try: self.X_matrix += self.N_matrix_transpose[self.reaction_index] self.timestep += 1 except MemoryError as ex: print(ex) sys.exit() else: self.sim_t = self.settings.endtime self.reaction_index = np.nan
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def _STEPS2TIME(step):\n return step/1000.", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def timeStep(self):\n return self.params['h']", "def time_step(dt, mol):\n f = mol.get_force() \n a1 = -f / mol.p1.m\n a2 = f / mol.p2.m\n v_prev1 = mol.p1.vel - dt * a1 / 2\n v_prev2 = mol.p2.vel - dt * a2 / 2\n v_next1 = v_prev1 + a1 * dt\n v_next2 = v_prev2 + a2 * dt\n mol.p1.vel = v_next1\n mol.p2.vel = v_next2\n mol.p1.pos = mol.p1.pos + v_next1 * dt\n mol.p2.pos = mol.p2.pos + v_next2 * dt", "def _TIME2STEPS(time):\n return int(time*1000)", "def _step(self, action: types.NestedArray) -> ts.TimeStep:", "def time_step(self) -> float:\n return self._timestep", "def time_step(self):\n return self._time_step", "def step(self, dt):\n return Vector(self.P.x + dt*self.V.x, self.P.y + dt*self.V.y)", "def step_based(t, eta_init, last_eta, d = 0.01, r = 50):\n return eta_init*d**np.floor((1+t)/r)", "def Step(self, t, dt):\n self.s += dt * self.dsdt + self.weighted_incoming_spikes / self.tau_s\n if self.ref_remaining <= 0:\n self.v += dt * self.dvdt\n else:\n self.v = 0.0\n self.ref_remaining -= dt\n if self.v >= 1.0:\n v0 = self.v_history[-1]\n v1 = self.v\n t0 = t\n tstar = t + dt * (1.0 - v0) / (v1 - v0)\n self.spikes.append(tstar)\n self.v = 1.0\n self.ref_remaining = self.tau_ref - (self.spikes[-1] - t)\n self.v_history.append(self.v)\n self.s_history.append(self.s)\n self.weighted_incoming_spikes = 0.0", "def compute_step(X):\n return MOVING_STEP", "def step_linear_double(step):\n return step * 2", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def step(self, dt=None):\n dt = dt or self._dt\n\n # update time\n self._t += dt\n\n # update temperatures\n if self.improved:\n yp = self._T + dt / 2 * self._dy(self._T)\n self._T += dt * self._dy(yp)\n else:\n self._T += dt * self._dy(self._T)\n\n # update history\n self._history.append((self.t, self.Tb, self.Tw, self.h))", "def time_step(self, dt, Nsteps = 1):\n self.dt = dt\n\n if Nsteps > 0:\n self.psi_mod_x *= self.x_evolve_half\n \n for i in xrange(Nsteps - 1):\n self.k_fft()\n self.psi_mod_k *= self.k_evolve\n self.x_fft()\n self.psi_mod_x *= 
self.x_evolve\n \n self.k_fft()\n\n self.t += dt * Nsteps", "def time_step(self, dt, Nsteps=1):\n assert Nsteps >= 0\n self.dt = dt\n if Nsteps > 0:\n self.psi_mod_x *= self.x_evolve_half\n for num_iter in xrange(Nsteps - 1):\n self.compute_k_from_x()", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t", "def total_steps(self) -> global___Expression:", "def step(self,dt):\r\n\r\n # if float(dt) > 50.0:\r\n # raise Exception(\"dt is too big (>50 seconds)\")\r\n\r\n # send dynamics forward one s\r\n self.rv_eci = rk4_propagate(self.rv_eci,dt,self.earth)", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def step(self, d=1):\n raise NotImplementedError()", "def step(self, dt):\n self.time_elapsed += dt\n self.project()", "def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio", "def step(self):\n if self._step is None:\n return self._n_fft // 2\n else:\n return self._step", "def ab2_timestep(x, u, u_previous, timestep):\r\n return x + timestep * (1.5 * u - 0.5 * u_previous)", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def _step(self) -> None:", "def compute_step(self, step):\n #print(self.velocity)\n self.position += step * self.velocity\n #print(self.position)", "def state_step(self) -> float:\n raise NotImplementedError", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def step( self, name ):\n duration = self.summarize_step( start=self.step_start, step_name=name, level=self.level )\n now = time.time()\n self.step_start = now\n return duration", "def get_time_step(self):\n return self._time_step", "def step(amplitude, t_stop):\n times = np.array([0, t_stop/10, t_stop])\n amps = np.array([0, amplitude, amplitude])\n return times, amps", "def dt(self):\n if isinstance(self._time_axis, are_ax.RegularAxis):\n return self._time_axis.step\n raise RuntimeError(\"Time step is not available for orbits constructed with non-regular time axis\")", "def timestep(self, simsystem, osc, obs):\n pass", "def do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega):\n t = t + dt\n x = x + vx*dt\n y = y + vy*dt\n z = z + vz*dt\n\n vz = vz - S0*vx*omega/m*dt # update vz first to use current value of vx\n v = math.sqrt(vx*vx + vy*vy + vz*vz)\n vx = vx - B2/m*v*vx*dt\n vy = vy - g*dt - B2/m*v*vy*dt\n\n return t, x, y, z, vx, vy, vz", "def step(self, value):\n self.real_time += pd.DateOffset(**{self.time_unit: value})\n self.simu_time += value\n logger.debug(\"NEW TIME\")", "def time_step_spec(self) -> ts.TimeStep:\n return ts.time_step_spec(self.observation_spec(), self.reward_spec())", "def do_dt(r, t):\n return -o(r,t)/(2*t)", "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def full_step_time(self):\n\n total_step_time = 
self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))", "def control_timestep(self):\n if self._overridden_n_sub_steps is not None:\n return self.physics.timestep() * self._overridden_n_sub_steps\n else:\n return self.task.control_timestep", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def time_steps(Tin, ints):\n from numpy import linspace\n \n t_steps=linspace(0, Tin, int(ints*Tin))\n dt=(len(t_steps)/Tin)**-1\n \n return t_steps, dt", "def dt(self):\n return self._data_writer.get_simulation_time_step_ms()", "def internal_timestep(self):\n try:\n return self._internal_dt\n except AttributeError:\n # the component hasn't started running yet\n _ = self.calc_grads_and_timesteps(False, False)\n return self._internal_dt", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def euler_step(x0, t0, dx, dt):\n \n k1 = dx(x0, t0, dt)\n \n x1 = x0 + k1\n \n return x1", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def _step(self, board, elapsedTime):\n\t\tpass", "def time_step_output(self, current_time, time_step):\n pass", "def test_timestep(self):\n class Mock(object):\n def __init__(self):\n self.t = 0.0\n self.dt = None\n\n def evolve(self1, t, dt):\n if self1.dt is not None:\n self.assertAlmostEqual(self1.dt, dt)\n else:\n self1.dt = dt\n\n self.assertAlmostEqual(self1.t, t)\n\n self1.t += self1.dt\n\n t_max = 10.0\n dt = 0.2\n\n G = Mock()\n simulation.Simulation(G, dt=dt).run(t_max)\n self.assertAlmostEqual(G.dt, dt)", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def step(self, state):", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def duration(self):\r\n return self.t2 - self.t1", "def time_based(t, eta_init, last_eta, d = 0.01):\n return last_eta/(1+d*t)", "def __call__(self, SIR, t):\n return self.SIR_timederivative(SIR, t)", "def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")", "def step(self):\n\n for component in self.components:\n component.input(self.current_time)\n\n for component in self.components:\n component.fire()\n\n self.current_time = self.current_time + self.interval\n\n for component in self.components:\n component.output(self.current_time)\n\n return self.current_time", "def step_constant(step):\n return step", "def euler_step(u, t, f, dt):\n \n return u + dt * f(u,t)", "def step(self, time):\n raise \"use method step of class ReactorNet\" \n #return _cantera.reactor_step(self.__reactor_id, time) ", "def horde_step(self, observation):", "def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")", "def compute_time_step(self):\r\n # append the current time/MoI 
to the lists\r\n self.get_outputs()\r\n\r\n print(\"-------------------------\")\r\n print(\"Now Running Cycle {}, t: {:.3e}, Completed {:.2f}%, CFL: {:.3e}\"\r\n .format(self.ind, self.t, 100*self.t/self.end_time,\r\n self.CFL(self.dt)))\r\n\r\n try:\r\n self.solver.solve()\r\n except:\r\n print(\"DIVERGED\")\r\n self.logfile.write(\"%s: STOPPED DUE TO DIVERGENCE \\n\" %\r\n (self.convert_time(time.time()\r\n - self.start_time)))\r\n self.diverged = True\r\n return\r\n\r\n # if we want to save at steps, save all the functions\r\n if self.savesteps:\r\n self.save_funcs(self.u, self.p, self.ftides, self.gravity,\r\n self.centrifugal, self.coriolis, self.forcing)\r\n\r\n # write to log\r\n self.logfile.write(\r\n \"{}: --- Solved Cycle {}, t={:.3e}, Completed {:.2f}%,\\\r\n CFL: {:.3e} --- \\n\".format(\r\n self.convert_time(time.time()-self.start_time), self.ind,\r\n self.t, 100*self.t/self.end_time, self.CFL(self.dt)))\r\n\r\n # update the timestep, for if CFL is too large\r\n self.update_dt()\r\n\r\n # remove the mean velocity\r\n self.adjust_u()\r\n\r\n # assign the current solution to the prior solution\r\n self.u_p_.assign(self.up)\r\n\r\n # update the run index\r\n self.ind += 1", "def improved_euler_step(x0, t0, dx, dt):\n \n k1 = dx(x0, t0, dt)\n k2 = dx(x0 + k1, t0 + dt, dt)\n \n x1 = x0 + (k1+k2) / 2\n \n return x1", "def compute_time_step(self, P_n):\n\n #Pointer reassignment for convenience\n method = self.solver_method\n dt = self.time_step\n theta = self.solver_theta\n T = self.T\n B = self.B\n Q = self.Q\n\n #If mixed, or explicit are specified, otherwise default is implicit\n #therefore it's not actually required to be placed in the input deck\n if method == 'mixed':\n A = ((1.0 - theta) * T + B / dt)\n b = (B / dt - theta * T).dot(P_n) + Q\n P_np1 = spsolve(A, b)\n elif method == 'explicit':\n P_np1 = P_n + 1 / B * dt * (Q - T.dot(P_n))\n else:\n A = T + B / dt\n b = (B / dt).dot(P_n) + Q\n P_np1 = spsolve(A, b)\n\n #Return solution vector from a single time-step\n return P_np1", "def _step(self):\n pass", "def step(self):\r\n raise NotImplementedError", "def step(self, dt):\n \n # get the current stage of the integration\n k_num = self.cstep\n\n for array in self.arrays:\n\n np = array.get_number_of_particles()\n\n # get the mapping for this array and this stage\n to_step = self.step_props[ array.name ][k_num]\n\n for prop in to_step:\n\n initial_prop = to_step[ prop ][0]\n step_prop = to_step[ prop ][1]\n\n initial_arr = array.get( initial_prop )\n step_arr = array.get( step_prop )\n\n updated_array = initial_arr + step_arr * dt\n\n # simply use periodicity for the positions\n if prop in ['x', 'y', 'z']:\n updated_array[numpy.where(updated_array < 0)[0]] += 1\n updated_array[numpy.where(updated_array > 1)[0]] -= 1\n\n array.set( **{prop:updated_array} )\n\n # Increment the step by 1\n self.cstep += 1", "def step(self, step=None):\n pass", "def _step(self, action, state):\n x, dx, theta, dtheta = state\n \n f = (min(1.0, max(-1.0, action)) - 0.5) * self.f * 2.0;\n \n # Alternate equations\n fi = self.mp * self.l * dtheta**2 * np.sin(theta) + (3.0/4) * self.mp * np.cos(theta) * self.g * np.sin(theta)\n mi = self.mp * (1 - (3.0/4) * np.cos(theta)**2)\n ddx = f + np.sum(fi) / (self.mc + np.sum(mi))\n ddtheta = (- 3.0 / (4 * self.l)) * (ddx * np.cos(theta) + self.g * np.sin(theta))\n \n # Equations from \"THE POLE BALANCING PROBLEM\"\n # _ni = (-f - self.mp * self.l * dtheta**2 * np.sin(theta))\n # m = self.mc + np.sum(self.mp)\n # _n = self.g * np.sin(theta) + np.cos(theta) * 
(_ni / m)\n # _d = self.l * (4./3. - (self.mp * np.cos(theta)**2) / m)\n # ddtheta = (_n / _d)\n # ddx = (f + np.sum(self.mp * self.l * np.floor(dtheta**2 * np.sin(theta) - ddtheta * np.cos(theta)))) / m\n \n x += self.t * dx\n dx += self.t * ddx \n theta += self.t * dtheta\n dtheta += self.t * ddtheta\n \n return (x, dx, theta, dtheta)", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def do_dr(r, t):\n return 1/t**0.5", "def step(self, printTime=False):\n self.step_gpu()\n self.nSteps += 1\n self.time += self.dt\n if printTime and\\\n (self.nSteps % self.__class__.__stepsPerTimePrint == 0):\n print(\"Time = %f\" % self.time)", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def step(self, a):\n if self.mirror and self.phase >= self.max_phase / 2:\n a = self.reflect_action(a)\n self.time += 1\n\n self.posbefore = self.robot_skeleton.q[0]\n\n self.do_dart_clocks(a)\n self.set_phase(self.phase + 1)\n\n self.posafter = self.robot_skeleton.q[0]\n\n # ref_pos, ref_vel = self.get_kin_state()\n # self.set_state(ref_pos, ref_vel)\n\n # common behavior for returning step() results\n done = self.is_done()\n ob = self._get_obs()\n reward = self.compute_reward()\n self.reward_buffer.append(reward)\n self.total_reward += reward\n\n self.energy += np.square(a).sum()\n return ob, reward, done, {}", "def updateTimeStep(self, newDt):\n self.timeStep = newDt", "def calc_speed2(self):\n if self.vars['step'] > 0:\n target_tensor = torch.abs(self.state - self.state_prev)\n speed = torch.max(target_tensor) / self.vars['dt']\n else: # this is the first step, no calculation is possible\n speed = float('NaN')\n return speed", "def onTimeStep(self, timeStep):\n pass", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def step(self, state, action, Time_matrix):\n # Get the next state and the other time durations\n next_state, waiting_time, pickup_time, drop_time = self.next_state_func(\n state, action, Time_matrix)\n\n # getting the reward for the ride\n rewards = self.reward_func(waiting_time, pickup_time, drop_time)\n total_time = waiting_time + pickup_time + drop_time\n \n return rewards, next_state, total_time", "def step(self):\n\n self.last_input_time = current_time()\n self.current_time = self.last_input_time\n\n for component in self.components:\n component.input(self.last_input_time)\n\n for component in self.components:\n component.fire()\n\n self.last_spent = current_time() - self.last_input_time\n last_dt = self.interval - self.last_spent\n\n self.lagged = False\n if last_dt > 0.0:\n time.sleep(last_dt)\n elif last_dt < 0.0:\n self.lagged = True\n\n self.last_output_time = current_time()\n self.current_time = self.last_output_time\n\n for component in self.components:\n component.output(self.last_output_time)\n\n self.last_output_time = current_time()\n self.current_time = self.last_output_time\n\n return self.current_time", "def integrate(self, t):", "def step(self):\n return self._step", "def 
calculate_travel_time_complex(distance_meters, accel_mps2):\n time = 0\n distance_progress = 0\n speed = 0\n halfway_point = distance_meters / 2\n while distance_progress < halfway_point:\n time = time + 1\n speed = speed + accel_mps2\n distance_progress = distance_progress + speed\n \"\"\"\n Output progress \n print \"{0} seconds | speed: {1} m/s | distance traveled: {2} m\".format(time, speed, distance_progress)\n \"\"\"\n time = time * 2\n return [time, speed]", "def dspmt(t):\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)", "def step ( self ) :\n return self.__step", "def step(self):\n self.solver.integrate(self.t)\n self.state = self.solver.y", "def step(self) -> int:\n return self._step", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def step(self):\n raise NotImplementedError", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)" ]
[ "0.73281395", "0.72096485", "0.7029386", "0.70150405", "0.6889623", "0.68617964", "0.68476254", "0.6813826", "0.68087006", "0.66766346", "0.6659325", "0.6623326", "0.660472", "0.6590279", "0.65758175", "0.656104", "0.6550675", "0.6538886", "0.6505917", "0.649663", "0.6495107", "0.6491536", "0.6477955", "0.64561236", "0.639699", "0.6385628", "0.63855225", "0.6370513", "0.6369613", "0.63559836", "0.6346982", "0.63459426", "0.6344178", "0.63333106", "0.63194364", "0.6318919", "0.62783885", "0.62516284", "0.62371355", "0.6236105", "0.623307", "0.62283283", "0.6210338", "0.61983734", "0.6179523", "0.6167522", "0.61660314", "0.6162046", "0.6145428", "0.6135767", "0.61276966", "0.61276966", "0.61272734", "0.61049575", "0.60977316", "0.6089859", "0.60826623", "0.6078856", "0.6078656", "0.60719734", "0.6051141", "0.6044365", "0.60439473", "0.60388374", "0.6037773", "0.602823", "0.5982976", "0.59753907", "0.596807", "0.59618676", "0.594776", "0.5947164", "0.5944523", "0.5942357", "0.5941833", "0.59387994", "0.59386903", "0.593398", "0.59326947", "0.59295607", "0.5917935", "0.5915762", "0.5915389", "0.5912448", "0.5911245", "0.5899397", "0.5897037", "0.5883868", "0.5875577", "0.58724743", "0.58721167", "0.5857501", "0.58568215", "0.585302", "0.5852631", "0.5852521", "0.5841849", "0.5839486", "0.5831864", "0.582865", "0.582865" ]
0.0
-1
r""" Find named groups in `pattern` and replace them with the group name. E.g., 1. ^(?P\w+)/b/(\w+)$ ==> ^/b/(\w+)$ 2. ^(?P\w+)/b/(?P\w+)/$ ==> ^/b//$
def replace_named_groups(pattern):
    named_group_indices = [
        (m.start(0), m.end(0), m.group(1))
        for m in named_group_matcher.finditer(pattern)
    ]
    # Tuples of (named capture group pattern, group name).
    group_pattern_and_name = []
    # Loop over the groups and their start and end indices.
    for start, end, group_name in named_group_indices:
        # Handle nested parentheses, e.g. '^(?P<a>(x|y))/b'.
        unmatched_open_brackets, prev_char = 1, None
        for idx, val in enumerate(list(pattern[end:])):
            # If brackets are balanced, the end of the string for the current
            # named capture group pattern has been reached.
            if unmatched_open_brackets == 0:
                group_pattern_and_name.append((pattern[start:end + idx], group_name))
                break

            # Check for unescaped `(` and `)`. They mark the start and end of a
            # nested group.
            if val == '(' and prev_char != '\\':
                unmatched_open_brackets += 1
            elif val == ')' and prev_char != '\\':
                unmatched_open_brackets -= 1
            prev_char = val

    # Replace the string for named capture groups with their group names.
    for group_pattern, group_name in group_pattern_and_name:
        pattern = pattern.replace(group_pattern, group_name)
    return pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename_regex_groups(pattern):\n\n grp_rewritten = ''\n mapping = dict()\n rmapping = dict()\n cnt = 1\n for item in _NAMED_GRP_PATTERN.split(pattern):\n if _NAMED_GRP_PATTERN.fullmatch(item):\n old_name = item[len(_NAMED_GRP_PREFIX):-len(_NAMED_GRP_SUFFIX)]\n new_name = 'G' + str(cnt)\n cnt += 1\n\n mapping[new_name] = old_name\n rmapping[old_name] = new_name\n\n item = _NAMED_GRP_PREFIX + new_name + _NAMED_GRP_SUFFIX\n\n grp_rewritten += item\n\n ref_rewritten = ''\n for item in _NAMED_REF_PATTERN.split(grp_rewritten):\n if _NAMED_REF_PATTERN.fullmatch(item):\n old_name = item[len(_NAMED_REF_PREFIX):-len(_NAMED_REF_SUFFIX)]\n new_name = rmapping.get(old_name, old_name)\n\n item = _NAMED_REF_PREFIX + new_name + _NAMED_REF_SUFFIX\n\n ref_rewritten += item\n\n return ref_rewritten, mapping", "def replace_unnamed_groups(pattern):\n unnamed_group_indices = [m.start(0) for m in unnamed_group_matcher.finditer(pattern)]\n # Indices of the start of unnamed capture groups.\n group_indices = []\n # Loop over the start indices of the groups.\n for start in unnamed_group_indices:\n # Handle nested parentheses, e.g. '^b/((x|y)\\w+)$'.\n unmatched_open_brackets, prev_char = 1, None\n for idx, val in enumerate(list(pattern[start + 1:])):\n if unmatched_open_brackets == 0:\n group_indices.append((start, start + 1 + idx))\n break\n\n # Check for unescaped `(` and `)`. They mark the start and end of\n # a nested group.\n if val == '(' and prev_char != '\\\\':\n unmatched_open_brackets += 1\n elif val == ')' and prev_char != '\\\\':\n unmatched_open_brackets -= 1\n prev_char = val\n\n # Remove unnamed group matches inside other unnamed capture groups.\n group_start_end_indices = []\n prev_end = None\n for start, end in group_indices:\n if prev_end and start > prev_end or not prev_end:\n group_start_end_indices.append((start, end))\n prev_end = end\n\n if group_start_end_indices:\n # Replace unnamed groups with <var>. 
Handle the fact that replacing the\n # string between indices will change string length and thus indices\n # will point to the wrong substring if not corrected.\n final_pattern, prev_end = [], None\n for start, end in group_start_end_indices:\n if prev_end:\n final_pattern.append(pattern[prev_end:start])\n final_pattern.append(pattern[:start] + '<var>')\n prev_end = end\n final_pattern.append(pattern[prev_end:])\n return ''.join(final_pattern)\n else:\n return pattern", "def replace_re_group(expr, group, pattern):\n r = \"\"\n lg = len(group)\n while expr:\n idx = expr.find(group)\n if idx == -1:\n return r + expr # No more groups found\n r += expr[:idx]\n expr = expr[idx + lg:]\n level = 1 # Level of parenthesis nesting\n while expr:\n c = expr[0]\n expr = expr[1:]\n if c == \"\\\\\":\n # Skip quoted character\n expr = expr[1:]\n continue\n elif c == \"(\":\n # Increase nesting level\n level += 1\n continue\n elif c == \")\":\n # Decrease nesting level\n level -= 1\n if level == 0:\n # Replace with pattern and search for next\n r += pattern\n break\n return r + expr", "def remove_named_groups(pattern: str, noncapturing: bool = True) -> str:\n if noncapturing:\n new_parens = \"(?:\"\n else:\n new_parens = \"(\"\n\n return re.sub(r\"\\(\\?P<\\w+>\", new_parens, pattern)", "def InterpolateGrouping(self, pattern):\n components = []\n offset = 0\n for match in GROUPING_PATTERN.finditer(pattern):\n components.append([pattern[offset:match.start()]])\n\n # Expand the attribute into the set of possibilities:\n alternatives = match.group(1).split(\",\")\n components.append(_unique(alternatives))\n offset = match.end()\n\n components.append([pattern[offset:]])\n # Now calculate the cartesian products of all these sets to form all\n # strings.\n for vector in itertools.product(*components):\n yield u\"\".join(vector)", "def replace_pattern(\n gm: GraphModule,\n pattern: Union[Callable, GraphModule],\n replacement: Union[Callable, GraphModule],\n match_filters: List[Callable[[\"InternalMatch\", Graph, Graph], bool]] = None, # type: ignore[name-defined]\n) -> List[Match]:\n match_and_replacements = _replace_pattern(gm, pattern, replacement, match_filters)\n return [\n Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements\n ]", "def generate_regex(self, pattern_to_glob):\n\n # First thing, replace '.' with '\\.' 
and make sure we're going\n # to match the start and the end of the pattern string.\n pattern_to_glob = re.sub(r\"\\.\", r\"\\.\", pattern_to_glob)\n pattern_to_glob = r\"^\" + pattern_to_glob + r\"$\"\n\n found_values = []\n new_val = pattern_to_glob\n\n # Find all the named attributes\n all_matches = re.findall(r\"%.+?%\", pattern_to_glob)\n\n for match_thing in all_matches:\n name = match_thing[1:-1]\n\n if match_thing not in found_values:\n # Change to a named group.\n group_string = r\"(?P<\" + name + r\">.+?)\"\n new_val = re.sub(match_thing, group_string, new_val, count=1)\n found_values.append(match_thing)\n else:\n # Use the existing group.\n group_string = r\"(?P=\" + name + r\")\"\n new_val = re.sub(match_thing, group_string, new_val, count=1)\n\n return new_val", "def capture_group(text: AnsibleUnsafeText, re_pattern: str, group=0) -> str:\n match_group = ''\n try:\n match_group = re.search(re_pattern, str(text)).group(group)\n except AttributeError as ae:\n print(type(ae).__name__)\n traceback.print_stack()\n return to_text(match_group)", "def add_pattern(self, name, pattern=None):\n self._pattern_reg.add_pattern(name, pattern)", "def __init__(self, regex, groups, nestedPattern = None, ignored = dict()):\r\n self.regex = regex.format(*[x.group() for x in groups])\r\n self.groups = groups\r\n self.ignored = ignored\r\n self.nestedPattern = nestedPattern\r\n self.name = \"_\"\r\n while self.name in self.groups:\r\n self.name += \"_\"", "def simplify_regex(pattern):\n pattern = replace_named_groups(pattern)\n pattern = replace_unnamed_groups(pattern)\n # clean up any outstanding regex-y characters.\n pattern = pattern.replace('^', '').replace('$', '')\n if not pattern.startswith('/'):\n pattern = '/' + pattern\n return pattern", "def test_replace_group(self):\n pass", "def store_pattern(self, name, cpattern):\n # Store the compiled regular expression in \"pattern\"\n # with the key \"name\"\n if not self.has_pattern(name):\n self.__patterns[name] = cpattern\n return cpattern\n # Raise an exception if a pattern has already been\n # stored with the same name\n raise KeyError", "def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. 
/api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names", "def register_pattern(name, pattern_factory, override=None):\n\tif not isinstance(name, string_types):\n\t\traise TypeError(\"name:{!r} is not a string.\".format(name))\n\tif not callable(pattern_factory):\n\t\traise TypeError(\"pattern_factory:{!r} is not callable.\".format(pattern_factory))\n\tif name in _registered_patterns and not override:\n\t\traise AlreadyRegisteredError(name, _registered_patterns[name])\n\t_registered_patterns[name] = pattern_factory", "def update_word_pattern(word, pattern, letter):\r\n # make pattern string as list for changing object inside it\r\n pattern_list = list(pattern)\r\n # Go through the pattern and reveal the letters.\r\n for i in range(len(word)):\r\n # Check where the letter exist, and reveal it on the pattern.\r\n if word[i] == letter:\r\n pattern_list[i] = letter\r\n # Rejoin the list onto one string\r\n pattern = \"\".join(pattern_list)\r\n return pattern", "def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n 
select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def resolve_pattern(pattern, args):\n if args is None or len(args) == 0:\n return pattern\n elif pattern.find('%') >= 0:\n return pattern % args\n elif pattern.find(\"{\") >= 0:\n # star magic does not work for single args\n return pattern.format(*args)\n else:\n # fixed pattern, no placeholders\n return pattern", "def lookup_pattern(name):\n\treturn _registered_patterns[name]", "def update_weight_name(repl_patterns: Dict[str, str], weight_name: str) -> str:\n # Create a regular expression from all of the dictionary keys\n regex = re.compile('|'.join(map(re.escape, repl_patterns.keys())))\n\n # For each match, look up the corresponding value in the repl_patterns dict.\n return regex.sub(lambda match: repl_patterns[match.group(0)], weight_name)", "def test_replace_groups(self):\n pass", "def _modify_entities_of_placeholder_pattern(pattern,mode='append'):\n if mode == 'append':\n for keyword in ['%task%','%session%','%subject%','%run%','%acquisition%']:\n pattern = pattern.replace(keyword,'%entities.'+keyword[1:])\n pattern = pattern.replace('%dataset%','%dataset_description.Name%')\n elif mode == 'cut':\n for keyword in ['%task%','%session%','%subject%','%run%','%acquisition%']:\n pattern = pattern.replace('%entities.'+keyword[1:],keyword)\n pattern = pattern.replace('%dataset_description.Name%','%dataset%')\n return pattern", "def ensure_grouping(pattern, begins=begins_grouped, newgroup=silent_group):\n\n if begins.match(pattern) and ends_grouped.match(pattern):\n # correspond last ) and initial ( ? 
then it is already grouped\n if list(closing_parentheses_match(pattern))[-1] == 0:\n return pattern\n\n return newgroup(pattern)", "def update_word_pattern(word,pattern,letter):\r\n new_pattern = list()\r\n for i in range(len(word)):\r\n if word[i] == letter and pattern[i] == '_':\r\n new_pattern.append(letter)\r\n else:\r\n new_pattern.append(pattern[i])\r\n return_pattern=''.join(new_pattern)\r\n return return_pattern", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def combine_patterns(patterns: Sequence[str], groupname: Optional[str] = None) -> str:\n tag_re = re.compile(r\"\\(\\?P<(\\w+)>\")\n stripped_patterns = list()\n for p in patterns:\n tags = list(tag_re.finditer(p))\n prefix = f\"{tags[0].group(1)}_\"\n new_p = p\n for t in tags[:0:-1]:\n start, end = t.span(1)\n new_p = \"\".join((new_p[:start], prefix, new_p[start:]))\n stripped_patterns.append(new_p)\n if groupname is None:\n combined = rf\"(?:{r'|'.join(stripped_patterns)})\"\n else:\n combined = rf\"(?P<{groupname}>{r'|'.join(stripped_patterns)})\"\n\n return combined", "def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))", "def parse_pattern(pattern):\n pattern_type, _, group_spec = pattern.partition('(')\n # Groups are separated by '+' in a composite pattern.\n groups = [\n int(group) for group in group_spec[:-1].split('+')\n ] if group_spec else [1]\n\n # Some light lists use dots, some don't, just throw them away\n return pattern_type.lower().replace('.', ''), groups", "def _match_fn_name_pattern(\n self, fn_name, pattern\n ) -> Union[str, Tuple[str, str], None]:\n if isinstance(pattern, RegexPattern):\n m = pattern.search(fn_name)\n groups = m and m.groups()\n if groups:\n if len(groups) == 1:\n return groups[0]\n if len(groups) > 2:\n raise ValueError(\n f\"The `out_pattern` {pattern} matched on '{fn_name}' >2 groups: {groups}\"\n )\n return sfxed(*reversed(groups))\n elif callable(pattern):\n return pattern(fn_name)\n elif fn_name.startswith(pattern):\n return fn_name[len(pattern) :]", "def pattern_modifier(pattern, X, y):\n mod_pattern = pattern.reshape(n_channels, n_samples)\n mod_pattern = mod_pattern * kernel[np.newaxis, :]\n return mod_pattern.reshape(pattern.shape)", "def from_regex(pattern:str) -> str:\n raise NotImplementedError()", "def match_pattern(pattern, input, bindings=None):\n\n # Check to see if matching failed before we got here.\n if bindings is False:\n return False\n \n # When the pattern and the input are identical, we have a match, and\n # no more bindings need to be found.\n if pattern == input:\n return bindings\n\n bindings = bindings or {}\n\n # Match input and pattern according to their types.\n if is_segment(pattern):\n token = pattern[0] # segment variable is the first token\n var = token[2:] # segment variable is of the form ?*x\n return 
match_segment(var, pattern[1:], input, bindings)\n elif is_variable(pattern):\n var = pattern[1:] # single variables are of the form ?foo\n return match_variable(var, [input], bindings)\n elif contains_tokens(pattern) and contains_tokens(input):\n # Recurse:\n # try to match the first tokens of both pattern and input. The bindings\n # that result are used to match the remainder of both lists.\n return match_pattern(pattern[1:],\n input[1:],\n match_pattern(pattern[0], input[0], bindings))\n else:\n return False", "def repeat(pattern, capture=True):\r\n return (r\"(\" if capture else r\"(?:\") + pattern + r\")+\"", "def replacement(cls, search_pattern: str, replacement: str) -> PhonTransform:\n sub_func = lambda match: replacement\n return cls(search_pattern, sub_func)", "def _format_pattern(pattern: str) -> str:\n return pattern.rstrip('*') + '**'", "def pattern(self, pattern):\n if pattern is None:\n raise ValueError(\"Invalid value for `pattern`, must not be `None`\") # noqa: E501\n\n self._pattern = pattern", "def get_pattern(self, name):\n return self._pattern_reg[name]", "def regexp_replace(\n self, pattern: Any, replacement: Any, flags: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(\n regexp_replace_op,\n pattern,\n replacement=replacement,\n flags=flags,\n )", "def make_pattern(current_pattern):\n pattern = ''.join([str(b) for b in current_pattern])\n return pattern", "def convertPattern(pattern, sign):\r\n\r\n # Check for include vs exclude patterns.\r\n if pattern[:2] == \"+ \":\r\n pattern = pattern[2:]\r\n sign = \"+\"\r\n elif pattern[:2] == \"- \":\r\n pattern = pattern[2:]\r\n sign = \"-\"\r\n\r\n # Express windows, mac patterns in unix patterns (rsync.py extension).\r\n separator = os.path.normpath(\"/\")\r\n if separator != \"/\":\r\n pattern = re.sub(re.escape(separator), \"/\", pattern)\r\n\r\n # If pattern contains '/' it should match from the start.\r\n temp = pattern\r\n if pattern[0] == \"/\":\r\n pattern = pattern[1:]\r\n if temp[-1] == \"/\":\r\n temp = temp[:-1]\r\n\r\n # Convert pattern rules: ** * ? 
to regexp rules.\r\n pattern = re.escape(pattern)\r\n pattern = string.replace(pattern, \"\\\\*\\\\*\", \".*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \"[^/]*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \".*\")\r\n\r\n if \"/\" in temp:\r\n # If pattern contains '/' it should match from the start.\r\n pattern = \"^\\\\/\" + pattern\r\n else:\r\n # Else the pattern should match the all file or folder name.\r\n pattern = \"\\\\/\" + pattern\r\n\r\n if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\r\n # File patterns should match also folders.\r\n pattern = pattern + \"\\\\/?\"\r\n\r\n # Pattern should match till the end.\r\n pattern = pattern + \"$\"\r\n return (sign, pattern)", "def sql_name_pattern(pattern):\n\n inquotes = False\n relname = ''\n schema = None\n pattern_len = len(pattern)\n i = 0\n\n while i < pattern_len:\n c = pattern[i]\n if c == '\"':\n if inquotes and i + 1 < pattern_len and pattern[i + 1] == '\"':\n relname += '\"'\n i += 1\n else:\n inquotes = not inquotes\n elif not inquotes and c.isupper():\n relname += c.lower()\n elif not inquotes and c == '*':\n relname += '.*'\n elif not inquotes and c == '?':\n relname += '.'\n elif not inquotes and c == '.':\n # Found schema/name separator, move current pattern to schema\n schema = relname\n relname = ''\n else:\n # Dollar is always quoted, whether inside quotes or not.\n if c == '$' or inquotes and c in '|*+?()[]{}.^\\\\':\n relname += '\\\\'\n relname += c\n i += 1\n\n if relname:\n relname = '^(' + relname + ')$'\n\n if schema:\n schema = '^(' + schema + ')$'\n\n return schema, relname", "def _regexify_matching_pattern(rule_pattern: str, wildcard_optional=False) -> str:\n return rule_pattern.replace(\"*\", f\"(.{'+*'[wildcard_optional]})\")", "def _group_for(file_path):\n return re.sub(pattern_pair_lane_combo, \"\", os.path.basename(file_path))", "def replace(text,pattern,replace=\"\"):\n\n thisFunc = inspect.currentframe().f_code.co_name\n result = re.sub(pattern,replace,text)\n return result", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def rename(dir, patterns, titlePattern, count=0):\n\tcount_i = count\n\tfor pattern in patterns:\n\t\tfor pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n\t\t\ttitle, ext = os.path.splitext(os.path.basename(pathAndFilename))\n\n\t\t\tprint(\"Find {}\".format(title))\n\t\t\tos.rename(pathAndFilename, os.path.join(dir, titlePattern % (count_i)))\n\t\t\tcount_i += 1", "def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )", "def bb_groupname(hit):\n try:\n group = hit.group(1)\n G = Group.objects.get(name=group)\n T = loader.get_template('webview/t/group.html')\n C = Context({'G' : G})\n return T.render(C)\n except:\n # This is normally thrown when the group is invalid. Return the original result,\n # Only we add an icon to indicate an invalid group.\n return '<img src=\"/static/user_error.png\" alt=\"user\" border=\"0\" /> %s' % (group)", "def _patternToRegEx(self,pattern):\n if (pattern == \"*\"):\n # special case that matches anything\n regex = \".*?\"\n else:\n regex = pattern\n if (regex.find(\".\") >= 0):\n regex = regex.replace(\".\", \"\\.\")\n #endIf\n \n asteriskIndex = regex.find(\"*\")\n if (asteriskIndex < 0):\n # no wildcard in pattern\n regex = \"%s$\" % regex\n elif (asteriskIndex + 1 != len(regex)):\n raise TraceSpecificationException(\"Invalid entity pattern: %s. 
A wildcard character may only be used to terminate a pattern.\" % pattern)\n else:\n # remove * and add \".*?\"\n regex = \"%s.*?\" % regex[:-1]\n #endIf\n #endIf\n return regex", "def _spacyfy(\n matches: List[List[Optional[Tuple[str, str]]]], pattern: List[Dict[str, Any]]\n) -> List[List[Dict[str, Any]]]:\n new_patterns = []\n if matches:\n for match in matches:\n new_pattern = deepcopy(pattern)\n for i, token in enumerate(match):\n if token:\n del new_pattern[i][token[0]]\n new_pattern[i][\"TEXT\"] = token[1]\n new_patterns.append(new_pattern)\n return new_patterns", "def parse_file_replace(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n full_data = fisier.read()\n fisier.close()\n\n try:\n fisier = open(path, \"w+\")\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n\n data = \"\"\n for line in full_data:\n data += line\n\n if args.ignore_case:\n pattern = re.compile(re.escape(args.pattern), re.IGNORECASE)\n pattern.sub(args.pattern, data)\n else:\n data = data.replace(args.pattern, args.replace)\n\n fisier.write(data)\n fisier.close()", "def convert_pattern(pattern, pattern_type=None):\n\tif pattern_type == 'regex':\n\t\treturn re.compile(pattern)\n\telif pattern_type == 'wildcard':\n\t\treturn re.compile(fnmatch.translate(pattern))\n\treturn re.compile(re.escape(pattern))", "def relate_pattern(a, b, pattern, **kwargs):\n return lib.relate_pattern(a, b, pattern, **kwargs)", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def patch_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", 
"def match(self, pattern):\n if isinstance(pattern, Var):\n substitution = {pattern: self}\n elif isinstance(pattern, Term) and self.function == pattern.function \\\n and len(self.arguments) == len(pattern.arguments):\n terms = [Term.__match(self.arguments[idx], pattern.arguments[idx])\n for idx in range(0, len(self.arguments))]\n substitution = reduce(merge, terms)\n else:\n substitution = None\n return substitution", "def setPattern(self, value):\n return self._set(pattern=value)", "def remove_pattern(self, name):\n self._pattern_reg.__delitem__(name)", "def convertPattern(self,pattern):\n images.convertSameFldImages(pattern,self.pathDir,self.img_destDir)\n return True", "def filter_group_names(groups, patterns):\n group_names = [g['logGroupName'] for g in groups]\n matched = set()\n for p in patterns:\n matched.update(fnmatch.filter(group_names, p))\n return [g for g in groups if g['logGroupName'] in matched]", "def filteredUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/state/(?P<state>\\w+)',\n '/group/(?P<group>[^/]+)',\n '/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)',\n '/server/(?P<server>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results", "def build_match_and_apply_functions(pattern, search, replace):\n\n def matches_rule(word):\n \"\"\" Check if word contains pattern.\n \"\"\"\n return re.search(pattern, word)\n\n def apply_rule(word):\n \"\"\" Replace text with replacement in word.\n \"\"\"\n return re.sub(search, replace, word)\n\n return (matches_rule, apply_rule)", "def regex_group_split(file_list, pattern, output=True):\n split_list = list([]) # tuple probz\n\n dicdic ={ \"Jan\":\"01\",\"Feb\":\"02\",\"Mar\":\"03\",\n \"Apr\":\"04\",\"May\":\"05\",\"June\":\"06\",\"Jun\":\"06\",\n \"July\":\"07\",\"Jul\":\"07\",\"Aug\":\"08\",\"Sep\":\"09\",\n \"Oct\":\"10\",\"Nov\":\"11\",\"Dec\":\"12\",\n \"JAN\":\"01\",\"FEB\":\"02\",\"MAR\":\"03\",\n \"APR\":\"04\",\"MAY\":\"05\",\"JUN\":\"06\",\n \"JUL\":\"07\",\"AUG\":\"08\",\"SEP\":\"09\",\n \"OCT\":\"10\",\"NOV\":\"11\",\"DEC\":\"12\"}\n\n for file in file_list:\n split_file = list(re.match(pattern, file).groups())\n #split_list.append(file.replace(\" \", \"\"))\n split_file[0], split_file[1], split_file[2], split_file[3], split_file[4], split_file[5] = \\\n split_file[0] + \" \", split_file[1], split_file[2] + \"-\", split_file[3]+ \"-\", split_file[4], split_file[5]\n split_list.append(\"\".join(split_file))\n \n if (len(split_list) > 0 and output):\n #print colors.RED + '\\033[1m' + \"renames:\" + '\\033[0m'\n for split in split_list:\n print colors.RED + split + colors.ENDC\n\n return split_list", "def replace_with(self, replacement):\n\n # FIND NAMES IN replacement\n parts = list(regex_parameters.split(replacement, include_separators=True))\n\n def replacer(tokens):\n acc = []\n for s, n in zip(parts, parts[1:]):\n acc.append(s)\n acc.append(text(tokens[n]))\n acc.append(parts[-1])\n return \"\".join(acc)\n\n return self / replacer", "def load_url_pattern_names(patterns, include_with_args=True):\n global URL_NAMES\n for pat in patterns:\n if pat.__class__.__name__ == 'RegexURLResolver': # load patterns from this 
RegexURLResolver\n load_url_pattern_names(pat.url_patterns, include_with_args)\n elif pat.__class__.__name__ == 'RegexURLPattern': # load name from this RegexURLPattern\n if pat.name is not None and pat.name not in URL_NAMES:\n if include_with_args or re.compile(pat.regex).groups == 0:\n URL_NAMES.append(pat.name)\n return URL_NAMES", "def register_patterns(self) -> None:\n\n if (patterns := getattr(self, \"WORDS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(rf\"\\b{k}\\b\", v))\n\n if (patterns := getattr(self, \"PATTERNS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(k, v))\n\n if (replacements := getattr(self, \"REPLACEMENTS\", None)) is not None:\n for replacement in replacements:\n self.register_replacement(replacement)", "def replace_groups(self):\n newstr = []\n for state in self._parsed:\n newstr.append(self._handle_state(state))\n return ''.join(newstr)", "def pathify(urlpattern, **context):\n\n repl = lambda match: context[match.group(1)]\n path = re.sub(r':([a-z]+)', repl, urlpattern)\n return tuple(path[1:].split('/'))", "def _set_group_name(self):\n self._scene_gen.group_name = self._group_name_le.text()\n self._refresh_view()", "def replace(self, pattern, substitute, pattern_type=None):\n\t\tpattern = convert_pattern(pattern, pattern_type)\n\t\twith self.AutoSplitlines():\n\t\t\tself.lines = [(pattern.sub(substitute, line) if pattern.search(line) else line) for line in self.lines]", "def bb_group(hit):\n try:\n groupid = hit.group(1)\n group = Group.objects.get(id=groupid)\n T = loader.get_template('webview/t/group.html')\n C = Context({'G' : group})\n return T.render(C)\n except:\n return \"[group]%s[/group]\" % groupid", "def generate(self, name):\n if not '?' 
in self._pattern and not '[' in self._pattern:\n try:\n return self._pattern.replace('*', '{}').format(name)\n except IndexError:\n # multiple *\n pass\n return ''", "def get_pattern(self, name):\n return self.__patterns[name]", "def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex", "def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt", "def set_group_name(self, name):\n self.groupname = name", "def lreplace(pattern, sub, string):\n return re.sub('^%s' % pattern, sub, string)", "def replace_by_character_map(self, pattern_name: str, characters_map: dict):\n if pattern_name not in self.patterns:\n self.patterns[pattern_name] = generate_multiple_replace_pattern(characters_map)\n\n self.value = multiple_replace(\n characters_map, self.value, self.patterns[pattern_name]\n )\n return self", "def pattern_filter(patterns, name):\n return [pat for pat in patterns if fnmatch.fnmatchcase(name, pat)]", "def remove_url_patterns(filename, pattern):\n name = get_name_from_filename(filename)\n ext = get_extension_from_filename(filename)\n repl = \" \"\n new_name = re.sub(pattern, repl, name)\n\n return new_name + ext", "def do_pattern(l, pattern, repeat=1):\n command = create_pattern_command(pattern, repeat)\n l.write(command)", "def create_group_memberships(self, group_pattern, test_data):\n for group_spec in group_pattern:\n # Each membership specification is a dict of the form:\n #\n # {'group': 0, 'users': [list of user indexes]}\n #\n # Add all users in the list to the specified group, first\n # converting from index to full entity ID.\n group_value = test_data['groups'][group_spec['group']]['id']\n for user_index in group_spec['users']:\n user_value = test_data['users'][user_index]['id']\n self.identity_api.add_user_to_group(user_value, group_value)\n return test_data", "def expand(self, m):\n\n if m is None:\n raise ValueError(\"Match is None!\")\n\n sep = m.re.pattern[:0]\n if isinstance(sep, bytes) != self._bytes:\n raise TypeError('Match string type does not match expander string type!')\n text = []\n # Expand string\n for index in range(0, len(self.literals)):\n l = self.literals[index]\n if l is None:\n g_index = self._get_group_index(index)\n span_case, single_case, capture = self._get_group_attributes(index)\n if not self.use_format:\n # Non format replace\n try:\n l = m.group(g_index)\n if l is None:\n l = sep\n except IndexError: # pragma: no cover\n raise IndexError(\"'{}' is out of range!\".format(g_index))\n else:\n # String format replace\n try:\n obj = m.captures(g_index)\n except IndexError: # pragma: no cover\n raise IndexError(\"'{}' is out of range!\".format(g_index))\n l = _util.format_captures(\n obj,\n capture,\n _util._to_bstr if isinstance(sep, bytes) else _util._to_str,\n sep\n )\n if span_case is not None:\n if span_case == _LOWER:\n l = l.lower()\n else:\n l = l.upper()\n if single_case is not None:\n if single_case == _LOWER:\n l = l[0:1].lower() + l[1:]\n else:\n l = l[0:1].upper() + l[1:]\n text.append(l)\n\n return sep.join(text)", "def handle_url_module(js_patterns, module_name, prefix=\"\"):\n if isinstance(module_name, basestring):\n __import__(module_name)\n root_urls = sys.modules[module_name]\n 
patterns = root_urls.urlpatterns\n elif isinstance(module_name, types.ModuleType):\n root_urls = module_name\n patterns = root_urls.urlpatterns\n else:\n root_urls = module_name\n patterns = root_urls\n\n for pattern in patterns:\n if issubclass(pattern.__class__, RegexURLPattern):\n if pattern.name:\n full_url = prefix + pattern.regex.pattern\n for chr in [\"^\",\"$\"]:\n full_url = full_url.replace(chr, \"\")\n #handle kwargs, args\n kwarg_matches = RE_KWARG.findall(full_url)\n if kwarg_matches:\n for el in kwarg_matches:\n #prepare the output for JS resolver\n full_url = full_url.replace(el[0], \"<%s>\" % el[1])\n #after processing all kwargs try args\n args_matches = RE_ARG.findall(full_url)\n if args_matches:\n for el in args_matches:\n full_url = full_url.replace(el, \"<>\")#replace by a empty parameter name\n js_patterns[pattern.name] = \"/\" + full_url\n elif issubclass(pattern.__class__, RegexURLResolver):\n if pattern.urlconf_name:\n handle_url_module(js_patterns, pattern.urlconf_name, prefix=pattern.regex.pattern)", "def formatPattern(self, pat):\n\n if not pat:\n return ''\n else:\n return pat", "def add_pattern(self, start, stop, pattern):\n self.coord2pattern[start] = []\n self.coord2pattern[start].append(pattern)", "def _compile_fnmatch(pattern: str) -> re.Pattern:\n return re.compile(translate(pattern))", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def add_pattern(self, pattern, callback):\n self.patterns.append((pattern, callback))", "def pattern_to_regex(pattern):\n\n pattern = pattern.replace('.', r'\\.')\n pattern = pattern.replace('?', r'.')\n pattern = pattern.replace('*', r'.*')\n\n if pattern.endswith('/'):\n pattern += r'.*'\n elif pattern.endswith('.*'):\n pattern = pattern[:-2]\n pattern += r'(?!.*?/.*?)'\n\n return pattern", "def __init__(self, pattern):\n self._pattern = pattern.lower()", "def named_back_reference(name:str) -> str:\n # TODO error handling \n return f\"\\\\k<{name}>\"", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def translate(self, pattern):\n\n if not pattern:\n return re.compile('')\n\n # Express windows, mac patterns in unix patterns.\n pattern = os.path.normcase(pattern).replace(os.sep, \"/\")\n\n # If pattern contains '/' it should match from the start.\n temp = pattern\n if pattern[0] == \"/\":\n pattern = pattern[1:]\n if temp[-1] == \"/\":\n temp = temp[:-1]\n\n # Convert pattern rules: ** * ? 
to regexp rules.\n pattern = re.escape(pattern)\n pattern = pattern.replace(\"\\\\?\", \"[^/]\")\n pattern = pattern.replace(\"\\\\*\\\\*\", \".*\")\n pattern = pattern.replace(\"\\\\*\", \"[^/]*\")\n pattern = pattern.replace(\"\\\\*\", \".*\")\n\n if \"/\" in temp:\n # If pattern contains '/' it should match from the start.\n pattern = \"^\\\\/\" + pattern\n else:\n # Else the pattern should match the all file or folder name.\n pattern = \"\\\\/\" + pattern\n\n if pattern[-2:] == \"\\\\/\":\n # Folder patterns should match also files (MP specific).\n pattern = pattern + \".*\"\n\n # (MP: not used because it is file-based)\n #if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\n # File patterns should match also folders.\n #pattern = pattern + \"\\\\/?\"\n\n # Pattern should match till the end.\n pattern = pattern + \"$\"\n return re.compile(pattern, re.S)", "def convertFilename (pattern, name):\n\tresult = \"\"\n\tj = 0\n\ti = 0\n\twhile j < len (pattern) or i < len(name):\n\t\t# If the format ended \n\t\tif j >= len (pattern):\n\t\t\tbreak\n\t\t# If one charactere must be ignored \n\t\telif pattern [j] == '?':\n\t\t\tif i < len(name):\n\t\t\t\tresult = result + name [i]\n\t\t\t\ti += 1\n\t\t\tif j < len(pattern):\n\t\t\t\tj += 1\n\t\t# If one or more characteres must be ignored \n\t\telif pattern [j] == '*':\n\t\t\tif i < len(name):\n\t\t\t\tresult = result + name [i]\n\t\t\t\ti += 1\n\t\t\telse :\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif i < len(name):\n\t\t\t\ti += 1\n\n\t\t\tif j < len(pattern):\n\t\t\t\tresult = result + pattern [j]\n\t\t\t\tj += 1\n\treturn result", "def sed_like_thing(pattern, repl, path):\n\n with codecs.open(path, 'rb', 'utf8') as inf:\n data = inf.read()\n\n data = re.sub(pattern, repl, data)\n\n with codecs.open(path, 'wb+', 'utf8') as outf:\n outf.write(data)", "def regex_splitter(input_string: str) -> str:\n #grouping_list: list = re.compile(REGEX_PATTERN).split(input_string)\n compiled_regex = re.compile(REGEX_PATTERN)\n mo = compiled_regex.findall(input_string)\n \n #print(\"Current found matches are:\" + str(mo))\n \n result = evaluator_indefinite(mo)\n \n #print(\"Dictionary of evaluations\" + str(result))\n \n new_string = input_string\n\n for match in mo:\n new_string = new_string.replace(str(match), str(result[match]))\n #print(\"Current string modified with new value: \" + new_string)\n\n return new_string", "def register_group(self, group):\n if group.name in self._groups:\n return\n\n self._groups[group.name] = copy.copy(group)" ]
[ "0.7320233", "0.67950296", "0.605959", "0.6014507", "0.5789089", "0.5741866", "0.55717754", "0.5548644", "0.55015105", "0.5437232", "0.54301506", "0.53881884", "0.5342404", "0.53323615", "0.52918", "0.5223952", "0.520838", "0.51812756", "0.5165162", "0.5128126", "0.510275", "0.50944996", "0.50500697", "0.50446326", "0.504288", "0.5034884", "0.5034884", "0.50042284", "0.4965464", "0.49596655", "0.49302283", "0.49235496", "0.48967507", "0.48410404", "0.47957143", "0.47931808", "0.47754174", "0.4760887", "0.4745655", "0.47320303", "0.47269285", "0.47233462", "0.47192895", "0.47178185", "0.47128206", "0.4712381", "0.47118077", "0.47074035", "0.4706006", "0.46977785", "0.46924877", "0.46743858", "0.46673584", "0.46546867", "0.46383238", "0.46286243", "0.46205908", "0.4615629", "0.4599809", "0.4589709", "0.45874402", "0.45823818", "0.45819393", "0.4576303", "0.4566416", "0.45648688", "0.4564328", "0.4561268", "0.45582545", "0.4547522", "0.45460555", "0.45436764", "0.45299268", "0.45078623", "0.45023024", "0.4495484", "0.4489165", "0.4466992", "0.44633684", "0.44554546", "0.44439778", "0.44384706", "0.44242075", "0.44138932", "0.44136944", "0.4401338", "0.44009975", "0.43983662", "0.43974066", "0.43925318", "0.4391775", "0.43833542", "0.4382307", "0.4370754", "0.437031", "0.4369149", "0.43593565", "0.43578783", "0.43535253", "0.43498886" ]
0.7663291
0
r""" Find unnamed groups in `pattern` and replace them with ''. E.g., 1. ^(?P\w+)/b/(\w+)$ ==> ^(?P\w+)/b/$ 2. ^(?P\w+)/b/((x|y)\w+)$ ==> ^(?P\w+)/b/$
def replace_unnamed_groups(pattern): unnamed_group_indices = [m.start(0) for m in unnamed_group_matcher.finditer(pattern)] # Indices of the start of unnamed capture groups. group_indices = [] # Loop over the start indices of the groups. for start in unnamed_group_indices: # Handle nested parentheses, e.g. '^b/((x|y)\w+)$'. unmatched_open_brackets, prev_char = 1, None for idx, val in enumerate(list(pattern[start + 1:])): if unmatched_open_brackets == 0: group_indices.append((start, start + 1 + idx)) break # Check for unescaped `(` and `)`. They mark the start and end of # a nested group. if val == '(' and prev_char != '\\': unmatched_open_brackets += 1 elif val == ')' and prev_char != '\\': unmatched_open_brackets -= 1 prev_char = val # Remove unnamed group matches inside other unnamed capture groups. group_start_end_indices = [] prev_end = None for start, end in group_indices: if prev_end and start > prev_end or not prev_end: group_start_end_indices.append((start, end)) prev_end = end if group_start_end_indices: # Replace unnamed groups with <var>. Handle the fact that replacing the # string between indices will change string length and thus indices # will point to the wrong substring if not corrected. final_pattern, prev_end = [], None for start, end in group_start_end_indices: if prev_end: final_pattern.append(pattern[prev_end:start]) final_pattern.append(pattern[:start] + '<var>') prev_end = end final_pattern.append(pattern[prev_end:]) return ''.join(final_pattern) else: return pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_named_groups(pattern: str, noncapturing: bool = True) -> str:\n if noncapturing:\n new_parens = \"(?:\"\n else:\n new_parens = \"(\"\n\n return re.sub(r\"\\(\\?P<\\w+>\", new_parens, pattern)", "def replace_named_groups(pattern):\n named_group_indices = [\n (m.start(0), m.end(0), m.group(1))\n for m in named_group_matcher.finditer(pattern)\n ]\n # Tuples of (named capture group pattern, group name).\n group_pattern_and_name = []\n # Loop over the groups and their start and end indices.\n for start, end, group_name in named_group_indices:\n # Handle nested parentheses, e.g. '^(?P<a>(x|y))/b'.\n unmatched_open_brackets, prev_char = 1, None\n for idx, val in enumerate(list(pattern[end:])):\n # If brackets are balanced, the end of the string for the current\n # named capture group pattern has been reached.\n if unmatched_open_brackets == 0:\n group_pattern_and_name.append((pattern[start:end + idx], group_name))\n break\n\n # Check for unescaped `(` and `)`. They mark the start and end of a\n # nested group.\n if val == '(' and prev_char != '\\\\':\n unmatched_open_brackets += 1\n elif val == ')' and prev_char != '\\\\':\n unmatched_open_brackets -= 1\n prev_char = val\n\n # Replace the string for named capture groups with their group names.\n for group_pattern, group_name in group_pattern_and_name:\n pattern = pattern.replace(group_pattern, group_name)\n return pattern", "def simplify_regex(pattern):\n pattern = replace_named_groups(pattern)\n pattern = replace_unnamed_groups(pattern)\n # clean up any outstanding regex-y characters.\n pattern = pattern.replace('^', '').replace('$', '')\n if not pattern.startswith('/'):\n pattern = '/' + pattern\n return pattern", "def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt", "def rename_regex_groups(pattern):\n\n grp_rewritten = ''\n mapping = dict()\n rmapping = dict()\n cnt = 1\n for item in _NAMED_GRP_PATTERN.split(pattern):\n if _NAMED_GRP_PATTERN.fullmatch(item):\n old_name = item[len(_NAMED_GRP_PREFIX):-len(_NAMED_GRP_SUFFIX)]\n new_name = 'G' + str(cnt)\n cnt += 1\n\n mapping[new_name] = old_name\n rmapping[old_name] = new_name\n\n item = _NAMED_GRP_PREFIX + new_name + _NAMED_GRP_SUFFIX\n\n grp_rewritten += item\n\n ref_rewritten = ''\n for item in _NAMED_REF_PATTERN.split(grp_rewritten):\n if _NAMED_REF_PATTERN.fullmatch(item):\n old_name = item[len(_NAMED_REF_PREFIX):-len(_NAMED_REF_SUFFIX)]\n new_name = rmapping.get(old_name, old_name)\n\n item = _NAMED_REF_PREFIX + new_name + _NAMED_REF_SUFFIX\n\n ref_rewritten += item\n\n return ref_rewritten, mapping", "def replace_re_group(expr, group, pattern):\n r = \"\"\n lg = len(group)\n while expr:\n idx = expr.find(group)\n if idx == -1:\n return r + expr # No more groups found\n r += expr[:idx]\n expr = expr[idx + lg:]\n level = 1 # Level of parenthesis nesting\n while expr:\n c = expr[0]\n expr = expr[1:]\n if c == \"\\\\\":\n # Skip quoted character\n expr = expr[1:]\n continue\n elif c == \"(\":\n # Increase nesting level\n level += 1\n continue\n elif c == \")\":\n # Decrease nesting level\n level -= 1\n if level == 0:\n # Replace with pattern and search for next\n r += pattern\n break\n return r + expr", "def InterpolateGrouping(self, pattern):\n components = []\n offset = 0\n for match in GROUPING_PATTERN.finditer(pattern):\n components.append([pattern[offset:match.start()]])\n\n # Expand the attribute into the set of possibilities:\n alternatives = 
match.group(1).split(\",\")\n components.append(_unique(alternatives))\n offset = match.end()\n\n components.append([pattern[offset:]])\n # Now calculate the cartesian products of all these sets to form all\n # strings.\n for vector in itertools.product(*components):\n yield u\"\".join(vector)", "def replace_groups(self):\n newstr = []\n for state in self._parsed:\n newstr.append(self._handle_state(state))\n return ''.join(newstr)", "def capture_group(text: AnsibleUnsafeText, re_pattern: str, group=0) -> str:\n match_group = ''\n try:\n match_group = re.search(re_pattern, str(text)).group(group)\n except AttributeError as ae:\n print(type(ae).__name__)\n traceback.print_stack()\n return to_text(match_group)", "def formatPattern(self, pat):\n\n if not pat:\n return ''\n else:\n return pat", "def suppress(self):\n self.pattern = hre.begins_not_silently_grouped.sub(\"(?:\", self.pattern)\n self._compiled = None\n self.structure.clear()\n return self", "def combine_patterns(patterns: Sequence[str], groupname: Optional[str] = None) -> str:\n tag_re = re.compile(r\"\\(\\?P<(\\w+)>\")\n stripped_patterns = list()\n for p in patterns:\n tags = list(tag_re.finditer(p))\n prefix = f\"{tags[0].group(1)}_\"\n new_p = p\n for t in tags[:0:-1]:\n start, end = t.span(1)\n new_p = \"\".join((new_p[:start], prefix, new_p[start:]))\n stripped_patterns.append(new_p)\n if groupname is None:\n combined = rf\"(?:{r'|'.join(stripped_patterns)})\"\n else:\n combined = rf\"(?P<{groupname}>{r'|'.join(stripped_patterns)})\"\n\n return combined", "def repeat(pattern, capture=True):\r\n return (r\"(\" if capture else r\"(?:\") + pattern + r\")+\"", "def from_regex(pattern:str) -> str:\n raise NotImplementedError()", "def ensure_grouping(pattern, begins=begins_grouped, newgroup=silent_group):\n\n if begins.match(pattern) and ends_grouped.match(pattern):\n # correspond last ) and initial ( ? then it is already grouped\n if list(closing_parentheses_match(pattern))[-1] == 0:\n return pattern\n\n return newgroup(pattern)", "def make_pattern(current_pattern):\n pattern = ''.join([str(b) for b in current_pattern])\n return pattern", "def replace_pattern(\n gm: GraphModule,\n pattern: Union[Callable, GraphModule],\n replacement: Union[Callable, GraphModule],\n match_filters: List[Callable[[\"InternalMatch\", Graph, Graph], bool]] = None, # type: ignore[name-defined]\n) -> List[Match]:\n match_and_replacements = _replace_pattern(gm, pattern, replacement, match_filters)\n return [\n Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements\n ]", "def _regexify_matching_pattern(rule_pattern: str, wildcard_optional=False) -> str:\n return rule_pattern.replace(\"*\", f\"(.{'+*'[wildcard_optional]})\")", "def generate_regex(self, pattern_to_glob):\n\n # First thing, replace '.' with '\\.' 
and make sure we're going\n # to match the start and the end of the pattern string.\n pattern_to_glob = re.sub(r\"\\.\", r\"\\.\", pattern_to_glob)\n pattern_to_glob = r\"^\" + pattern_to_glob + r\"$\"\n\n found_values = []\n new_val = pattern_to_glob\n\n # Find all the named attributes\n all_matches = re.findall(r\"%.+?%\", pattern_to_glob)\n\n for match_thing in all_matches:\n name = match_thing[1:-1]\n\n if match_thing not in found_values:\n # Change to a named group.\n group_string = r\"(?P<\" + name + r\">.+?)\"\n new_val = re.sub(match_thing, group_string, new_val, count=1)\n found_values.append(match_thing)\n else:\n # Use the existing group.\n group_string = r\"(?P=\" + name + r\")\"\n new_val = re.sub(match_thing, group_string, new_val, count=1)\n\n return new_val", "def _format_pattern(pattern: str) -> str:\n return pattern.rstrip('*') + '**'", "def _clear_pattern(self):\n # pattern group\n self.__interval = None\n self.__days_of_week = set()\n self.__first_day_of_week = None\n self.__day_of_month = None\n self.__month = None\n self.__index = 'first'\n # range group\n self.__start_date = None\n self.__end_date = None\n self.__occurrences = None", "def __init__(self, regex, groups, nestedPattern = None, ignored = dict()):\r\n self.regex = regex.format(*[x.group() for x in groups])\r\n self.groups = groups\r\n self.ignored = ignored\r\n self.nestedPattern = nestedPattern\r\n self.name = \"_\"\r\n while self.name in self.groups:\r\n self.name += \"_\"", "def parse_pattern(pattern):\n pattern_type, _, group_spec = pattern.partition('(')\n # Groups are separated by '+' in a composite pattern.\n groups = [\n int(group) for group in group_spec[:-1].split('+')\n ] if group_spec else [1]\n\n # Some light lists use dots, some don't, just throw them away\n return pattern_type.lower().replace('.', ''), groups", "def pattern_gen():\n pattern = \"\"\n\n return pattern", "def remove_pattern(self, name):\n self._pattern_reg.__delitem__(name)", "def test_replace_group(self):\n pass", "def test_replace_groups(self):\n pass", "def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. 
/api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names", "def _spacyfy(\n matches: List[List[Optional[Tuple[str, str]]]], pattern: List[Dict[str, Any]]\n) -> List[List[Dict[str, Any]]]:\n new_patterns = []\n if matches:\n for match in matches:\n new_pattern = deepcopy(pattern)\n for i, token in enumerate(match):\n if token:\n del new_pattern[i][token[0]]\n new_pattern[i][\"TEXT\"] = token[1]\n new_patterns.append(new_pattern)\n return new_patterns", "def _patternToRegEx(self,pattern):\n if (pattern == \"*\"):\n # special case that matches anything\n regex = \".*?\"\n else:\n regex = pattern\n if (regex.find(\".\") >= 0):\n regex = regex.replace(\".\", \"\\.\")\n #endIf\n \n asteriskIndex = regex.find(\"*\")\n if (asteriskIndex < 0):\n # no wildcard in pattern\n regex = \"%s$\" % regex\n elif (asteriskIndex + 1 != len(regex)):\n raise TraceSpecificationException(\"Invalid entity pattern: %s. 
A wildcard character may only be used to terminate a pattern.\" % pattern)\n else:\n # remove * and add \".*?\"\n regex = \"%s.*?\" % regex[:-1]\n #endIf\n #endIf\n return regex", "def pack(word, pattern):\n ret = []\n for i, char in enumerate(word):\n if pattern[i]:\n ret.append(char)\n return \"\".join(ret)", "def resolve_pattern(pattern, args):\n if args is None or len(args) == 0:\n return pattern\n elif pattern.find('%') >= 0:\n return pattern % args\n elif pattern.find(\"{\") >= 0:\n # star magic does not work for single args\n return pattern.format(*args)\n else:\n # fixed pattern, no placeholders\n return pattern", "def pattern_to_regex(pattern):\n\n pattern = pattern.replace('.', r'\\.')\n pattern = pattern.replace('?', r'.')\n pattern = pattern.replace('*', r'.*')\n\n if pattern.endswith('/'):\n pattern += r'.*'\n elif pattern.endswith('.*'):\n pattern = pattern[:-2]\n pattern += r'(?!.*?/.*?)'\n\n return pattern", "def replace_redacted(text):\n pat = re.compile(r'\\[\\*\\*(.*?)\\*\\*\\]', re.IGNORECASE)\n \n # replace name types\n text = pat.sub(replace_names, text)\n \n # replace place types\n text = pat.sub(replace_places, text)\n \n # replace person identifier types\n text = pat.sub(replace_identifiers, text) \n \n # replace date types\n text = pat.sub(replace_dates, text)\n \n # replace remaining digits\n text = pat.sub(replace_digits, text)\n return text", "def filteredUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/state/(?P<state>\\w+)',\n '/group/(?P<group>[^/]+)',\n '/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)',\n '/server/(?P<server>[^/]+)/(?P<state>[A-Za-z]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)',\n '/server/(?P<server>[^/]+)/group/(?P<group>[^/]+)/(?P<state>[A-Za-z]+)'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results", "def _group_for(file_path):\n return re.sub(pattern_pair_lane_combo, \"\", os.path.basename(file_path))", "def remove_pattern(file_contents, pattern):\n sub_pattern, replacement = re.compile(pattern), 'CRO_'\n for index, row in enumerate(file_contents):\n file_contents[index][0] = sub_pattern.sub(replacement, row[0])\n return file_contents", "def replace(text,pattern,replace=\"\"):\n\n thisFunc = inspect.currentframe().f_code.co_name\n result = re.sub(pattern,replace,text)\n return result", "def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())", "def generate(self, name):\n if not '?' 
in self._pattern and not '[' in self._pattern:\n try:\n return self._pattern.replace('*', '{}').format(name)\n except IndexError:\n # multiple *\n pass\n return ''", "def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex", "def update_word_pattern(word,pattern,letter):\r\n new_pattern = list()\r\n for i in range(len(word)):\r\n if word[i] == letter and pattern[i] == '_':\r\n new_pattern.append(letter)\r\n else:\r\n new_pattern.append(pattern[i])\r\n return_pattern=''.join(new_pattern)\r\n return return_pattern", "def pattern_modifier(pattern, X, y):\n mod_pattern = pattern.reshape(n_channels, n_samples)\n mod_pattern = mod_pattern * kernel[np.newaxis, :]\n return mod_pattern.reshape(pattern.shape)", "def word_pattern():\n return Pattern._nonkey_words() + (Pattern._var() + Pattern._unkey_words()).many() + Pattern._var().possibly()", "def contraction_expansion(text):\n\n global patterns\n for (pattern, repl) in patterns:\n (text, _) = re.subn(pattern, repl, text)\n return text", "def fmt_capture(kwargs: Any, *patterns: Any) -> Any: # type: ignore\n results = [copy_annotations(pattern, _fmt_capture(kwargs, pattern)) for pattern in each_string(*patterns)]\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(results) == 1\n return results[0]\n return results", "def sufix(pattern):\n return pattern[1:len(pattern)]", "def clean(pattern=default, *, module=None):\n pattern = default.unwrap(pattern, current_config[\"clean\"])\n\n if pattern is False:\n return\n\n if module is None:\n import __main__ as module\n\n items = vars(module)\n to_delete = [key for key in items if fnmatch.fnmatchcase(key, pattern)]\n\n for key in to_delete:\n del items[key]", "def format_regexp_matches(name, regexps, unmatched):\n if unmatched:\n err = \"{newline} {name} matched: {matched}\".format(\n newline=os.linesep,\n name=name,\n matched=[\n \"REGEX('{}')\".format(e.pattern)\n for e in regexps\n if e not in unmatched\n ],\n )\n\n err += \"{newline}Unmatched: {unmatched}\".format(\n newline=os.linesep,\n unmatched=[\"REGEX('{}')\".format(e.pattern) for e in unmatched],\n )\n return err\n return \"\"", "def regex_sub(data):\n regexs = regex_lists()\n\n for pair in regexs:\n pattern = pair[0]\n replacement = pair[1]\n data = re.sub(pattern, replacement, data)\n\n # Junk to make still present issues stand out in txt file\n # data = re.sub(r\"\\\\(.*?){\", \"#### REVEALING \\g<1> REVEALING ####\", data)\n return data", "def prepare_linkifier_pattern(source: str) -> str:\n return rf\"\"\"(?P<{BEFORE_CAPTURE_GROUP}>^|\\s|['\"\\(,:<])(?P<{OUTER_CAPTURE_GROUP}>{source})(?P<{AFTER_CAPTURE_GROUP}>$|[^\\pL\\pN])\"\"\"", "def remove_url_patterns(filename, pattern):\n name = get_name_from_filename(filename)\n ext = get_extension_from_filename(filename)\n repl = \" \"\n new_name = re.sub(pattern, repl, name)\n\n return new_name + ext", "def convertPattern(pattern, sign):\r\n\r\n # Check for include vs exclude patterns.\r\n if pattern[:2] == \"+ \":\r\n pattern = pattern[2:]\r\n sign = \"+\"\r\n elif pattern[:2] == \"- \":\r\n pattern = pattern[2:]\r\n sign = \"-\"\r\n\r\n # Express windows, mac patterns in unix patterns (rsync.py extension).\r\n separator = os.path.normpath(\"/\")\r\n if separator != \"/\":\r\n pattern = re.sub(re.escape(separator), \"/\", 
pattern)\r\n\r\n # If pattern contains '/' it should match from the start.\r\n temp = pattern\r\n if pattern[0] == \"/\":\r\n pattern = pattern[1:]\r\n if temp[-1] == \"/\":\r\n temp = temp[:-1]\r\n\r\n # Convert pattern rules: ** * ? to regexp rules.\r\n pattern = re.escape(pattern)\r\n pattern = string.replace(pattern, \"\\\\*\\\\*\", \".*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \"[^/]*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \".*\")\r\n\r\n if \"/\" in temp:\r\n # If pattern contains '/' it should match from the start.\r\n pattern = \"^\\\\/\" + pattern\r\n else:\r\n # Else the pattern should match the all file or folder name.\r\n pattern = \"\\\\/\" + pattern\r\n\r\n if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\r\n # File patterns should match also folders.\r\n pattern = pattern + \"\\\\/?\"\r\n\r\n # Pattern should match till the end.\r\n pattern = pattern + \"$\"\r\n return (sign, pattern)", "def _internal_match(self, pattern):\n compiled_re = re.compile(pattern)\n for word in self.words:\n if compiled_re.fullmatch(word) is not None:\n yield word", "def _modify_entities_of_placeholder_pattern(pattern,mode='append'):\n if mode == 'append':\n for keyword in ['%task%','%session%','%subject%','%run%','%acquisition%']:\n pattern = pattern.replace(keyword,'%entities.'+keyword[1:])\n pattern = pattern.replace('%dataset%','%dataset_description.Name%')\n elif mode == 'cut':\n for keyword in ['%task%','%session%','%subject%','%run%','%acquisition%']:\n pattern = pattern.replace('%entities.'+keyword[1:],keyword)\n pattern = pattern.replace('%dataset_description.Name%','%dataset%')\n return pattern", "def findall(pattern, text):\r\n\tspl = re.compile(pattern).split(text)\r\n\tresult = []\r\n\tbeginTag = \"\"\r\n\tendTag = None\r\n\tbeginFormat = \"\"\r\n\tendFormat = \"\"\r\n\tinitText = text\r\n\tfor s in spl:\r\n\t\ttext = text[len(s)+2:]\r\n\t\tend = text.find(\")s\")\r\n\t\tvar = \"\"\r\n\t\tif len(text) > 0:\r\n\t\t\tvar = text[:end]\r\n\t\t\tresult.append(var)\r\n\t\tif var == \"content\":\r\n\t\t\tbeginTag += s\r\n\t\t\tendTag = \"\"\r\n\t\telif endTag != None:\r\n\t\t\tendTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tendFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tendFormat += \"self.%s,\"%var\r\n\t\t\t\tendTag += \"\\x25s\"\r\n\t\telse:\r\n\t\t\tbeginTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tbeginFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tbeginFormat += \"self.%s,\"%var\r\n\t\t\t\tbeginTag += \"\\x25s\"\r\n\t\ttext = text[end+2:]\r\n\tif endTag == None:\r\n\t\tendTag = \"\"\r\n\t\tendFormat = \"\"\r\n\treturn result, beginTag, endTag, beginFormat, endFormat", "def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")", "def convert_pattern(pattern, pattern_type=None):\n\tif pattern_type == 'regex':\n\t\treturn re.compile(pattern)\n\telif pattern_type == 'wildcard':\n\t\treturn re.compile(fnmatch.translate(pattern))\n\treturn re.compile(re.escape(pattern))", "def update_word_pattern(word, pattern, letter):\r\n # make pattern string as list for changing object inside it\r\n pattern_list = list(pattern)\r\n # Go through the pattern and reveal the letters.\r\n for i in range(len(word)):\r\n # Check where the letter exist, and reveal it on the pattern.\r\n if 
word[i] == letter:\r\n pattern_list[i] = letter\r\n # Rejoin the list onto one string\r\n pattern = \"\".join(pattern_list)\r\n return pattern", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def _compile_fnmatch(pattern: str) -> re.Pattern:\n return re.compile(translate(pattern))", "def filter_group_names(groups, patterns):\n group_names = [g['logGroupName'] for g in groups]\n matched = set()\n for p in patterns:\n matched.update(fnmatch.filter(group_names, p))\n return [g for g in groups if g['logGroupName'] in matched]", "def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))", "def conditionalSURLCleanup(pattern, replacement, surl, old_prefix):\n\n if re.search(pattern, surl) and not re.search(pattern, old_prefix):\n return re.sub(pattern, replacement, surl)\n else:\n return surl", "def _remove_regex(regex, text) -> StyledStr:\n text = str(text)\n if NO_COLOR:\n return StyledStr(text)\n return StyledStr(re.sub(regex, \"\", text))", "def figure_id_filter(text):\n def fn(m):\n s = m.group(1) or \"\"\n s += m.group(2)\n body = m.group(4)\n s += replace_body(body, type=m.group(3))\n s += m.group(5) or \"\" # Close parens if any\n return s\n\n return __ref_pattern.sub(fn,text)", "def regex_splitter(input_string: str) -> str:\n #grouping_list: list = re.compile(REGEX_PATTERN).split(input_string)\n compiled_regex = re.compile(REGEX_PATTERN)\n mo = compiled_regex.findall(input_string)\n \n #print(\"Current found matches are:\" + str(mo))\n \n result = evaluator_indefinite(mo)\n \n #print(\"Dictionary of evaluations\" + str(result))\n \n new_string = input_string\n\n for match in mo:\n new_string = new_string.replace(str(match), str(result[match]))\n #print(\"Current string modified with new value: \" + new_string)\n\n return new_string", "def simplify_standard_patterns(function: Function) -> Function:\n BodyPart = Union[Instruction, Label]\n PatternPart = Union[Instruction, Label, None]\n Pattern = List[Tuple[PatternPart, bool]]\n\n def make_pattern(*parts: str) -> Pattern:\n ret: Pattern = []\n for part in parts:\n optional = part.endswith(\"*\")\n part = part.rstrip(\"*\")\n if part == \"?\":\n ret.append((None, optional))\n elif part.endswith(\":\"):\n ret.append((Label(\"\"), optional))\n else:\n ins = parse_instruction(part, InstructionMeta.missing())\n ret.append((ins, optional))\n return ret\n\n div_pattern = make_pattern(\n \"bnez $x, .A\",\n \"?\", # nop or div\n \"break\",\n \".A:\",\n \"li $at, -1\",\n \"bne $x, $at, .B\",\n \"li $at, 0x80000000\",\n \"bne $y, $at, .B\",\n \"nop\",\n \"break\",\n \".B:\",\n )\n\n divu_pattern = make_pattern(\n \"bnez $x, .A\",\n \"nop\",\n \"break\",\n \".A:\",\n )\n\n mod_p2_pattern = make_pattern(\n \"bgez $x, .A\",\n \"andi $y, $x, LIT\",\n \"beqz $y, .A\",\n \"nop\",\n \"addiu $y, $y, LIT\",\n \".A:\",\n )\n\n div_p2_pattern_1 = make_pattern(\n \"bgez $x, .A\",\n \"sra $y, $x, LIT\",\n \"addiu $at, $x, LIT\",\n \"sra $y, $at, LIT\",\n \".A:\",\n )\n\n div_p2_pattern_2 = make_pattern(\n \"bgez $x, .A\",\n \"move $at, $x\",\n \"addiu $at, $x, LIT\",\n \".A:\",\n \"sra $x, $at, LIT\",\n )\n\n div_2_s16_pattern = make_pattern(\n \"sll $x, $x, LIT\",\n \"sra $y, $x, LIT\",\n \"srl $x, $x, 0x1f\",\n \"addu $y, $y, $x\",\n \"sra $y, $y, 1\",\n )\n\n div_2_s32_pattern = make_pattern(\n \"srl $x, $y, 0x1f\",\n \"addu $x, $y, $x\",\n \"sra $x, $x, 1\",\n )\n\n utf_pattern = make_pattern(\n \"bgez $x, .A\",\n 
\"cvt.s.w\",\n \"li $at, 0x4f800000\",\n \"mtc1\",\n \"nop\",\n \"add.s\",\n \".A:\",\n )\n\n ftu_pattern = make_pattern(\n \"cfc1 $y, $31\",\n \"nop\",\n \"andi\",\n \"andi*\", # (skippable)\n \"?\", # bnez or bneql\n \"?\",\n \"li*\",\n \"mtc1\",\n \"mtc1*\",\n \"li\",\n \"?\", # sub.fmt ?, X, ?\n \"ctc1\",\n \"nop\",\n \"?\", # cvt.w.fmt ?, ?\n \"cfc1\",\n \"nop\",\n \"andi\",\n \"andi*\",\n \"bnez\",\n \"nop\",\n \"mfc1\",\n \"li\",\n \"b\",\n \"or\",\n \".A:\",\n \"b\",\n \"li\",\n \"?\", # label: (moved one step down if bneql)\n \"?\", # mfc1\n \"nop\",\n \"bltz\",\n \"nop\",\n )\n\n lwc1_twice_pattern = make_pattern(\"lwc1\", \"lwc1\")\n swc1_twice_pattern = make_pattern(\"swc1\", \"swc1\")\n\n gcc_sqrt_pattern = make_pattern(\n \"sqrt.s\",\n \"c.eq.s\",\n \"nop\",\n \"bc1t\",\n \"?\",\n \"jal sqrtf\",\n \"nop\",\n )\n\n def matches_pattern(actual: List[BodyPart], pattern: Pattern) -> int:\n symbolic_registers: Dict[str, Register] = {}\n symbolic_labels: Dict[str, str] = {}\n\n def match_one(actual: BodyPart, exp: PatternPart) -> bool:\n if exp is None:\n return True\n if isinstance(exp, Label):\n name = symbolic_labels.get(exp.name)\n return isinstance(actual, Label) and (\n name is None or actual.name == name\n )\n if not isinstance(actual, Instruction):\n return False\n ins = actual\n if ins.mnemonic != exp.mnemonic:\n return False\n if exp.args:\n if len(exp.args) != len(ins.args):\n return False\n for (e, a) in zip(exp.args, ins.args):\n if isinstance(e, AsmLiteral):\n if not isinstance(a, AsmLiteral) or e.value != a.value:\n return False\n elif isinstance(e, Register):\n if not isinstance(a, Register):\n return False\n if len(e.register_name) <= 1:\n if e.register_name not in symbolic_registers:\n symbolic_registers[e.register_name] = a\n elif symbolic_registers[e.register_name] != a:\n return False\n elif e.register_name != a.register_name:\n return False\n elif isinstance(e, AsmGlobalSymbol):\n if e.symbol_name == \"LIT\" and not isinstance(a, AsmLiteral):\n return False\n elif isinstance(e, JumpTarget):\n if not isinstance(a, JumpTarget):\n return False\n if e.target not in symbolic_labels:\n symbolic_labels[e.target] = a.target\n elif symbolic_labels[e.target] != a.target:\n return False\n else:\n assert False, f\"bad pattern part: {exp}\"\n return True\n\n actuali = 0\n for (pat, optional) in pattern:\n if actuali < len(actual) and match_one(actual[actuali], pat):\n actuali += 1\n elif not optional:\n return 0\n return actuali\n\n def create_div_p2(bgez: Instruction, sra: Instruction) -> Instruction:\n assert isinstance(sra.args[2], AsmLiteral)\n shift = sra.args[2].value & 0x1F\n return Instruction.derived(\n \"div.fictive\", [sra.args[0], bgez.args[0], AsmLiteral(2 ** shift)], sra\n )\n\n def try_replace_div(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_pattern)]\n if not matches_pattern(actual, div_pattern):\n return None\n return ([actual[1]], i + len(div_pattern) - 1)\n\n def try_replace_divu(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(divu_pattern)]\n if not matches_pattern(actual, divu_pattern):\n return None\n return ([], i + len(divu_pattern) - 1)\n\n def try_replace_div_p2_1(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n # Division by power of two where input reg != output reg\n actual = function.body[i : i + len(div_p2_pattern_1)]\n if not matches_pattern(actual, div_p2_pattern_1):\n return None\n bnez = typing.cast(Instruction, actual[0])\n div = create_div_p2(bnez, 
typing.cast(Instruction, actual[3]))\n return ([div], i + len(div_p2_pattern_1) - 1)\n\n def try_replace_div_p2_2(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n # Division by power of two where input reg = output reg\n actual = function.body[i : i + len(div_p2_pattern_2)]\n if not matches_pattern(actual, div_p2_pattern_2):\n return None\n bnez = typing.cast(Instruction, actual[0])\n div = create_div_p2(bnez, typing.cast(Instruction, actual[4]))\n return ([div], i + len(div_p2_pattern_2))\n\n def try_replace_div_2_s16(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_2_s16_pattern)]\n if not matches_pattern(actual, div_2_s16_pattern):\n return None\n sll1 = typing.cast(Instruction, actual[0])\n sra1 = typing.cast(Instruction, actual[1])\n sra = typing.cast(Instruction, actual[4])\n if sll1.args[2] != sra1.args[2]:\n return None\n div = Instruction.derived(\n \"div.fictive\", [sra.args[0], sra.args[0], AsmLiteral(2)], sra\n )\n return ([sll1, sra1, div], i + len(div_2_s16_pattern))\n\n def try_replace_div_2_s32(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(div_2_s32_pattern)]\n if not matches_pattern(actual, div_2_s32_pattern):\n return None\n addu = typing.cast(Instruction, actual[1])\n sra = typing.cast(Instruction, actual[2])\n div = Instruction.derived(\n \"div.fictive\", [sra.args[0], addu.args[1], AsmLiteral(2)], sra\n )\n return ([div], i + len(div_2_s32_pattern))\n\n def try_replace_mod_p2(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(mod_p2_pattern)]\n if not matches_pattern(actual, mod_p2_pattern):\n return None\n andi = typing.cast(Instruction, actual[1])\n val = (typing.cast(AsmLiteral, andi.args[2]).value & 0xFFFF) + 1\n mod = Instruction.derived(\n \"mod.fictive\", [andi.args[0], andi.args[1], AsmLiteral(val)], andi\n )\n return ([mod], i + len(mod_p2_pattern) - 1)\n\n def try_replace_utf_conv(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(utf_pattern)]\n if not matches_pattern(actual, utf_pattern):\n return None\n cvt_instr = typing.cast(Instruction, actual[1])\n new_instr = Instruction.derived(\"cvt.s.u.fictive\", cvt_instr.args, cvt_instr)\n return ([new_instr], i + len(utf_pattern) - 1)\n\n def try_replace_ftu_conv(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(ftu_pattern)]\n consumed = matches_pattern(actual, ftu_pattern)\n if not consumed:\n return None\n sub = next(\n x\n for x in actual\n if isinstance(x, Instruction) and x.mnemonic.startswith(\"sub\")\n )\n cfc = actual[0]\n assert isinstance(cfc, Instruction)\n fmt = sub.mnemonic.split(\".\")[-1]\n args = [cfc.args[0], sub.args[1]]\n if fmt == \"s\":\n new_instr = Instruction.derived(\"cvt.u.s.fictive\", args, cfc)\n else:\n new_instr = Instruction.derived(\"cvt.u.d.fictive\", args, cfc)\n return ([new_instr], i + consumed)\n\n def try_replace_mips1_double_load_store(\n i: int,\n ) -> Optional[Tuple[List[BodyPart], int]]:\n # TODO: sometimes the instructions aren't consecutive.\n actual = function.body[i : i + 2]\n if not matches_pattern(actual, lwc1_twice_pattern) and not matches_pattern(\n actual, swc1_twice_pattern\n ):\n return None\n a, b = actual\n assert isinstance(a, Instruction)\n assert isinstance(b, Instruction)\n ra, rb = a.args[0], b.args[0]\n ma, mb = a.args[1], b.args[1]\n # TODO: verify that the memory locations are consecutive as well (a bit\n # annoying with macros...)\n if not (\n isinstance(ra, 
Register)\n and ra.is_float()\n and ra.other_f64_reg() == rb\n and isinstance(ma, AsmAddressMode)\n and isinstance(mb, AsmAddressMode)\n and ma.rhs == mb.rhs\n ):\n return None\n num = int(ra.register_name[1:])\n if num % 2 == 1:\n ra, rb = rb, ra\n ma, mb = mb, ma\n # Store the even-numbered register (ra) into the low address (mb).\n new_args = [ra, mb]\n new_mn = \"ldc1\" if a.mnemonic == \"lwc1\" else \"sdc1\"\n new_instr = Instruction.derived(new_mn, new_args, a)\n return ([new_instr], i + 2)\n\n def try_replace_gcc_sqrt(i: int) -> Optional[Tuple[List[BodyPart], int]]:\n actual = function.body[i : i + len(gcc_sqrt_pattern)]\n consumed = matches_pattern(actual, gcc_sqrt_pattern)\n if not consumed:\n return None\n sqrt = actual[0]\n assert isinstance(sqrt, Instruction)\n new_instr = Instruction.derived(\"sqrt.s\", sqrt.args, sqrt)\n return ([new_instr], i + consumed)\n\n def no_replacement(i: int) -> Tuple[List[BodyPart], int]:\n return ([function.body[i]], i + 1)\n\n new_function = function.bodyless_copy()\n i = 0\n while i < len(function.body):\n repl, i = (\n try_replace_div(i)\n or try_replace_divu(i)\n or try_replace_div_p2_1(i)\n or try_replace_div_p2_2(i)\n or try_replace_div_2_s32(i)\n or try_replace_div_2_s16(i)\n or try_replace_mod_p2(i)\n or try_replace_utf_conv(i)\n or try_replace_ftu_conv(i)\n or try_replace_mips1_double_load_store(i)\n or try_replace_gcc_sqrt(i)\n or no_replacement(i)\n )\n new_function.body.extend(repl)\n return new_function", "def replace(match_obj):\n return match_obj.group(0)[1:]", "def reverseWithMap(pattern, keys):\n return \"\"", "def regexp_replace(\n self, pattern: Any, replacement: Any, flags: Optional[str] = None\n ) -> ColumnOperators:\n return self.operate(\n regexp_replace_op,\n pattern,\n replacement=replacement,\n flags=flags,\n )", "def clean_text(text, pattern):\n \n text = unidecode.unidecode(text)\n text.replace('\\\\n', '')\n text.strip(' \\\\n')\n text = re.sub(pattern, ' ', str(text))\n text = re.sub('(\\\\*n)', ' ', str(text))\n text = re.sub('\\w*\\d\\w*', ' ', str(text))\n text = re.sub(' ', ' ', str(text))\n return text", "def bb_groupname(hit):\n try:\n group = hit.group(1)\n G = Group.objects.get(name=group)\n T = loader.get_template('webview/t/group.html')\n C = Context({'G' : G})\n return T.render(C)\n except:\n # This is normally thrown when the group is invalid. 
Return the original result,\n # Only we add an icon to indicate an invalid group.\n return '<img src=\"/static/user_error.png\" alt=\"user\" border=\"0\" /> %s' % (group)", "def _clean_name(self, raw_names: pd.Series, abolish_patterns: Iterable[str]) -> pd.Series:\n floating, anchor_start, anchor_end = self._compile_abolish_regexes(abolish_patterns)\n return (raw_names\n .pipe(self._replace_from_pairs, pairs=self.replace_pairs)\n .str.replace(floating, '') # Floating matches might get in the way of anchored matches.\n .str.replace(anchor_start, '')\n .str.replace(anchor_end, ''))", "def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)", "def regex_group_split(file_list, pattern, output=True):\n split_list = list([]) # tuple probz\n\n dicdic ={ \"Jan\":\"01\",\"Feb\":\"02\",\"Mar\":\"03\",\n \"Apr\":\"04\",\"May\":\"05\",\"June\":\"06\",\"Jun\":\"06\",\n \"July\":\"07\",\"Jul\":\"07\",\"Aug\":\"08\",\"Sep\":\"09\",\n \"Oct\":\"10\",\"Nov\":\"11\",\"Dec\":\"12\",\n \"JAN\":\"01\",\"FEB\":\"02\",\"MAR\":\"03\",\n \"APR\":\"04\",\"MAY\":\"05\",\"JUN\":\"06\",\n \"JUL\":\"07\",\"AUG\":\"08\",\"SEP\":\"09\",\n \"OCT\":\"10\",\"NOV\":\"11\",\"DEC\":\"12\"}\n\n for file in file_list:\n split_file = list(re.match(pattern, file).groups())\n #split_list.append(file.replace(\" \", \"\"))\n split_file[0], split_file[1], split_file[2], split_file[3], split_file[4], split_file[5] = \\\n split_file[0] + \" \", split_file[1], split_file[2] + \"-\", split_file[3]+ \"-\", split_file[4], split_file[5]\n split_list.append(\"\".join(split_file))\n \n if (len(split_list) > 0 and output):\n #print colors.RED + '\\033[1m' + \"renames:\" + '\\033[0m'\n for split in split_list:\n print colors.RED + split + colors.ENDC\n\n return split_list", "def match(pattern: List[str], source: List[str]) -> List[str]:\n sind = 0 # current index we are looking at in the source list\n pind = 0 # current index we are looking at in the pattern list\n result: List[str] = [] # to store the substitutions that we will return if matched\n acc = ''\n\n # keep checking as long as we haven't hit the end of both pattern and source\n while sind != len(source) or pind != len(pattern): \n # Your job is to fill out the body fo this loop\n # 1) if we reached the end of the pattern but not source \n if pind == len(pattern):\n return None\n # 2) if the current thing in the pattern is a %\n elif pattern[pind] == '%':\n pind += 1 # moving from % to next word \n while sind != len(source):\n if pind != len(pattern) and pattern[pind] == source[sind]:\n break \n else: \n if acc == \"\": \n acc += source[sind] # if it is the first character do not add a space \n else: \n acc += \" \"\n acc += source[sind]\n sind += 1\n result.append(acc)\n acc = ''\n # 3) if we reached the end of the source but not the pattern\n elif sind == len(source):\n return None \n # 4) if the current thing in the pattern is an _\n elif pattern[pind] == '_':\n result.append(source[sind])\n sind += 1\n pind += 1\n #appending is for lists and adding is for strings\n # 5) if the current thing in the pattern is the same as the current thing \n # in the source\n elif pattern[pind] == source[sind]:\n sind += 1\n pind += 1\n # 6) else : this will happen if none of the other conditions are met\n # it indicates the current thing it pattern doesn't match the current\n # thing in source\n else: \n return None\n return result", "def filter_re_replace(val: AnyStr, pattern: 
str, repl: str) -> str:\n return re.sub(pattern, repl, str(val))", "def replace_misc(text): \n # replace different types of \"year old\" with \n # matches: y.o., y/o, years old. year old, yearold\n text = re.sub(r'-?\\byears? ?-?old\\b|\\by(?:o|r)*[ ./-]*o(?:ld)?\\b', ' yo', text, flags=re.IGNORECASE)\n\n # Does the same thing as above but copied from https://arxiv.org/abs/1808.02622v1\n text = re.sub(r'(\\d+)\\s*(year\\s*old|y.\\s*o.|yo|year\\s*old|year-old|-year-old|-year old)', r'\\1 yo', text, flags=re.IGNORECASE)\n \n # replaces yr, yr's, yrs with years\n text = re.sub(r'\\byr[\\'s]*\\b', 'years', text, re.IGNORECASE)\n \n # replace Pt and pt with patient, and IN/OUT/OT PT with patient\n # Note: PT also refers to physical therapy and physical therapist\n text = re.sub(r'\\b[P|p]t.?|\\b(IN|OU?T) PT\\b', 'patient ', text)\n\n # replace sex with consistant token\n text = re.sub(r'\\b(gentlman|male|man|m|M)(?!\\S)\\b', 'male', text)\n text = re.sub(r'\\b(female|woman|f|F)(?!\\S)\\b', 'female', text)\n \n # replace time types\n text = re.sub(r'\\d{0,2}:\\d{0,2} \\b[A|P]\\.?M\\.?\\b', replace_time, text, flags=re.IGNORECASE)\n text = re.sub(r'\\[\\*\\*(\\d{2})\\*\\*\\] \\b[a|p].?m.?\\b', replace_time, text, flags=re.IGNORECASE)\n \n # finally remove leftover redacted stuff (mostly empty)\n text = re.sub(r'\\[\\*\\*(.*?)\\*\\*\\]', '', text, flags=re.IGNORECASE)\n\n return text", "def process(patterns, text):\n\n for i, p in enumerate(patterns):\n pattern = _fix_pattern(p)\n\n found = []\n for grammar, replace in pattern:\n\n find_and_replace = create_find_and_replace(grammar, replace)\n results = parse_grammar(find_and_replace, text)\n if not results:\n break\n else:\n found.append(len(results))\n text = _transform_results(results, text)\n\n if found:\n log.info('=> pattern {} found {} time(s) in {} pass(es)'\n .format(i + 1, sum(found), len(found)))\n else:\n log.info('__ pattern {} not found'\n .format(i + 1))\n\n return text", "def main(pattern, i=stdin, o=stdout, only_matching=False):\n sre = re.compile(pattern)\n with i as i, o as o:\n for line in i:\n match = sre.match(line)\n if match:\n if only_matching:\n o.write(\"%s\\n\" % match.groups()[0])\n else:\n o.write(line)", "def test_post_build_processing_remove_occurrences(self):\n it = [\n \"[[Chapter]] Chapter I\",\n \"This is chapter I text\",\n \"[[Article]] Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Article'],\n 'patterns': ['Chapter', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n descriptor = extend_internal_patterns(descriptor)\n descriptor = compile_patterns(descriptor)\n\n doc = post_build_process(doc, descriptor)\n\n result = [n['text'] for _, n in doc.traverse()]\n expected = [[], [\"Chapter I\", \"This is chapter I text\"], [\"Article I\", \"This is article I text\"]]\n self.assertListEqual(result, expected)", "def format_regex_string(self, name, value):\n\n if value is not None and isinstance(value, str):\n string = '%s = %s\\n' % (name, self.simple_format_string(value, force_raw=True))\n else:\n string = '%s = None\\n' % name\n return string", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def replace_none(ret_st, pattern):\n curr_none = [i for i in range(len(fk_array)) if ret_st[i] == 'unmapped-none']\n while curr_none:\n temp_curr_none = curr_none[:MGET_CHUNK]\n curr_none = curr_none[MGET_CHUNK:]\n vals_array = rdb.mget([pattern.format(str(fk_array[i]).upper(), taxid, hint)\n for i in temp_curr_none])\n for i, val in 
zip(temp_curr_none, vals_array):\n if val is None:\n continue\n ret_st[i] = val.decode()", "def _wildcardformat(regxval):\n if regxval == None:\n return None\n else:\n try:\n return regxval.replace(\"*\",\"%\").replace(\"?\",\"_\")\n except AttributeError:\n return regxval", "def get_pattern(topic):\n variants = get_variants(topic)\n sub_patterns = [r'(.*\\b)%s\\b(.*)' % variant.lower() for variant in variants]\n return re.compile(r'|'.join(sub_patterns), flags=re.IGNORECASE)", "def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:", "def bb_group(hit):\n try:\n groupid = hit.group(1)\n group = Group.objects.get(id=groupid)\n T = loader.get_template('webview/t/group.html')\n C = Context({'G' : group})\n return T.render(C)\n except:\n return \"[group]%s[/group]\" % groupid", "def to_pattern(obj):\n if isinstance(obj, Pattern):\n return obj\n return Glob(str(obj))", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def re_sub(pattern, replacement, txt):\n # for some reason the java security managar does not accept module 're'\n # return re.sub(\"[ \\\\n]+\", \" \", txt)\n pattern = regex.Pattern.compile(pattern)\n matcher = pattern.matcher(txt)\n return matcher.replaceAll(replacement)", "def filter_re_search(val, pattern: str):\n if not isinstance(val, str):\n return val\n result = re.search(pattern, val, re.IGNORECASE)\n if result:\n return result.group(0)\n return ''", "def parse_pattern(s: str) -> str:\n # Escape regex metacharacters\n for c in [\"\\\\\", \".\", \"(\", \")\", \"[\", \"]\", \"^\", \"$\", \"*\", \"+\", \"?\", \"|\"]:\n s = s.replace(c, \"\\\\\" + c)\n\n s = re.sub(\"~+\", \".*\", s)\n s = \"^\" + s + \"$\"\n return s", "def remove(data, pattern):\n return [''.join(filter(pattern, str)) for str in data]", "def safe_host_pattern(host):\n return \"(?i)%s://%s%s(#%s)?\" % \\\n (_safe_scheme_pattern, host, _safe_path_pattern, _safe_fragment_pattern)", "def message_to_pattern(loc, msg):\n\n # Somewhat-simplified match for a %-template\n template_re = re.compile(\n '%(?P<flag>[-#0 +]*)'\n '(?P<width>(?:[0-9]*|\\*))?'\n '(?P<precision>\\.*(?:[1-9][0-9]*|\\*))?'\n '(?:hh|h|ll|l|L|j|z|t)?'\n '(?P<conversion>[a-zA-Z%])')\n global is_regex\n is_regex = False\n\n def expand_template(match):\n global is_regex\n c = match.group('conversion').lower()\n if c == '%':\n return '%'\n is_regex = True\n if c in ['d', 'i', 'u']:\n return '[0-9]+'\n elif c == 'o':\n return '[0-7]+'\n elif c == 'x':\n return '[0-9a-f]+'\n elif c == 'p':\n return '(0x[0-9a-f]+|nil)'\n elif c == 's':\n return '.*'\n else:\n warn(loc, \"Unknown template conversion '%s'\" % match.group(0))\n return '.*'\n\n escape_re = re.compile(r'\\\\(?P<char>.)', re.DOTALL)\n def expand_escape(match):\n global is_regex\n c = match.group('char')\n if c == 'n':\n return '\\n'\n elif c in ['\\\\', '\"']:\n return c\n else:\n warn(loc, \"Unhandled escape sequence '%s'\" % match.group(0))\n is_regex = True\n return '.'\n\n pattern = template_re.sub(expand_template, msg)\n pattern = escape_re.sub(expand_escape, pattern)\n pattern = pattern.strip()\n\n compare_mode = \"string\"\n if is_regex:\n compare_mode = \"regex\"\n\n return (compare_mode, pattern)", "def convertPattern(self,pattern):\n images.convertSameFldImages(pattern,self.pathDir,self.img_destDir)\n return True", "def translate(self, pattern):\n\n if not pattern:\n return re.compile('')\n\n # Express windows, mac patterns in unix patterns.\n pattern = os.path.normcase(pattern).replace(os.sep, \"/\")\n\n # If pattern contains '/' 
it should match from the start.\n temp = pattern\n if pattern[0] == \"/\":\n pattern = pattern[1:]\n if temp[-1] == \"/\":\n temp = temp[:-1]\n\n # Convert pattern rules: ** * ? to regexp rules.\n pattern = re.escape(pattern)\n pattern = pattern.replace(\"\\\\?\", \"[^/]\")\n pattern = pattern.replace(\"\\\\*\\\\*\", \".*\")\n pattern = pattern.replace(\"\\\\*\", \"[^/]*\")\n pattern = pattern.replace(\"\\\\*\", \".*\")\n\n if \"/\" in temp:\n # If pattern contains '/' it should match from the start.\n pattern = \"^\\\\/\" + pattern\n else:\n # Else the pattern should match the all file or folder name.\n pattern = \"\\\\/\" + pattern\n\n if pattern[-2:] == \"\\\\/\":\n # Folder patterns should match also files (MP specific).\n pattern = pattern + \".*\"\n\n # (MP: not used because it is file-based)\n #if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\n # File patterns should match also folders.\n #pattern = pattern + \"\\\\/?\"\n\n # Pattern should match till the end.\n pattern = pattern + \"$\"\n return re.compile(pattern, re.S)" ]
[ "0.680426", "0.6300317", "0.60647875", "0.58528376", "0.5731375", "0.56507546", "0.56190664", "0.5457598", "0.5445742", "0.5427602", "0.5402431", "0.5378804", "0.531599", "0.52673435", "0.5245449", "0.5193543", "0.5173778", "0.51385576", "0.50884974", "0.50674146", "0.50603205", "0.50077075", "0.49998522", "0.49897292", "0.49650615", "0.4925082", "0.49202916", "0.49052197", "0.48967075", "0.48746672", "0.48220533", "0.48025268", "0.47972918", "0.47933084", "0.47904631", "0.47802708", "0.47722992", "0.47127", "0.47014403", "0.46702558", "0.46678823", "0.46563777", "0.4646428", "0.4625267", "0.45843107", "0.45649686", "0.45611244", "0.45607454", "0.45550483", "0.45509812", "0.454013", "0.45366603", "0.45280126", "0.45235604", "0.4508458", "0.45074794", "0.45071018", "0.45062706", "0.45043102", "0.4493181", "0.4493181", "0.44897026", "0.44869205", "0.44780457", "0.44699252", "0.44670293", "0.4464491", "0.44640884", "0.44612893", "0.44586852", "0.44446862", "0.44360712", "0.44184193", "0.44117263", "0.44062436", "0.4401387", "0.43887782", "0.43843216", "0.43686938", "0.43519068", "0.4349515", "0.4339596", "0.43358415", "0.43294424", "0.43289253", "0.43272638", "0.43198323", "0.43140262", "0.43110782", "0.43003127", "0.4296376", "0.42955014", "0.42943925", "0.42912048", "0.42845416", "0.42734852", "0.42715812", "0.4270759", "0.42696044", "0.4262204" ]
0.73646694
0
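A minimal usage sketch for the replace_unnamed_groups record above (illustrative only: the import path is an assumption about where the helper lives in Django, and the expected outputs are taken verbatim from the record's own docstring):

from django.contrib.admindocs.utils import replace_unnamed_groups  # assumed location of the helper

# Unnamed capture groups are collapsed to the literal '<var>' placeholder,
# while named groups such as (?P<a>...) are left untouched.
print(replace_unnamed_groups(r'^(?P<a>\w+)/b/(\w+)$'))       # ^(?P<a>\w+)/b/<var>$
print(replace_unnamed_groups(r'^(?P<a>\w+)/b/((x|y)\w+)$'))  # ^(?P<a>\w+)/b/<var>$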
r""" Clean up urlpattern regexes into something more readable by humans. For example, turn "^(?P\w+)/athletes/(?P\w+)/$" into "//athletes//".
def simplify_regex(pattern): pattern = replace_named_groups(pattern) pattern = replace_unnamed_groups(pattern) # clean up any outstanding regex-y characters. pattern = pattern.replace('^', '').replace('$', '') if not pattern.startswith('/'): pattern = '/' + pattern return pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self, sub):\n sub = re.sub(r'^RT[\\s]+', '', sub)\n sub = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', sub)\n sub = re.sub(r'#', '', sub)\n sub = re.sub(r'@[A-Za-z0–9]+', '', sub) \n\n return sub", "def url_removal(text):\n return re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]\\\n {2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]\\\n +|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', text)", "def cleanUrl(url):\n\turl_clean = url.replace(' ','%20')\n\t\"\"\" add /index.html where necessary \"\"\"\n\tif (url[-1:]=='/'):\n\t\turl_clean += 'index.html'\n\telif (url[-5:].find('.') == -1):\n\t\t url_clean += '/index.html'\n\treturn url_clean", "def generate_clean_url(self):\n\n\t\tspaces_replaced = self.title.replace(' ', '-')\n\t\tpattern = re.compile('[^a-zA-Z0-9-]+')\n\t\tstripped = pattern.sub('', spaces_replaced)\n\t\tself.cleanurl = '-'.join([str(self.pid), stripped.lower()])", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def clean_url(url):\n for noisy_url in noisy_urls:\n url = str(url).replace(noisy_url,\"\").lower()\n return url", "def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def __clean_url(links_titles):\n clean_urls = []\n for url, title, flag in links_titles:\n duplicates_words = []\n unique_words = []\n for word in str(url).rstrip('/').split('/'):\n if word not in unique_words:\n unique_words.append(word)\n else:\n if word not in duplicates_words:\n duplicates_words.append(word)\n url = str(url).replace(word+'/', '', 1)\n clean_urls.append((url, title, flag))\n return clean_urls", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add 
a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def clean_text(text):\n cleanedup = text.lower()\n return re.sub(\"(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", cleanedup)", "def test_non_ideal_inputs():\n assert normalize_url(\"example.com\") == \"http://example.com/\"\n assert normalize_url(\"example.com/abc\") == \"http://example.com/abc\"\n assert normalize_url(\"//example.com/abc\") == \"http://example.com/abc\"", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def sanitize_urls(self, urls):\n\n absolute_and_relative = [item for item in urls if item.startswith('http') or item.startswith(r'/')]\n clean_links = []\n\n # Make relative urls absolute\n for item in absolute_and_relative:\n if item.startswith(r'/'):\n item = urljoin(self.site_url, item)\n clean_links.append(item)\n\n else:\n clean_links.append(item)\n\n clean_links = [link for link in clean_links if 'localhost' not in urlparse(item).hostname]\n return clean_links", "def clean_url(url):\n\n if url is None:\n return None\n\n if '??' 
in url:\n url = url.split('??')[0]\n\n if url.endswith('?'):\n url = url[:-1]\n\n if '`' in url:\n url = url.replace('`', '')\n\n return url", "def process_url(url):\n # only get url path, remove host,params.\n url = urlparse(url).path\n # url = list(url)\n # for i in range(len(url)):\n # if _is_punctuation(url[i]):\n # url[i] = \" \"\n # url = ''.join(url)\n # url = ' '.join(url.split())\n return url", "def remove_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', '', text)\n return text", "def _remove_urls(text: str) -> str:\n pattern = r'(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?'\n\n return re.sub(pattern, '', text, flags=re.MULTILINE)", "def obfuscate_url(url: str) -> str:\n return re.sub(r\"\\/\\/.*:.*@\", \"//***:***@\", url)", "def __init__(self, url_pattern):\n self._url_regex = re.compile(r'^%s$' % re.escape(url_pattern).replace('_', '([^/]+)'))", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. Return url as-is.\r\n return urlunparse(url)", "def sanitize_url(urlstring):\n\n # A blog's url is the best unique identifier for the data store\n # (some Twitter handles have more than one blog), but certain\n # punctuation in a string throws an error in Firebase when\n # you attempt to use that string as a key.\n return annoying_punctuation.sub('', urlstring)", "def normalize_url(self, url):\n pass", "def remove_urls(text):\n pass", "def remove_urls(self, doc):\n doc = re.sub(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)'\n r'(?:[^\\s()<>]+|\\(([^\\s()<>]+|'\n r'(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|'\n r'[^\\s`!()\\[\\]{};:\\'\".,<>]))',\n '',\n doc)\n return ' '.join(doc.split())", "def url_clean(path):\n return path[path.find('/'+settings.URL_ADMIN_SEP):]", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def clean_tweet(tweet): \n #Remove URL\n tweet = re.sub('\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', ' ', tweet) \n #Remove user\n tweet = re.sub('@[^\\s]+','',tweet)\n #Replace #word with word\n tweet = re.sub(r'#([^\\s]+)', ' ', tweet) \n return tweet", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def remove_url_patterns(filename, pattern):\n name = get_name_from_filename(filename)\n ext = get_extension_from_filename(filename)\n repl = \" \"\n new_name = re.sub(pattern, repl, name)\n\n return new_name + ext", "def url(regex, view):\n return RegexPattern(regex, view)", "def replace_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', 'URL', text)\n 
return text", "def clean_content(content):\n content = content.strip()\n valid_words = content.split()\n valid_words = [word for word in valid_words if not word_is_url(word)]\n return \" \".join(valid_words)", "def clean_url(url):\r\n s = url\r\n url = url.encode('utf8')\r\n url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])\r\n return url", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def clean_blog_url(raw_url):\n # Example urls that need handling:\n # http://jessicaanner.tumblr.com/post/113520547711/animated-versions-here-main-view-webm-gif\n # http://havesomemoore.tumblr.com/\n # http://pwnypony.com/\n # (?:https?://)([^#/'\"]+)\n stripped_url = raw_url.strip(\"\\r\\n\\t \")\n logging.debug(\"stripped_url: \"+repr(stripped_url))\n blog_url_regex = \"\"\"(?:https?://)?([^#/'\"]+)\"\"\"\n blog_url_search = re.search(blog_url_regex, stripped_url, re.IGNORECASE)\n if blog_url_search:\n blog_url = blog_url_search.group(1)\n return blog_url\n else:\n logging.error(\"Can't parse list item! Skipping it.\")\n logging.error(\"clean_blog_url()\"+\" \"+\"raw_url\"+\": \"+repr(raw_url))\n return \"\"", "def _remove_urls(self, text: str) -> str:\n pattern = r\"http\\S+\"\n return re.sub(pattern, \" \", str(text))", "def clean_url_path(markup):\n\n soup = BeautifulSoup(markup, \"html.parser\")\n elements = soup.find_all('a')\n\n for url in elements:\n url_href = url.get('href')\n if url.string:\n url_string = url.string.replace('\\n', '').replace(' ', '')\n\n # Only clean links where the URL matches the string, without custom text inside.\n if url_string == url_href:\n url_parse = urllib.parse.urlparse(url_href)\n path = '{0}{1}'.format(url_parse.netloc.replace(\"www.\", \"\"), url_parse.path)\n url.string.replace_with(path)\n return soup.prettify(soup.original_encoding)", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())", "def clean_url(self):\n url = self.cleaned_data['url']\n\n if url:\n pattern = re.compile(r'https?://(www\\.)?instagr(\\.am|am\\.com)/p/\\S+')\n if not pattern.match(url):\n raise forms.ValidationError('Please provide a valid instagram link.')\n\n return url", "def fixURLS():\n url_re = re.compile(r'http t co \\S+')\n tweets = Tweet.objects.all()\n for tweet in tweets:\n tweet.text = url_re.sub(' ', tweet.text)\n tweet.text = ' '.join(tweet.text.split())\n tweet.save()", "def clean_link(self, url: str) -> str:\n return self.CLEAN_REGEX.sub(lambda match: f\"%{ord(match.group(0)):02x}\", url)", "def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)", "def fix_url(cls, url: str):\r\n ...", "def _prefix_only_url_replace_regex(prefix):\r\n return ur\"\"\"\r\n (?x) # flags=re.VERBOSE\r\n (?P<quote>\\\\?['\"]) # the opening quotes\r\n 
(?P<prefix>{prefix}) # the prefix\r\n (?P<rest>.*?) # everything else in the url\r\n (?P=quote) # the first matching closing quote\r\n \"\"\".format(prefix=re.escape(prefix))", "def preprocess(tweet): \n #Remove URL\n tweet = re.sub('\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', tweet) \n #Remove user\n tweet = re.sub('@[^\\s]+','',tweet)\n #Remove not alphanumeric symbols white spaces\n tweet = re.sub(r'[^\\w]', ' ', tweet)\n #Replace #word with word\n tweet = re.sub(r'#([^\\s]+)', ' ', tweet) \n tweet = re.sub('[:;>?<=*+()/,\\-#!$%\\{˜|\\}\\[^_\\\\@\\]1234567890’‘]',' ', tweet)\n #Remove :( or :)\n tweet = tweet.replace(':)','')\n tweet = tweet.replace(':(','') \n #Remove additional white spaces\n tweet = re.sub('[\\s]+', ' ', tweet)\n tweet = re.sub('[\\n]+', ' ', tweet)\n return tweet", "def test_drop_trailing_questionmark():\n assert normalize_url(\"http://example.com/?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a?\") == \"http://example.com/a\"\n assert normalize_url(\"http://example.com/a/?\") == \"http://example.com/a\"", "def safe_host_pattern(host):\n return \"(?i)%s://%s%s(#%s)?\" % \\\n (_safe_scheme_pattern, host, _safe_path_pattern, _safe_fragment_pattern)", "def normalize(cls, url):\n # Always ignore the fragment\n scheme, netloc, path, query, _ = urlsplit(url)\n uri_relative = (None, None, path, query, None)\n uri_without_query = (scheme, netloc, path, None, None)\n uri_relative_without_query = (None, None, path, None, None)\n urls = [url]\n if query:\n urls.append(urlunsplit(uri_without_query))\n urls.append('~' + urlunsplit(uri_relative))\n if query:\n urls.append('~' + urlunsplit(uri_relative_without_query))\n return urls", "def _sanitize_url_prefix(url_prefix: Optional[str]) -> str:\n if not url_prefix:\n return ''\n\n while url_prefix.startswith('//'):\n url_prefix = url_prefix[1:]\n while url_prefix.endswith('/'):\n url_prefix = url_prefix[:-1]\n\n if url_prefix == '':\n return ''\n\n if url_prefix.startswith('/') \\\n or url_prefix.startswith('http://') \\\n or url_prefix.startswith('https://'):\n return url_prefix\n\n return '/' + url_prefix", "def urlunsplit(urlparts):\n res = urlparse.urlunsplit(urlparts)\n if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:\n # UNC paths must have 4 slashes: 'file:////server/path'\n # Depending on the path in urlparts[2], urlparse.urlunsplit()\n # left only two or three slashes. This is fixed below\n repl = 'file://' if urlparts[2].startswith('//') else 'file:/'\n res = res.replace('file:', repl)\n return res", "def clean_url(url):\n return url[:url.find('?')]", "def remove_url(text):\r\n url = re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n return url", "def cleanmatomo_url(self):\n self.matomo_url = re.sub(r\"/\\/$/\", \"\", self.matomo_url) # Cuts \"/\"\n\n if re.match(r\"^http://\", self.matomo_url): # replace it to \"https://\"\n self.matomo_url = re.sub(\"^http://\", \"\", self.matomo_url)\n self.matomo_url = self.protocol + self.matomo_url\n elif not bool(re.match(\"^https://\", self.matomo_url)): # check for \"https://\" and set it\n self.matomo_url = self.protocol + self.matomo_url", "def _prefix_and_category_url_replace_regex(prefix):\r\n return ur\"\"\"\r\n (?x) # flags=re.VERBOSE\r\n (?P<quote>\\\\?['\"]) # the opening quotes\r\n (?P<prefix>{prefix}) # the prefix\r\n (?P<category>[^/]+)/\r\n (?P<rest>.*?) 
# everything else in the url\r\n (?P=quote) # the first matching closing quote\r\n \"\"\".format(prefix=re.escape(prefix))", "def clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+://\\S+)\", \" \", tweet).split())", "def _compile_audience_pattern(self, pattern):\n re_pattern = fnmatch.translate(pattern)\n if \"://\" not in pattern:\n re_pattern = \"[a-z]+://\" + re_pattern\n return re.compile(re_pattern)", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url", "def clean_url_part(self):\n complete_url = \"{url_prefix}{url_part}\".format(\n url_prefix=self.URL_PREFIX,\n url_part=self.cleaned_data['url_part']\n )\n URLValidator(complete_url)\n self.cleaned_data['repo_url'] = complete_url\n return self.cleaned_data['url_part']", "def clean_tweet(tweet):\n\n pattern = r'http\\S+|pic.\\S+|@[a-zA-Z0-9_]+|#[a-zA-Z0-9_]+|[‘’“”’–—…]|\\xa0'\n return re.sub(pattern, '', tweet)", "def clean_string(string):\n # unCharSet = {'\\\\': ' ', \"'\": '', '(': ' ', ')': ' ',\n # '.': ' ', ',': ' ', '/': ' ', '&': ' and '}\n unCharSet = {'\\\\': ' ', \"'\": '', '(': ' ', ')': ' ',\n '.': ' ', ',': ' ', '&': ' and ',\n ';': '', ')': '', '(': '', '}': '',\n '{': '', ']': '', '[': '', '/': ' ',\n '\\\\': '', '>': '', '<': '', '=': '',\n '|': '', '%': '', '\\'': '', '*': ''}\n\n if string is None:\n return None\n\n for key, value in unCharSet.items():\n string = string.replace(key, value)\n\n \"\"\"\n most character to be replaced is straight forward\n except '/'\n some words have '/' to short the long word\n e.g. south can be written as s/th\n but sometimes we have 2/49 which means\n unit 2 and street number 49\n we want to only handle the second case\n another e.g. is 1051a/b high st\n \"\"\"\n o_string = string.split()\n \"\"\"\n below code is moved to addressparser\n \"\"\"\n # for idx, word in enumerate(o_string):\n # if '/' in word:\n # pos = word.index('/')\n # \"\"\"\n # sometimes we have string ending with '/'\n # for e.g.\n # 1st floor cnr north pde/\n # \"\"\"\n # if pos > 0:\n # leftchar = word[:pos][-1]\n # try:\n # rightchar = word[pos + 1:][0]\n # except:\n # rightchar = ''\n # if leftchar.isdigit() or rightchar.isdigit() or \\\n # remove_ordinal(word[:pos]).isdigit():\n # o_string[idx] = word.replace('/', ' ')\n\n o_string = \" \".join(o_string)\n return o_string", "def __sanitize(name):\n if name[-1] == \"/\":\n return name[:-1]\n return name", "def _remove_urls(self, doc: str):\n processed_tweet = re.sub('(https?:)?\\/\\/[\\w\\.\\/-]+', '', doc)\n return processed_tweet", "def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)", "def removeURL(text):\n text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',text)\n text = re.sub(r'#([^\\s]+)', r'\\1', text)\n return text", "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "def preprocess(data):\n\n #remove urls and convert to lowercase\n #used this thread for help on urls: https://stackoverflow.com/questions/11331982/how-to-remove-any-url-within-a-string-in-python\n remove_url = [re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', x) for x in data]\n lower=[x.lower() for x in remove_url]\n\n #remove all non alphanumeric chars and empty 
strings\n return filter(None, [re.sub(r'\\W','',x) for x in lower])", "def remove_url(text):\n return re.sub(r'http\\S+', ' ', text)", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url", "def fixup_parameters(url, backend):\n result = url\n if backend == \"django\":\n result = url.replace(\"{\", \"(?P<\").replace(\"}\", \">.+)\")\n\n return result", "def test_non_urls():\n assert normalize_url(\"\") is None\n assert normalize_url(\"abc xyz\") is None\n assert normalize_url(\"asb#abc\") is None\n assert normalize_url(\"Яндекс.рф\") is not None\n assert normalize_url(\"google.blog\") is not None\n assert normalize_url(\"http//google.com\") is None\n assert normalize_url(\"http://user@pass:example.com\") is None", "def url_fix(s, charset='utf-8'):\n if isinstance(s, unicode):\n s = s.encode(charset, 'ignore')\n scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)\n path = urllib.quote(path, '/%')\n qs = urllib.quote_plus(qs, ':&=')\n return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))", "def clean_tweet(self, tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w +://\\S +)\", \" \", tweet).split())", "def clean_tweet(self, tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())", "def clean_html_url(url):\n\n if type(url) is str:\n url = url.replace(\"&amp;\", \"&\")\n url = url.replace(\"%3d\", \"=\")\n url = url.replace(\"%2f\", \"/\")\n url = url.replace(\"%3f\", \"?\")\n url = url.replace(\"%26\", \"&\") \n return url\n elif type(url) is list:\n for item in url:\n if not type(item) == str:\n return None\n url[url.index(item)] = item.replace(\"&amp;\", \"&\")\n url[url.index(item)] = item.replace(\"%3d\", \"=\")\n url[url.index(item)] = item.replace(\"%2f\", \"/\")\n url[url.index(item)] = item.replace(\"%3f\", \"?\")\n url[url.index(item)] = item.replace(\"%26\", \"&\")\n return url\n else:\n return None", "def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation", "def replace_urls_token(text):\n\n text = re.sub(r\"^https?://.*[\\r\\n]*\", \"<url/>\", text, re.M | re.I)\n return re.sub(r\"http\\S+(\\s)*(\\w+\\.\\w+)*\", \"<url/>\", text, re.M | re.I)", "def RegexSafe(regex):\n regex = regex.lower()\n regex = regex.replace('.', '\\.')\n regex = regex.replace('-', '\\-')\n # This should never happen but best to be careful.\n regex = regex.replace('||', '|')\n return regex", "def sanitize_url(url: str) -> Optional[str]:\n try:\n parts = urllib.parse.urlparse(url.replace(\" \", \"%20\"))\n scheme, netloc, path, params, query, fragment = parts\n except ValueError:\n # Bad URL - so bad it couldn't be parsed.\n return \"\"\n\n # If there is no scheme or netloc and there is a '@' in the path,\n # treat it as a mailto: and set the appropriate scheme\n if scheme == \"\" and netloc == \"\" and \"@\" in path:\n scheme = \"mailto\"\n elif scheme == \"\" and netloc == \"\" and len(path) > 0 and path[0] == \"/\":\n # 
Allow domain-relative links\n return urllib.parse.urlunparse((\"\", \"\", path, params, query, fragment))\n elif (scheme, netloc, path, params, query) == (\"\", \"\", \"\", \"\", \"\") and len(fragment) > 0:\n # Allow fragment links\n return urllib.parse.urlunparse((\"\", \"\", \"\", \"\", \"\", fragment))\n\n # Zulip modification: If scheme is not specified, assume http://\n # We re-enter sanitize_url because netloc etc. need to be re-parsed.\n if not scheme:\n return sanitize_url(\"http://\" + url)\n\n # Upstream code will accept a URL like javascript://foo because it\n # appears to have a netloc. Additionally there are plenty of other\n # schemes that do weird things like launch external programs. To be\n # on the safe side, we allow a fixed set of schemes.\n if scheme not in allowed_schemes:\n return None\n\n # Upstream code scans path, parameters, and query for colon characters\n # because\n #\n # some aliases [for javascript:] will appear to urllib.parse to have\n # no scheme. On top of that relative links (i.e.: \"foo/bar.html\")\n # have no scheme.\n #\n # We already converted an empty scheme to http:// above, so we skip\n # the colon check, which would also forbid a lot of legitimate URLs.\n\n # URL passes all tests. Return URL as-is.\n return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))", "def _create_regexes():\n space = r'(?:[^\\S\\n]|&nbsp;|&\\#0*160;|&\\#[Xx]0*[Aa]0;)'\n spaces = r'{space}+'.format(space=space)\n space_dash = r'(?:-|{space})'.format(space=space)\n tags = [\n 'gallery',\n 'math',\n 'nowiki',\n 'pre',\n 'score',\n 'source',\n 'syntaxhighlight',\n ]\n # Based on pywikibot.textlib.compileLinkR\n # and https://gist.github.com/gruber/249502\n url = r'''(?:[a-z][\\w-]+://[^\\]\\s<>\"]*[^\\]\\s\\.:;,<>\"\\|\\)`!{}'?«»“”‘’])'''\n _regexes.update(\n {\n 'bare_url': re.compile(r'\\b({})'.format(url), flags=re.I),\n 'bracket_url': re.compile(\n r'(\\[{}[^\\]]*\\])'.format(url), flags=re.I\n ),\n 'ISBN': re.compile(\n r'\\bISBN(?P<separator>{spaces})(?P<value>(?:97[89]{space_dash}'\n r'?)?(?:[0-9]{space_dash}?){{9}}[0-9Xx])\\b'.format(\n spaces=spaces, space_dash=space_dash\n )\n ),\n 'PMID': re.compile(\n r'\\bPMID(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'RFC': re.compile(\n r'\\bRFC(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'tags': re.compile(\n r'''(<\\/?\\w+(?:\\s+\\w+(?:\\s*=\\s*(?:(?:\"[^\"]*\")|(?:'[^']*')|'''\n r'''[^>\\s]+))?)*\\s*\\/?>)'''\n ),\n 'tags_content': re.compile(\n r'(<(?P<tag>{})\\b.*?</(?P=tag)>)'.format(r'|'.join(tags)),\n flags=re.I | re.M,\n ),\n }\n )", "def clean_url(url: str, keys: List[str]) -> str:\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n\n for key in keys:\n query.pop(key, None)\n\n u = u._replace(query=urlencode(query, True))\n \n return urlunparse(u)", "def url_at_remove(text):\n text = re.sub(r'#\\w+|@\\w+',' ',text)\n # Remove url:\n return(re.sub(r'\\bhttps?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE))" ]
[ "0.6629905", "0.6508251", "0.638263", "0.63161266", "0.6314307", "0.6314307", "0.6314307", "0.6314307", "0.6314307", "0.6314307", "0.6314307", "0.63040775", "0.62951714", "0.6283508", "0.62794656", "0.62432885", "0.62392426", "0.6236246", "0.6177149", "0.6127283", "0.60598266", "0.60408044", "0.60407734", "0.60128367", "0.60020727", "0.60019433", "0.59634906", "0.59612113", "0.59502524", "0.59176743", "0.5908699", "0.5905467", "0.5891363", "0.58605665", "0.5857726", "0.585562", "0.5846207", "0.58433986", "0.58333075", "0.5812609", "0.58043987", "0.57872915", "0.57479405", "0.5738853", "0.5723928", "0.57231337", "0.5711101", "0.57005465", "0.56988496", "0.56963784", "0.56742156", "0.5669284", "0.56450427", "0.5640517", "0.56388503", "0.5626446", "0.5620792", "0.5620714", "0.5616369", "0.56075484", "0.56012785", "0.55911535", "0.55779827", "0.55713123", "0.55675143", "0.55556214", "0.5547402", "0.5547017", "0.5536766", "0.55365676", "0.55342776", "0.5534016", "0.5531009", "0.5512588", "0.5511894", "0.55106443", "0.5509036", "0.5497897", "0.5491231", "0.54899925", "0.54868114", "0.54840446", "0.5483195", "0.54821366", "0.5476123", "0.5474692", "0.54696435", "0.5466546", "0.54640394", "0.5454875", "0.54532915", "0.54499835", "0.54454035", "0.5443367", "0.5438772", "0.5437676", "0.54281175", "0.5426857", "0.54222614", "0.5394875" ]
0.5762543
42
return a list of manifests that are applied on this machine
def fact():
    manifests = [x for x in os.walk(manifests_dir)]
    return { 'manifests': manifests }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available_manifest_strands(self):\n return self._available_manifest_strands", "def list_manifests():\n import enaml\n with enaml.imports():\n from .pulses.manifest import PulsesManagerManifest\n from .tasks.manifest import PulsesTasksManifest\n from .measure.manifest import PulsesMeasureManifest\n return [PulsesManagerManifest, PulsesTasksManifest, PulsesMeasureManifest]", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def get_manifests(arcroot):\n manifests = []\n for root, dirs, files in os.walk(arcroot):\n if 'manifest.json' in files:\n manifests.append(os.path.join(root, 'manifest.json'))\n \n return manifests", "def applications(self):\n return [self.app] + self.mounts.values()", "def get_applications(self):\n applications = []\n\n # Isolate all of the bnodes referring to target applications\n for target_app in self.get_objects(None,\n self.uri('targetApplication')):\n applications.append({\n 'guid': self.get_object(target_app, self.uri('id')),\n 'min_version': self.get_object(target_app,\n self.uri('minVersion')),\n 'max_version': self.get_object(target_app,\n self.uri('maxVersion'))})\n return applications", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def getAssemblies(pth):\n if pth.lower().endswith(\".manifest\"):\n return []\n # check for manifest file\n manifestnm = pth + \".manifest\"\n if os.path.isfile(manifestnm):\n with open(manifestnm, \"rb\") as fd:\n res = {RT_MANIFEST: {1: {0: fd.read()}}}\n else:\n # check the binary for embedded manifest\n try:\n res = GetManifestResources(pth)\n except winresource.pywintypes.error as exc:\n if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n logger.info('Cannot get manifest resource from non-PE '\n 'file %s', pth)\n return []\n raise\n rv = []\n if RT_MANIFEST in res and len(res[RT_MANIFEST]):\n for name in res[RT_MANIFEST]:\n for language in res[RT_MANIFEST][name]:\n # check the manifest for dependent assemblies\n try:\n manifest = Manifest()\n manifest.filename = \":\".join([pth, str(RT_MANIFEST),\n str(name), str(language)])\n manifest.parse_string(res[RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Can not parse manifest resource %s, %s\"\n \" from %s\", name, language, pth, exc_info=1)\n else:\n if manifest.dependentAssemblies:\n logger.debug(\"Dependent assemblies of %s:\", pth)\n logger.debug(\", \".join([assembly.getid()\n for assembly in\n manifest.dependentAssemblies]))\n rv.extend(manifest.dependentAssemblies)\n return rv", "def adaptation_sets(self):\n # type: () -> list[DefaultManifestCondition]\n return self._adaptation_sets", "def get_all_apps(self):\n return list(self.apps.values())", "def get_manifest(self):\n return self.manifest", "def get_last_manifest(self):\n\n try:\n doc = self.db.get_document('last-manifest')\n except cbdatabase_db.NotFoundError:\n return []\n else:\n return [doc['latest_sha']] if 'latest_sha' in doc else []", "def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)", "def load_app_manifests(self):\n self.app_manifests = []\n 
apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . Doesn't match naming convention .\" % app_dir)", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_app_list(self):\n\n return self._get().keys()", "def get_queryset(self):\n return Manifest.objects.filter(pid=self.kwargs['pid'])", "def manifest(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "def manifest_dict(self):\n return self._parsed", "def manifest_dict(self):\n return self._parsed", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def applications(self) -> List[ApplicationRequestResponse]:\n return self._applications", "def get_destiny_manifest(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Manifest/\"))", "def get_all(self):\n return objects.registry.AssemblyList.get_all(self.context)", "def fetch_manifest(self):\n manifest = self.open(self.urls['manifest'])\n return manifest.read()", "async def get_installed_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_INSTALLEDAPPS, params=params)", "def build_manifest(self):\n return self._build_manifest", "def read_manifest_xml(cls, document):\n manifest = []\n with zipfile.ZipFile(document, 'a') as open_document:\n for line in open_document.open(DOCUMENT_MANIFEST_PATH):\n manifest.append(line.decode('utf-8'))\n return manifest", "def child_manifests(self, content_retriever):\n return None", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def list_installed(self) -> Generator[Path, None, None]:\n LOGGER.verbose(\"checking %s for Terraform versions...\", self.versions_dir)\n return self.versions_dir.rglob(\"*.*.*\")", "def list_envs(self):\n if self.hdfs:\n files = self.hdfs.ls(self.hdfs_home + '/.knitDeps/', True)\n return [f for f in files if f['name'].endswith('.zip')]\n else:\n raise ImportError('Set the `hdfs` attribute to be able to list'\n 'environments.')", "def get_local_app_list():\n\t\tapp_list = [\n\t\t\t{\n\t\t\t\t'name': app,\n\t\t\t\t'dir': os.path.dirname(os.path.abspath(import_module(app).__file__)),\n\t\t\t}\n\t\t\tfor app in settings.INSTALLED_APPS\n\t\t]\n\t\treturn [app for app in app_list if settings.BASE_DIR in app['dir']]", "def get_all_playbooks(self):\n return list(set(key.playbook for key in self.workflows.keys()))", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def apps(self):\n return list(self.ctx.keys())", "def listpacks(all: bool=False) -> [str, str]:\n\t# execute command\n\tcommand = ['pip', 'freeze']\n\tif all:\n\t\tcommand.append('--all')\n\tproc = 
subprocess.Popen(command, stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# process returned data\n\tlines = proc.stdout.read().decode('utf8')\n\tlines = list(\n\t\tfilter(\n\t\t\tlambda inf: inf[0] and inf[0].split(' ')[0].lower() != '-e',\n\t\t\tmap(\n\t\t\t\tlambda inf: list(map(\n\t\t\t\t\tlambda x: x.lower().strip(),\n\t\t\t\t\tinf.split('==')\n\t\t\t\t\t)),\n\t\t\t\tlines.split('\\n')\n\t\t\t)\n\t\t)\n\t)\n\n\treturn lines", "def get_installed_apps():\n installed_apps = []\n checked = set()\n for app in settings.INSTALLED_APPS:\n if not app.startswith('django.') and not app in checked:\n mod = import_module(app)\n checked.add(app)\n if exists(mod.__file__) and isdir(dirname(mod.__file__)):\n appdir = dirname(mod.__file__)\n installed_apps.append((appdir, mod, mod.__name__.split('.')[-1]))\n return installed_apps", "def get_apps(self):\n return self.apps", "def get_applications(status):\n return status['applications']", "def is_manifest_list(self):\n return False", "def deployments(self) -> List[Dict]:\n return [\n {\n 'name': self.name,\n 'head_host': self.head_host,\n 'head_port': self.head_port,\n }\n ]", "def check_manifests(self):\n # Fetch image repositories\n repos = self.fetch_repos()\n\n # Create an empty dataframe\n df = pd.DataFrame(columns=[\"image_name\", \"age_days\", \"size_gb\"])\n\n # Loop over the repositories\n logging.info(\"Checking repository manifests\")\n for repo in repos:\n # Get the manifest for the current repository\n logging.info(\"Pulling manifests for: %s\" % repo)\n show_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"show-manifests\",\n \"-n\",\n self.name,\n \"--repository\",\n repo,\n ]\n\n result = run_cmd(show_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully pulled manifests\")\n outputs = (\n result[\"output\"]\n .replace(\"\\n\", \"\")\n .replace(\" \", \"\")[1:-1]\n .split(\"},\")\n )\n logging.info(\n \"Total number of manifests in %s: %d\" % (repo, len(outputs))\n )\n\n # Loop over the manifests for each repository\n for j, output in enumerate(outputs):\n if j < (len(outputs) - 1):\n output += \"}\"\n\n # Convert the manifest to a dict and extract timestamp\n manifest = json.loads(output)\n timestamp = pd.to_datetime(manifest[\"timestamp\"]).tz_localize(\n None\n )\n\n # Get time difference between now and the manifest timestamp\n diff = (pd.Timestamp.now() - timestamp).days\n logging.info(\n \"%s@%s is %d days old.\" % (repo, manifest[\"digest\"], diff)\n )\n\n # Check the size of each image\n image_size_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"show\",\n \"-n\",\n self.name,\n \"--image\",\n f\"{repo}@{manifest['digest']}\",\n \"--query\",\n \"imageSize\",\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(image_size_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n image_size = int(result[\"output\"]) * 1.0e-9\n\n # Append to dataframe\n df = df.append(\n {\n \"image_name\": f\"{repo}@{manifest['digest']}\",\n \"age_days\": diff,\n \"size_gb\": image_size,\n },\n ignore_index=True,\n )\n\n return df", "def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}", "def external_archives(self):\n return self._external_archives", "def get_manifest_and_flags(self, manifest_id):\n if manifest_id == 
mercurial.node.nullid:\n return {}, {}\n revid = self._lookup_revision_by_manifest_id(manifest_id)\n return self.get_manifest_and_flags_by_revid(revid)", "def applications(self):\r\n return applications.Applications(self)", "def get_middlewares(self) -> List:\n\n raise NotImplementedError()", "def list_compute_packages(self):\n return set(self.compute_packages.keys())", "def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al", "def parse_manifest(self):\n import json\n import struct\n\n num = self.selection\n try:\n manifest = json.loads(self.cryptomattes[num].get(\"manifest\", \"{}\"))\n except:\n manifest = {}\n from_names = {}\n from_ids = {}\n\n unpacker = struct.Struct('=f')\n packer = struct.Struct(\"=I\")\n for name, value in manifest.iteritems():\n packed = packer.pack(int(value,16))\n packed = packed = '\\0' * (4 - len(packed)) + packed\n id_float = unpacker.unpack( packed )[0]\n name_str = str(name)\n from_names[name_str] = id_float\n from_ids[id_float] = name_str\n\n self.cryptomattes[num][\"names_to_IDs\"] = from_names\n self.cryptomattes[num][\"ids_to_names\"] = from_ids\n\n global g_cryptomatte_manf_from_names\n global g_cryptomatte_manf_from_IDs\n g_cryptomatte_manf_from_names = from_names\n g_cryptomatte_manf_from_IDs = from_ids\n\n return from_names", "def get_applications(site) -> list:\n collection = site.Collection\n result = []\n for i in range(collection.Count):\n prop = collection[i].Properties\n result.append(SiteApplication(\n prop[\"path\"].Value,\n prop[\"applicationPool\"].Value\n ))\n\n return result", "def do_get_repository_manifests(cs, args):\n resp, data = cs.repositories.get_manifests(args.repository, args.tag)\n utils.print_dict(data)", "def get_sync_masters(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.SYNCMASTER:\n ret.append(i)\n return ret", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def get_manifest_packages(cache=False,\n manifest_url=None,\n output_dir='.',\n output_filename='manifest.pkgs.txt'):\n output = os.path.join(output_dir, output_filename)\n cmd = ('''(wget -qO - %r || cat %r)'''\n ''' | awk '{ print $1 }' '''\n ''' | sort -u > %r''' % (manifest_url, manifest_url, output))\n ensure_file(cmd, output, shell=True, overwrite=not(cache))\n manifest = list(read_lines(output))\n return manifest", "def find_fluxcd_manifests_directory(path, name):\n def _is_fluxcd_app_compliant(path):\n \"\"\"Check if the directory has the desired FluxCD app structure\"\"\"\n mandatory_components = (\"base\", constants.APP_ROOT_KUSTOMIZE_FILE)\n check_mandatory = all(comp in os.listdir(path)\n for comp in mandatory_components)\n return check_mandatory\n\n mfiles = []\n manifest_dir_abs = os.path.join(path, constants.APP_FLUXCD_MANIFEST_DIR)\n if os.path.isdir(manifest_dir_abs) and \\\n _is_fluxcd_app_compliant(manifest_dir_abs):\n mfiles.append(((\"{}-{}\".format(name, constants.APP_FLUXCD_MANIFEST_DIR)),\n manifest_dir_abs))\n return mfiles", "def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "def services():\n return list(set(chain(*restart_map().values())))", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def get_installed_versions(cls) 
-> list[str]:\n\n pyenv_root = os.getenv(\"PYENV_ROOT\")\n if pyenv_root is None:\n raise Failure(\"PYENV_ROOT is not configured\")\n\n root_dir = Path(pyenv_root)\n version_dir = root_dir / \"versions\"\n\n return [i.name for i in version_dir.iterdir() if i.is_dir()]", "def deploy_environments(self):\n for key in self.deploy:\n yield key", "def required_processes(self):\n return {name for name, flag in self._required_processes.items() if flag}", "def get_package_lists(manifest_url=MANIFEST_URL, cache=False, output_dir=None):\n\n installed = get_installed_packages(cache=cache,\n output_dir=output_dir)\n manifest = get_manifest_packages(manifest_url=manifest_url,\n cache=cache,\n output_dir=output_dir)\n\n return installed, manifest", "async def running(self) -> list[dict[str, Any]]:\n data = await self.controller.request(\"get\", \"watering/program\")\n return cast(list[dict[str, Any]], data[\"programs\"])", "def remove_source_paths(cls, manifest_xml_content):\n manifest = []\n for line in manifest_xml_content:\n if 'Scripts' not in line:\n manifest.append(line)\n return manifest", "def list_themes(self):\n return sorted(self.themes.values(), key=attrgetter('identifier'))", "def get_allref(self):\n return self.__applicationList.keys()", "def get_all_versions(cls) -> list[str]:\n\n s = run([cls.command, \"install\", \"-l\"])\n versions = s.split()\n\n return list(filter(cls._is_python_version, versions))", "def get_possible_absentees(self) -> List[QualifiedAgent]:\n wum: WorklistUpdateManagerApi = self._service_provider.get_service(WorklistUpdateManagerApi)\n return self._rem_iter_handler.consume(\n wum.get_possible_absentees(),\n \"agents\",\n PossAbsRemoteIteratorApi,\n PossAbsRemoteIteratorApi.poss_abs_get_next,\n )", "def packages(self):\n return []", "def load_model_manifest(rel_path=\"model_manifest.json\"):\n manifest = []\n manifest_path = \"{}/{}\".format(Path(__file__).parents[1], rel_path)\n if path.exists(manifest_path):\n with open(manifest_path) as json_file:\n manifest = json.load(json_file)\n return manifest", "def find_all_activities(manifest_path):\n tree = ET.parse(manifest_path)\n root = tree.getroot()\n\n package = root.attrib['package']\n\n application = [child for child in root if child.tag == 'application']\n activities = [x for x in application[0] if x.tag == 'activity']\n\n result = []\n\n for activity in activities:\n is_launcher_activity = False\n\n intent_filters = [x for x in activity if x.tag == 'intent-filter']\n for intent_filter in intent_filters:\n if intent_filter.attrib != {}:\n for category in intent_filter:\n if category.tag == 'category':\n if ('{http://schemas.android.com/apk/res/android}name' in category.attrib\n and category.attrib['{http://schemas.android.com/apk/res/android}name'] == \"android.intent.category.LAUNCHER\"):\n is_launcher_activity = True\n\n result.append( (get_activity_name(activity), is_launcher_activity) )\n return result", "def list_():\n ret = []\n states_path = _states_path()\n if not os.path.isdir(states_path):\n return ret\n\n for state in os.listdir(states_path):\n if state.endswith((\"-pkgs.yml\", \"-reps.yml\")):\n # Remove the suffix - both files start with the freezer\n # name\n ret.append(state[:-9])\n return sorted(set(ret))", "def get_manifest(self):\n url = f'samples/{self.uuid}/manifest'\n return self.knex.get(url)", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def capabilities(self):\n return []", "def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return 
Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def get(self):\n deploys = get_heroku_deployments('SERVER GOES HERE!')\n write_msg(deploys)\n return deploys", "def get_all_environments():\n return ENVIRONMENTS", "def findAttrPresets(node):\n \n attrList = []\n \n ntype = cmds.nodeType( node )\n ppath = cmds.internalVar( userPrefDir = True )\n ppath = ppath.replace('prefs','presets/attrPresets')\n ppath = ppath + ntype + \"/\"\n \n if os.path.exists(ppath):\n attrListT = os.listdir(ppath)\n for attr in attrListT:\n attrList.append(attr.replace('.mel',''))\n \n return attrList", "def get_applied_adaptations_for_workflow(workflow_name):\n all_applied = get_applied_adaptations_by_workflows()\n if workflow_name not in all_applied:\n return []\n else:\n return all_applied[workflow_name]", "def all():\n return MIGRATIONS.values()", "def firmwares(self):\n return FirmwareCollection(client=self)", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def get_request_candidates(self):\n return os.listdir(self.cache_dir_)", "def read_manifest(self): # -> None:\n ...", "def get_resources(self):\n return []", "def listPlugins():\n plugins = []\n for dist in pkg_resources.working_set:\n if dist.project_name.startswith('fsleyes-plugin-'):\n plugins.append(dist.project_name)\n return list(sorted(plugins))", "def get_programs(self):\n return self._programs", "def available_shells(self):\n return list(iterkeys(self._shells))", "def resources(self) -> pulumi.Output[Sequence['outputs.MachineExtensionResponse']]:\n return pulumi.get(self, \"resources\")", "def get_applied_migrations(self):\n with self.internal_db.begin() as conn:\n sql = \"SELECT name from migration;\"\n migrations = conn.execute(sql).fetchall()\n return [m[0] for m in migrations]", "def _buildmanifest(self):\n\n man = self._parents[0].manifest().copy()\n copied = self._repo.dirstate.copies()\n is_exec = util.execfunc(self._repo.root,\n lambda p: man.execf(copied.get(p,p)))\n is_link = util.linkfunc(self._repo.root,\n lambda p: man.linkf(copied.get(p,p)))\n modified, added, removed, deleted, unknown = self._status[:5]\n for i, l in ((\"a\", added), (\"m\", modified), (\"u\", unknown)):\n for f in l:\n man[f] = man.get(copied.get(f, f), nullid) + i\n try:\n man.set(f, is_exec(f), is_link(f))\n except OSError:\n pass\n\n for f in deleted + removed:\n if f in man:\n del man[f]\n\n self._manifest = man", "def get_package_resources(self):\n resources = []\n for pkg in self.packages:\n resource_data = self.get_resource_data()\n resources.extend(pkg.get_resources(resource_data))\n return resources", "def get_instance_essentials(self):\n ret = []\n for instance in self.all_instances:\n ret.append(instance.get_essentials())\n return ret", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications 
registered\")\n return \"\"" ]
[ "0.7041786", "0.6661976", "0.65691805", "0.62815154", "0.62224513", "0.6135718", "0.60951227", "0.60727566", "0.5942932", "0.58993065", "0.588448", "0.58641076", "0.58577496", "0.5830976", "0.58182544", "0.58155537", "0.58051246", "0.5784909", "0.5783124", "0.57371914", "0.57371914", "0.5672125", "0.5656068", "0.5637391", "0.56110626", "0.55923384", "0.55891746", "0.5563083", "0.5524952", "0.55092067", "0.5499222", "0.5495533", "0.54884106", "0.5479506", "0.546607", "0.5462994", "0.5445362", "0.54156154", "0.5412924", "0.53885466", "0.53884375", "0.5384109", "0.5380429", "0.537286", "0.5368483", "0.5360973", "0.53443027", "0.53400177", "0.53347516", "0.5332528", "0.533039", "0.53287387", "0.53239244", "0.5317823", "0.5314675", "0.53101057", "0.53047585", "0.529555", "0.529524", "0.52881163", "0.5284775", "0.52754474", "0.5272948", "0.5266615", "0.5265854", "0.52636766", "0.526095", "0.5260829", "0.5256532", "0.52550316", "0.52522206", "0.525142", "0.52491164", "0.52455395", "0.523238", "0.5225628", "0.52223474", "0.5218467", "0.5215555", "0.5215123", "0.52129346", "0.5194788", "0.51905966", "0.51885164", "0.5186543", "0.5183387", "0.51812047", "0.5179448", "0.51723045", "0.51676655", "0.51659614", "0.5165285", "0.51607084", "0.51566607", "0.5155378", "0.5145443", "0.5137355", "0.5137004", "0.51365787", "0.51312417" ]
0.5902238
9
Avoid using literal_eval for simple addition expressions. Returns sum of all positive numbers.
def split_and_sum(expression):
    split_vals = expression.split('+')
    float_vals = [float(v) for v in split_vals]
    total = sum([v for v in float_vals if v > 0.0])
    return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_sum(parse_result):\r\n total = 0.0\r\n current_op = operator.add\r\n for token in parse_result:\r\n if token == '+':\r\n current_op = operator.add\r\n elif token == '-':\r\n current_op = operator.sub\r\n else:\r\n total = current_op(total, token)\r\n return total", "def clean_eval(exp):\n\n # Split expression using '+' as our split token\n number_string = exp.split(\"+\")\n total = int()\n\n # Cost each number string to int, cleaning up leading zeros, then total\n for num in number_string:\n total += int(num)\n\n return total", "def sum(*args):\n result = 0\n for i in args:\n result += i\n return result", "def add(numbers):\n sum1 = 0\n for i in numbers:\n sum1 = sum1+i\n\n return sum1", "def SUM(strArg, composList, atomDict):\n accum = 0.0\n for atom, num in composList:\n tStr = strArg.replace('DEADBEEF', atom)\n accum = accum + eval(tStr) * num\n return accum", "def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)", "def sum_plus(t, init):\n total = init\n for x in t:\n total += x\n return total", "def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")", "def calc_sum(a, b, c, d, e):\n return (a + b + c + d + e)", "def add(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n summation = str(ft.reduce(oper.add,values))\n return summation", "def sum_values(values):\n return (sum(values))", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s", "def sum(*args):\n return reduce(lambda x, y: x + y, args)", "def add(*args):\n body = ['<h1>Addition Calculator</h1>']\n _sum = sum(map(int, args))\n body.append(f'Total equals: {_sum}')\n return '\\n'.join(body)", "def test_sum(self):\r\n # Use 'x' as the first term (instead of, say, '1'), so it can't be\r\n # interpreted as a negative number.\r\n self.assertEquals(\r\n preview.latex_preview('-x+2-3+4', variables=['x']),\r\n '-x+2-3+4'\r\n )", "def sum(*nums): \n s=0\n for num in nums:\n s += num\n return s", "def add(numbers):\n result = 0\n last_two = len(numbers) - 2\n\n # finds new line at the end of the string, returns -1 if there is none\n if numbers.find(\"\\n\", last_two) is not -1:\n raise ValueError('Newline present at the end of the string')\n else:\n extracted_numbers = re.findall('[-\\d]+', numbers)\n converted_to_int = list(map(int, extracted_numbers))\n for i in converted_to_int:\n if i < 0:\n raise ValueError('Negative(s) in %s not allowed' % converted_to_int)\n elif 0 <= i <= 1000:\n result += i\n return result", "def add(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = n + result\n return result", "def _sum_sequence(seq):\n\n def _add(x, y): return x + y\n\n return reduce(_add, seq, 0)", "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def SUM(*args):\n return _group_function(lambda x, y: x + y, *args)", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def sum_numbers(numbers):\n sum = 0\n for number in numbers:\n sum += number\n\n return sum", "def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))", "def 
sum_of_numbers(numbers):\r\n return sum(numbers)", "def add(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n sum = str(args[0] + args[1])\n return sum", "def add(num1, num2):\n\n sums = num1 + num2\n return sums", "def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def add(summand_a: int, summand_b: int) -> int:\n return summand_a + summand_b\n # x = 'this is very very very very long script that has no obvious reason to exist'", "def decimal_sum(*args):\n res = 0\n for numb in args:\n try:\n res += Decimal(str(numb))\n except:\n print(f\"Argument [ {numb} ] is skipped... not a float\")\n return res", "def zsum(s, *args, **kwargs):\n return 0 if s.empty else s.sum(*args, **kwargs)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def MySum( l ):\n\n #checking if arg is a list\n if isinstance(l, list):\n\n #adding all numbs\n result = 0\n\n for numb in l:\n result = result + int(numb)\n\n return result\n\n else:\n return \"Argument is not a list\"", "def sum_numbers(op_sum):\n collection = []\n for op in op_sum.operators:\n # Strip numeric coefficient off\n collected = False\n num = 1\n n_removed = 0\n op = collect_numbers(op)\n if op.tensors[0].name == \"$number\":\n num = op.tensors[0].content\n new_op = Operator(op.tensors[1:])\n else:\n num = 1\n new_op = op\n\n # Sum the numbers of equal operators\n for i, (collected_op, collected_num) in enumerate(collection):\n if collected_op == new_op:\n collected = True\n collection[i] = (new_op, num + collected_num)\n break\n if not collected:\n collection.append((new_op, num))\n return [(o, num) for o, num in collection if abs(num) > 10**(-10)]", "def summation(self):\n return sum(self.read_ints())", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def smart_add(*args):\n result = 0\n for item in args:\n result += item\n\n return result", "def add(x, y):\n sum = 0\n sum = x + y\n return sum", "def sum_nums(n1=0, n2=0):\n return n1 + n2", "def parse_substract_and_sum(numbers, operators):\n while len(numbers) > 1:\n if operators[0] == \"+\": \n result = calc.sum(numbers[0], numbers[1])\n elif operators[0] == \"-\":\n result = calc.substract(numbers[0], 
numbers[1])\n operators, numbers = change_list_elements(operators, numbers, result, 0)\n return operators, numbers", "def sum_integer(n):\n return sum([int(elem) for elem in str(n)])", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def sum_all_element(my_list):\n result = 0\n for i in range(len(my_list)):\n result = result + int(my_list[i])\n return result", "def evaluate(self, seq, begin, end, *args):\n return reduce(operator.add, args, [])", "def sum(self, a, b):\n return int(a) + int(b)", "def ll_sum(x):\n xlist = []\n for i in x:\n for num in i:\n xlist.append(num)\n return sum(xlist)", "def _add_op(value, sample_args, rationals_allowed):\n entropy, sample_args = sample_args.peel()\n if rationals_allowed and sample_args.count >= 3:\n x = number.integer_or_rational(entropy, True)\n else:\n x = number.integer(entropy, True)\n if random.choice([False, True]):\n op_args = [x, value - x]\n else:\n op_args = [value - x, x]\n return ops.Add, op_args, sample_args", "def sum_numbers(numbers=None):\n if numbers is None:\n return sum(range(1, 101))\n else:\n return sum(numbers)", "def test02_unary_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n n += number(10)\n n -= number(10)\n n *= number(10)\n n /= number(2)\n assert n == number(100)\n\n nn = -n;\n assert nn == number(-100)", "def add(farg, *args): # *args can take 0 or more values\n print('Formal argument = ', farg)\n \n sum = 0 \n for i in args:\n sum+=i\n print('Sum of all numbers =', (farg+sum))", "def add(*args):\n\n result = int(args[0]) + int(args[1])\n\n return str(result)", "def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total", "def kkAdd(*args):\n if (None in args):\n return None\n total = 0\n for arg in args:\n total += arg\n return total", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def addition(x, y):\n\n if isinstance(x, int) and isinstance(y, int):\n return x + y\n else:\n return (\"Invalid type.\")", "def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def _eval(self, v):\n if v.dtype == np.complex64 or v.dtype == np.complex128:\n return ne.evaluate('sum(real(v * conj(v)))')\n else:\n return ne.evaluate('sum(v * v)')", "def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)", "def sum_num(a, b):\n return a + b", "def visit_Unary(self, node):\n op = node.op.type\n if op == 
PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)", "def addition_homework(data: Iterator[str]) -> int:\n n = final_sum(data)\n return n.magnitude", "def add_numbers(a,b):\r\n return a+ b", "def suma(x, y):\n return x + y", "def evaluate(expr: str) -> int:\n output = []\n operators = []\n for token in expr.replace(\" \", \"\"):\n if token.isdigit(): # we assume that there isn't any number > 9 in expr\n output.append(int(token))\n elif token == \"(\":\n operators.append(\"(\")\n elif token in [\")\", \"+\", \"*\"]:\n while operators and operators[-1] != \"(\":\n op = operators.pop()\n if op == \"+\":\n output.append(output.pop() + output.pop())\n elif op == \"*\":\n output.append(output.pop() * output.pop())\n if token != \")\":\n operators.append(token)\n elif operators:\n operators.pop()\n\n while operators:\n op = operators.pop()\n if op == \"+\":\n output.append(output.pop() + output.pop())\n elif op == \"*\":\n output.append(output.pop() * output.pop())\n\n return output[0]", "def sum(num1, num2):\n return num1 + num2", "def minSums(numbers, num_sum):\n\n def generate_permutations(numbers):\n \"\"\" [string,] Given a string of numbers, generate all possible permutations\n of the numbers with plusses in between. \"1\" returns \"1\".\n \"11\" returns [\"11\", \"1+1\"], etc \"\"\"\n\n permutations = list()\n temp = list()\n # Generate all possible permutations of numbers and plusses, record\n # the number of plus signs as cost.\n for i, num in enumerate(numbers):\n # Base case, append the number and cost of 0\n if i == 0:\n permutations.append((num, 0))\n else:\n # Iterate through permutations, appending new items to temp.\n # Strings can be permutated two ways: string + char,\n # string + '+' + char\n for item in permutations:\n temp.append((item[0] + num, item[1]))\n temp.append((item[0] + '+' + num, item[1] + 1))\n # Now we move temp to permutations and clear out temp.\n permutations = temp\n temp = list()\n return permutations\n\n def clean_eval(exp):\n \"\"\" (int) Evaluate expression, ensuring we account for weirdness with\n leading zeros, etc. \"\"\"\n\n # Split expression using '+' as our split token\n number_string = exp.split(\"+\")\n total = int()\n\n # Cost each number string to int, cleaning up leading zeros, then total\n for num in number_string:\n total += int(num)\n\n return total\n\n # Create a dictionary of each permutations' sum and cost. Cost is defined as\n # the total number of additions necessary to generate the sum.\n value_costs = dict()\n\n # Iterate through permutations and populate values and their\n # respective costs into value_costs. 
If we find two equal values\n # with differing costs, use the lower cost.\n for perm in generate_permutations(numbers):\n value = clean_eval(perm[0])\n cost = perm[1]\n # Default the cost to 20 as an upper limit, given our number\n # will never cost more than 9 given 10 characters max.\n if value_costs.get(value, 20) > cost:\n value_costs[value] = cost\n\n return value_costs.get(num_sum, -1)", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def sum(num_1, num_2):\n return num_1 + num_2", "def suma(a, b):\n\n\ttotal = a + b\n\treturn total", "def sum(a, b):\n return a + b", "def sum(a, b):\n return a + b", "def sumDigits(s):\n sum = 0\n for c in s:\n try: \n sum += int(c)\n except ValueError:\n continue\n return sum", "def sumDigits(s):\n _sum = 0\n for char in s:\n try:\n _sum += int(char)\n except:\n pass\n return _sum", "def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total", "def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)", "def sumation(cmds):\n numbers = []\n sum = 0\n try: \n for i in cmds[1:]:\n numbers.append(float(i))\n for l in numbers:\n sum = sum + l\n print(sum)\n except TypeError:\n print(\"Hmmm, I guess you haven't only entered valid numbers\")", "def test_arithmetic(self):\n for test in [\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Int(5), right = sir.Int(6)), SymbolType.Integer),\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Bytes('05'), right = sir.Bytes('06')), SymbolType.Integer),\n ]:\n self._test(test)", "def add_no_carry(*args):\r\n num_digits = []\r\n\r\n for arg in args:\r\n num_digits.append(len(str(arg)))\r\n\r\n max_digits = max(num_digits)\r\n # list comprehension way\r\n # max_digits = max([len(str(arg)) for arg in args])\r\n final_sum = 0\r\n\r\n for pwr in range(1, max_digits + 1): # iterate through ea decimal\r\n result_no_carry = 0\r\n for arg in args:\r\n if len(str(arg)) >= pwr:\r\n # modulus sets the current decimal as the most significant\r\n # decimal\r\n # floor div selects the most significant decimal\r\n result_no_carry += arg % 10**pwr // 10**(pwr - 1)\r\n\r\n # list comprehension way\r\n # result_no_carry = sum([arg % 10**pwr // 10**(pwr - 1) for arg in args if len(str(arg)) >= pwr])\r\n\r\n # final_sum = str(result_no_carry % 10) + final_sum\r\n final_sum += result_no_carry % 10\r\n\r\n return int(final_sum)", "def solve_equation_addition_precendence(eq, verbose=False):\n tokens = tokenize(eq)\n if verbose:\n print(f\"eq: {tokens}\")\n\n stack = []\n ops = {\n None: do_push,\n \"(\": do_push,\n \")\": do_parenthesis,\n \"+\": do_addition,\n \"*\": do_push,\n }\n\n for t in tokens:\n if isinstance(t, int):\n op = stack[-1] if len(stack) else None\n ops[op](stack, t)\n elif t == \"+\" or t == \"*\" or t == \"(\":\n stack.append(t)\n elif t == \")\":\n ops[\")\"](stack, t)\n # solve preparenthesis addition\n if len(stack) > 2:\n v = stack.pop()\n assert isinstance(v, int)\n ops[stack[-1]](stack, v)\n else:\n assert False, f\"fail token: {t}\"\n\n if verbose:\n print(f\"stack: {stack}\")\n\n # solve multiplications\n while len(stack) > 1:\n rhs = stack.pop()\n assert isinstance(rhs, int)\n op = stack.pop()\n if op == \"*\":\n lhs = stack.pop()\n assert isinstance(lhs, int)\n stack.append(lhs * rhs)\n else:\n assert False, 
f\"invalid operator (not *): {op}\"\n\n assert len(stack) == 1\n return stack[0]", "def add(self):\n return self._do_calc(self.adder)", "def add(self):\n return self._do_calc(self.adder)", "def add(self):\n return self._do_calc(self.adder)", "def add(n1, n2):\n return n1 + n2", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def add_numbers(x, y):\n return x + y", "def power_sum(power, *args):\n total = 0\n for i in args:\n total += pow(i, power)\n return total", "def action_store_sum(raw_val):\n\n if isinstance(raw_val, list):\n val_sum = None\n for val in raw_val:\n val = auto_type_convert(val)\n if isinstance(val, (int, float)):\n if val_sum is None:\n val_sum = val\n else:\n val_sum += val\n return val_sum\n else:\n return None", "def sum(self):\n return sum(self.values)", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def add(n1, n2):\n return n1 + n2", "def add(num1, num2):\n sum = num1 + num2\n return sum", "def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s", "def test_add_all_args_less_zero(self):\n try:\n self.assertEqual(add(-7, -11), -18)\n except Exception as error:\n print(error)", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left" ]
[ "0.71442914", "0.70861304", "0.62418014", "0.62357926", "0.6229933", "0.62040615", "0.6063164", "0.60560673", "0.6047006", "0.5978517", "0.59740245", "0.59523994", "0.59317786", "0.59307003", "0.5925646", "0.5923283", "0.5915873", "0.5901256", "0.58456707", "0.5834098", "0.5808596", "0.5800791", "0.57977", "0.57880867", "0.5779724", "0.577967", "0.57756484", "0.5774415", "0.57724166", "0.57542914", "0.5744847", "0.5742317", "0.5740551", "0.57394785", "0.57264674", "0.5715645", "0.5708675", "0.5704922", "0.5696304", "0.5689313", "0.5679007", "0.5671617", "0.56714725", "0.56709945", "0.5639448", "0.56207865", "0.56097704", "0.5597549", "0.5593573", "0.559197", "0.5583423", "0.55531085", "0.55486685", "0.5529187", "0.5527828", "0.5520191", "0.55130684", "0.5510381", "0.5510381", "0.55051106", "0.5492824", "0.54923254", "0.5488334", "0.5481058", "0.5476057", "0.5475317", "0.5474659", "0.54681915", "0.5467516", "0.5464599", "0.5457795", "0.5446009", "0.54389036", "0.543345", "0.54238707", "0.5423373", "0.5423373", "0.5420116", "0.5404673", "0.53956115", "0.5393596", "0.53793293", "0.5373277", "0.5373265", "0.53682446", "0.53658706", "0.53658706", "0.53658706", "0.5363786", "0.5362728", "0.53612846", "0.53600127", "0.5354961", "0.5354469", "0.5354388", "0.5351524", "0.53503877", "0.534719", "0.53463715", "0.5341704" ]
0.65476674
2
Requests the last known production mix (in MW) of a given country
def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None, logger=getLogger('IN-GJ')): session = session or requests.session() if target_datetime: raise NotImplementedError( 'This parser is not yet able to parse past dates') value_map = fetch_data(zone_key, session, logger=logger) data = { 'zoneKey': zone_key, 'datetime': value_map['date'].datetime, 'production': { 'biomass': None, 'coal': value_map.get('coal', 0), 'gas': value_map.get('gas', 0), 'hydro': value_map.get('hydro', 0), 'nuclear': value_map.get('nuclear', 0), 'oil': None, 'solar': value_map.get('solar', 0), 'wind': value_map.get('wind', 0), 'geothermal': None, 'unknown': value_map.get('unknown', 0) }, 'storage': { 'hydro': None }, 'source': 'sldcguj.com', } valid_data = validate(data, logger, remove_negative=True, floor=7000) return valid_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_inflation_country():\n print(\">> Downloading WORLD BANK inflation / country data...\")\n url = source_config.inflation_data_url['latest']\n output_file = source_config.inflation_data_files['raw']['latest']\n download_insee_excel(url, output_file, check=False)", "def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]", "def maxRetrieval():\n try:\n if request.method == 'GET':\n country = request.args.get('country') # If no key then null\n year = request.args.get('year') # If no key then null\n return spout(country, year, detail=1)\n except Exception as e:\n # Unfortunately I'm not going to wrap this in indv. strings.\n r = Response(response=error_msg+str(e),\n status=404,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r", "def fetch_production(\n zone_key: str = \"JP-KY\",\n session: Optional[Session] = None,\n target_datetime: Optional[datetime] = None,\n logger: Logger = getLogger(__name__),\n) -> Union[dict, list]:\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n data = {\n \"zoneKey\": zone_key,\n \"datetime\": None,\n \"production\": {\n \"biomass\": 0,\n \"coal\": 0,\n \"gas\": 0,\n \"hydro\": None,\n \"nuclear\": None,\n \"oil\": 0,\n \"solar\": None,\n \"wind\": None,\n \"geothermal\": None,\n \"unknown\": 0,\n },\n \"storage\": {},\n \"source\": \"www.kyuden.co.jp\",\n }\n # url for consumption and solar\n url = \"https://www.kyuden.co.jp/td_power_usages/pc.html\"\n r = get(url)\n r.encoding = \"utf-8\"\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n # get hours, minutes\n ts = soup.find(\"p\", class_=\"puProgressNow__time\").get_text()\n hours = int(re.findall(r\"[\\d]+(?=時)\", ts)[0])\n minutes = int(re.findall(r\"(?<=時)[\\d]+(?=分)\", ts)[0])\n # get date\n ds = soup.find(\"div\", class_=\"puChangeGraph\")\n date = re.findall(r\"(?<=chart/chart)[\\d]+(?=.gif)\", str(ds))[0]\n # parse datetime\n dt = f\"{date[:4]}-{date[4:6]}-{date[6:]} {hours:02d}:{minutes:02d}\"\n dt = arrow.get(dt).replace(tzinfo=\"Asia/Tokyo\").datetime\n data[\"datetime\"] = dt\n # consumption\n cons = soup.find(\"p\", class_=\"puProgressNow__useAmount\").get_text()\n cons = re.findall(\n r\"(?<=使用量\\xa0)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?(?=万kW/)\",\n cons,\n )\n cons = cons[0].replace(\",\", \"\")\n # convert from 万kW to MW\n cons = float(cons) * 10\n # solar\n solar = soup.find(\"td\", class_=\"puProgressSun__num\").get_text()\n # convert from 万kW to MW\n solar = float(solar) * 10\n\n # add nuclear power plants\n # Sendai and Genkai\n url_s = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/sendai/rename.php?\",\n \"A=s_power.fdat&B=ncp_state.fdat&_=1520532401043\",\n ]\n )\n url_g = \"\".join(\n [\n 
\"http://www.kyuden.co.jp/php/nuclear/genkai/rename.php?\",\n \"A=g_power.fdat&B=ncp_state.fdat&_=1520532904073\",\n ]\n )\n sendai = get(url_s).text\n sendai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n sendai,\n )\n genkai = get(url_g).text\n genkai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n genkai,\n )\n nuclear = 0\n for sendai_i in sendai:\n nuclear += float(sendai_i)\n for genkai_i in genkai:\n nuclear += float(genkai_i)\n # convert from 万kW to MW\n nuclear = nuclear * 10\n\n # add the exchange JP-CG->JP-KY\n exch_list = occtonet.fetch_exchange(\"JP-KY\", \"JP-CG\")\n # find the nearest exchanges in time to consumption timestamp\n nearest_exchanges = sorted(exch_list, key=lambda exch: abs(exch[\"datetime\"] - dt))\n # take the nearest exchange\n exch = nearest_exchanges[0]\n # check that consumption and exchange timestamps are within a 15 minute window\n if abs(dt - exch[\"datetime\"]).seconds <= 900:\n\n generation = cons - exch[\"netFlow\"]\n data[\"production\"][\"solar\"] = solar\n data[\"production\"][\"nuclear\"] = nuclear\n data[\"production\"][\"unknown\"] = generation - nuclear - solar\n\n return data\n else:\n return []", "def getUS() -> int:\n pass", "def get_population(country_name):\n country_population=set_country_populations_dict()\n country_data=country_population[country_name]\n population_data=country_data[0]\n return population_data", "def fetch_production(zone_key='US-NY', session=None, target_datetime=None, logger=None):\n if target_datetime:\n # ensure we have an arrow object\n target_datetime = arrow.get(target_datetime)\n else:\n target_datetime = arrow.now('America/New_York')\n\n if (arrow.now() - target_datetime).days > 9:\n raise NotImplementedError('you can get data older than 9 days at the '\n 'url http://mis.nyiso.com/public/')\n\n ny_date = target_datetime.format('YYYYMMDD')\n mix_url = 'http://mis.nyiso.com/public/csv/rtfuelmix/{}rtfuelmix.csv'.format(ny_date)\n try:\n raw_data = read_csv_data(mix_url)\n except HTTPError:\n # this can happen when target_datetime has no data available\n return None\n\n clean_data = data_parser(raw_data)\n\n production_mix = []\n for datapoint in clean_data:\n data = {\n 'zoneKey': zone_key,\n 'datetime': timestamp_converter(datapoint[0]),\n 'production': datapoint[1],\n 'storage': {},\n 'source': 'nyiso.com'\n }\n\n production_mix.append(data)\n\n return production_mix", "def __get_high_res_url(country) -> str:\n wiki_stem = \"https://en.wikipedia.org\"\n country_page = requests.get(f\"{wiki_stem}/wiki/{country}\")\n country_doc = HTML(country_page.content)\n [v_card] = country_doc.xpath('.//table[@class=\"infobox geography vcard\"]')\n [flag_elem] = v_card.xpath('.//a[@class=\"image\" and contains(@title, \"Flag\")]')\n flag_page_url = f\"{wiki_stem}{flag_elem.attrib['href']}\"\n flag_page = requests.get(flag_page_url)\n doc = HTML(flag_page.content)\n [flag_url_elem] = doc.xpath('.//div[@id=\"file\"]/a/img')\n return f\"https:{flag_url_elem.attrib['src']}\"", "def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)", "def test_last_max_default(client):\n client.get(\"/weather/viena\")\n client.get(\"/weather/sorocaba\")\n client.get(\"/weather/barcelona\")\n client.get(\"/weather/belo horizonte\")\n client.get(\"/weather/rio de janeiro\")\n client.get(\"/weather/recife\")\n\n\n # Validate weither return two lasts cities in cache or not\n response = 
client.get(\"/weather/6\")\n print(response.data)\n assert (b\"Barcelona\" in response.data) & (b\"Recife\" in response.data) & (b\"Rio de Janeiro\" in response.data) & (b\"Belo Horizonte\" in response.data) & (b\"Sorocaba\" in response.data)", "def test_top_country(self):\n tabular_format_countries_list = [['Canada', 66, '20'], ['United States', 33, '10']]\n\n result = InstallationStatistics.get_statistics_top_country(tabular_format_countries_list)\n\n self.assertEqual('Canada', result)", "def get_country(user, session, flag_pattern):\r\n page = \"https://www.fanfiction.net/u/\" + str(user)\r\n country = \"\"\r\n with closing(session.get(page, timeout=10.0, stream=True)) as r:\r\n lines = 0;\r\n for rline in r.iter_lines(chunk_size=10):\r\n lines += 1\r\n rstr = repr(rline)\r\n if rstr.find('Joined <sp') > 0:\r\n match = re.search(flag_pattern, rstr)\r\n if match:\r\n country = match.group(1)\r\n break\r\n if lines > 600:\r\n break\r\n return country", "def country() -> str:", "def get_last_dynamic_bias(self, x: np.ndarray,\n country_df: pd.DataFrame) -> np.ndarray:", "def fetch_weather(y):\r\n # request parameter(s): Start with '?'\r\n # separate name and value with '='\r\n # multiple parameter name value pairs are separate with '&'\r\n query_string = \"?id={}&units=imperial&APIKEY={}\".format(y, API_KEY)\r\n request_url = WS_URL + query_string\r\n print(\"Request URL: \", request_url)\r\n response = requests.get(request_url)\r\n if response.status_code == 200:\r\n city_name = response.json()[\"city\"][\"name\"]\r\n lst = response.json()[\"list\"]\r\n tmp_list = []\r\n for i in range(len(lst) // 8):\r\n li = [x for x in range(len(lst)) if x // 8 == i]\r\n tmp_list.append(max([lst[j][\"main\"][\"temp_max\"] for j in li]))\r\n return City(city_name, tmp_list)\r\n else:\r\n print(\"How should I know?\")\r\n return None", "def get(world, country, json):\n if world:\n dt = data.get_global_data()\n pretty_print(dt, json)\n if country:\n dt = data.get_country_data(country)\n pretty_print(dt, json)", "def test_last_cities(client):\n client.get(\"/weather/viena\")\n client.get(\"/weather/sorocaba\")\n client.get(\"/weather/barcelona\")\n\n\n # Validate weither return two lasts cities in cache or not\n response = client.get(\"/weather/1\")\n print(response.data)\n assert b\"Barcelona\" in response.data", "def find_store(request):\n r = {'result':'-1'}\n \n import httplib, urllib\n\n h = httplib.HTTPConnection(\"api.remix.bestbuy.com\")\n lat = request.POST['lat']\n lon = request.POST['lon']\n distance = request.POST['distance']\n\n h.request('GET', '/v1/stores(area(%s,%s,%s))?format=json&apiKey=%s'%(lat, lon, distance, api_key))\n\n result = h.getresponse()\n logger.info( \"BestBuy Location HTTP output: %s, reason: %s\"%(result.status, result.reason) )\n response = json.loads(result.read())\n\n stores = response.get(\"stores\", [])\n if len(stores) > 0: \n r['result'] = stores[0]\n\n return JSONHttpResponse(r)", "def get_max_fp(state_abbr, fuel_type=\"NG\", year=False):\n \n if(not year):\n\n year = UpdateParams.today.year\n\n if fuel_type.upper() == \"NG\":\n\n series_ID = \"NG.N3035\" + state_abbr + \"3.A\"\n\n elif fuel_type.upper() == \"COAL\":\n\n series_ID = \"COAL.COST.\" + state_abbr + \"-10.A\"\n\n elif fuel_type.upper() == \"PETRO\":\n\n series_ID = \"PET.EMA_EPPR_PWA_S\" + state_abbr + \"_DPG.A\"\n\n else:\n raise AssertionError(\"Please input a valid fuel_type\")\n \n # Check if state-level available, if not return USA price\n try:\n fuel_series = 
UpdateParams.api.data_by_series(series=series_ID)\n\n dict_key = list(fuel_series.keys())[0]\n\n # if fuel price in state is empty return national price\n if all(v is None for v in list(fuel_series[dict_key].values())):\n \n return 0.0\n \n except KeyError:\n \n return 0.0\n \n j = 0\n \n while True:\n\n try:\n return fuel_series[dict_key][str(year-j) + \" \"] / 1.0\n\n break\n\n except:\n\n j += 1", "def capacity_per_power_plant(data, power_plant_type, country='World'):\n aux_data = deepcopy(data)\n if country == 'World':\n capacity = aux_data[power_plant_type == aux_data['primary_fuel']]['capacity_mw']\n else:\n capacity = aux_data[(power_plant_type == aux_data['primary_fuel']) & (country == aux_data['country_long'])][\n 'capacity_mw']\n del aux_data\n return capacity", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def test_get_last_cities_nocache(client):\n response = client.get(\"/weather/1\")\n assert response.status_code == 200", "def execute_req2(catalog, req_country):\n return controller.execute_req2(catalog, req_country)", "def test_response_body_element(BASE_URL, COUNTRY_CODE):\n # make request\n result = requests.get(f'{BASE_URL}{COUNTRY_CODE}')\n assert result.json()['country'] == 'United States'", "def get_cheapest_flight():\n\n user_agent = [\n\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'\n 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',\n 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:24.0) Gecko/20100101 Firefox/24.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0',\n 'Opera/9.80 (Linux armv7l) Presto/2.12.407 Version/12.51 , D50u-D1-UHD/V1.5.16-UHD (Vizio, D50u-D1, Wireless)',\n\n ]\n\n proxy_list = ssl_proxies()\n # print(len(proxy_list))\n random_agent = random.randint(0, len(user_agent)-1)\n random_proxy = random.randint(0, len(proxy_list)-1)\n\n headers = { 'User-agent' : user_agent[random_agent],\n 'Connection' : 'close',\n 'Upgrade-Insecure-Requests': '1',\n 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Sec-Fetch-Site' : 'same-origin',\n 'Sec-Fetch-Mode' : 'navigate',\n 'Sec-Fetch-User' : '?1',\n 'Sec-Fetch-Dest' : 'document',\n 'Referer' : 'https://www.cheapflights.co.za/',\n 'Accept-Encoding': 'gzip, deflate' ,\n 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',\n }\n\n if check_proxy_status:\n proxies = proxy_list[random_proxy]\n print(\"Using http proxy.\")\n else:\n print(\"Proxy Down, using original ip.\")\n proxies = {}\n\n\n now = datetime.datetime.now()\n\n request_session = requests.Session()\n flight_request_uri = f\"https://www.cheapflights.co.za/flight-search/{arguments.cfrom}-{arguments.to}/{now.year}-{arguments.month}-{arguments.day}?sort=price_a\"\n\n # print(colored(flight_request_uri, 'white')) the url.\n # print(len(request.text))\n request = request_session.get(flight_request_uri, headers=headers, proxies=proxies)\n\n length = 
len(request.text)\n if request.text.find(\"\"\"If you are seeing this page, it means that Cheapflights thinks you are a \"bot,\" and the \n page you were trying to get to is only useful for humans.\"\"\") > -1 :\n print(colored('Ithi Uyi\\'Robot Leshandis', 'red', attrs=['bold', 'blink']))\n \n cheapest = re.search(\"\"\"Cheapest\\n</\\w+>\\n</\\w+>\\n</\\w+>\\n</\\w+>\\n<\\w+\\s\\w+=\"\\w+\\s\\w+\">\\n<\\w+\\s\\w+='\\w+-\\w+\\s\\w+-\\\n w+\\s\\w+\\s\\w+\\s\\w+\\s\\w+\\s\\w+'\\n>\\nR\\d\\s\\d{3}\\n|R\\d{3}\\n\"\"\", request.text)\n\n try:\n get_flights(request.text)\n return(cheapest.group(0).rstrip())\n except AttributeError:\n return (colored(\"Something went wrong, Try again.\", 'red'))", "def test_single_word_swanage(self):\n result = location.lookup_location('Swanage GB')\n\n self.assertEqual(result['country'], 'GB')", "def query_api(location):\n #bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n bearer_token ='SHdrjUqMJXqXBKUc7bGIplM8y6tnbwZbXXDbWPCd9wWMP8tX9PdJrC5MZHwJRhb7jMtLjXxT-hsWjNf2OkdiDWd30HsS84AVI5iRnrpxkak3HbWXAdUKvraQ_wgXWXYx'\n response = transaction_search(bearer_token,location)\n response = response.get('businesses')\n return response", "def per_capi(country):\r\n df = ouvrir_fichier()\r\n df = df.loc[df['country'].isin([country])]\r\n df = df[(df[\r\n 'emissions'] == 'Emissions per capita (metric tons of carbon dioxide)'\r\n )]\r\n resultat = {}\r\n longeur = len(df)\r\n for i in range(longeur):\r\n resultat[int(df.iloc[i][2])] = float(df.iloc[i][4])\r\n\r\n return resultat", "def country(request):\n class Results(object):\n\n def __init__(self, cc):\n self.cc = cc\n self.registered = 0\n self.dns = 0\n self.dnf = 0\n \n def add_rider(self, rider):\n self.registered += 1\n\n if rider.dns:\n self.dns += 1\n\n if rider.dnf:\n self.dnf += 1\n\n def finish_rate(self):\n \n rate = 100*(self.registered-self.dns-self.dnf)/(self.registered-self.dns)\n return rate\n\n results = {}\n for rider in models.Rider.objects.all():\n cc = rider.country.code\n results[cc] = results.get(cc, Results(cc))\n results[cc].add_rider(rider)\n\n results = results.values()\n sort = request.GET.get('sort', 'country')\n\n if sort == \"country\":\n results.sort(key=lambda x: x.cc)\n elif sort == \"registered\":\n results.sort(key=lambda x: x.registered, reverse=True)\n elif sort == \"rate\":\n results.sort(key=lambda x: x.registered, reverse=True)\n results.sort(key=lambda x: x.finish_rate(), reverse=True)\n\n total_registered = sum([r.registered for r in results])\n total_dns = sum([r.dns for r in results])\n total_dnf = sum([r.dnf for r in results])\n overall_finish_rate = 100 * (total_registered-total_dns-total_dnf)/(total_registered-total_dns)\n\n template = env.get_template(\"country.html\")\n rendered = template.render(dict(results=results,\n country_names=countries.OFFICIAL_COUNTRIES,\n registered=total_registered,\n total_dns=total_dns,\n total_dnf=total_dnf,\n overall_finish_rate=overall_finish_rate,\n ))\n\n return HttpResponse(rendered)", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, 
Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None", "def get_budget_from_api(type_of_thing: int, qty: int):\n payload = {\"multiplier\": qty}\n if type_of_thing == 1:\n payload[\"commodity\"] = \"tomatoes\"\n elif type_of_thing == 2:\n payload[\"commodity\"] = \"broiler-chickens\"\n else:\n return None\n\n cache_key = f\"{type_of_thing}-{qty}\"\n\n val_from_cache = cache.get(cache_key)\n if val_from_cache:\n return val_from_cache\n\n json_payload = json.dumps(payload)\n\n r = requests.post(BUDGET_API_URL, json_payload)\n\n if r.status_code == 200:\n result = r.json()['data']\n cache.set(\n cache_key,\n result,\n BUDGET_CACHE_DURATION\n )\n return result\n\n return None", "def fetch_production(zone_key = 'KR', session=None, target_datetime=None, logger=getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n s=session or requests.Session()\n\n hydro, hydro_dt = fetch_hydro(s, logger)\n nuclear, nuclear_dt = fetch_nuclear(s)\n load, load_dt = fetch_load(s)\n\n generation_dts = [hydro_dt, nuclear_dt, load_dt]\n\n dt_aware = timestamp_processor(generation_dts, with_tz=True, check_delta=True)\n\n unknown = load - nuclear - hydro\n\n production = {\n 'production': {\n 'nuclear': nuclear,\n 'hydro': hydro,\n 'unknown': unknown\n },\n 'source': 'khnp.co.kr, kpx.or.kr',\n 'zoneKey': zone_key,\n 'datetime': dt_aware.datetime,\n 'storage': {\n 'hydro': None,\n 'battery': None\n }\n }\n\n return production", "def test_triple_word_weston_super_mare(self):\n result = location.lookup_location('Weston Super Mare GB')\n\n self.assertEqual(result['country'], 'GB')", "def test_get_last_cities(client):\n\n response = client.get(\"/weather/1\")\n print(response.data)\n assert response.status_code == 200", "def get_fuel_price(state_abbr, fuel_type=\"NG\", year=False):\n\n if(not year):\n\n year = UpdateParams.today.year\n\n if fuel_type.upper() == \"NG\":\n\n series_ID = \"NG.N3035\" + state_abbr + \"3.A\"\n \n series_USA = \"NG.RNGWHHD.A\"\n \n series_LA = UpdateParams.api.data_by_series(series=\"NG.N3035\" + \"LA\" + \"3.A\")\n \n dict_key_LA = list(series_LA.keys())[0]\n\n elif fuel_type.upper() == \"COAL\":\n\n series_ID = \"COAL.COST.\" + state_abbr + \"-10.A\"\n\n series_USA = \"COAL.COST.US-10.A\"\n\n elif fuel_type.upper() == \"PETRO\":\n # state level wholesale/resale price data ends 2011\n series_ID = \"PET.EMA_EPPR_PWA_S\" + state_abbr + \"_DPG.A\"\n\n series_USA = \"PET.EMA_EPPR_PWG_NUS_DPG.A\"\n\n else:\n raise AssertionError(\"Please input a valid fuel_type\")\n\n fuel_series_USA = UpdateParams.api.data_by_series(series=series_USA)\n \n dict_key_USA = list(fuel_series_USA.keys())[0]\n \n # find latest USA value\n i = 0\n\n while True:\n \n try:\n fp_USA = fuel_series_USA[dict_key_USA][str(year-i) + \" \"] / 1.0\n\n 
break\n\n except:\n \n i += 1\n\n # Check if state-level available, if not return USA price\n try:\n fuel_series = UpdateParams.api.data_by_series(series=series_ID)\n\n dict_key = list(fuel_series.keys())[0]\n\n # if fuel price in state is empty return national price\n if all(v is None for v in list(fuel_series[dict_key].values())):\n \n return (fp_USA, year-i)\n \n except KeyError:\n \n return (fp_USA, year-i)\n\n j = 0\n\n # find latest year for state\n while True:\n\n try:\n fp_state = fuel_series[dict_key][str(year-j) + \" \"] / 1.0\n\n break\n\n except:\n\n j += 1\n \n if fuel_type.upper() == \"NG\":\n # series_LA is just the actual series not a series ID\n fp_mult = fp_state / series_LA[dict_key_LA][str(year-j) + \" \"]\n return (fp_mult * fp_USA/1.037, year-j)\n \n # return USA value if 2 years more recent vs state\n if ((year-i) - (year-j) >= 2) | (fp_state >= fp_USA):\n \n return (fp_USA/1.037, year-i)\n\n return (fp_state, year-j)", "def getFishPopulation(genus):\n r = req.get(\"https://fishbase.ropensci.org/popchar?fields=\"+genus)\n my_dict = r.json()\n return my_dict['count']", "def test_get_country_by_geo_location(self):\n pass", "def test_double_word_coombe_martin(self):\n result = location.lookup_location('Combe Martin GB')\n\n self.assertEqual(result['country'], 'GB')", "async def c19_command(self, ctx, *, country: Optional[str]):\n with ctx.channel.typing():\n country = country or \"nepal\"\n logoUrl = \"http://covidcp.org/images/logo-icononly.png\"\n url = f\"https://coronavirus-19-api.herokuapp.com/countries/{country}\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n data = await resp.json()\n cntry = data[\"country\"]\n cases = data[\"cases\"]\n todayCases = data[\"todayCases\"]\n deaths = data[\"deaths\"]\n recovered = data[\"recovered\"]\n active = data[\"active\"]\n output = f\"Total Cases - **{cases}** \\n Cases Today - **{todayCases}** \\nTotal Deaths - **{deaths}** \\nActive Cases - **{active}** \\nTotal Recovered - **{recovered}**\"\n embed = Embed(\n color=Color.blurple(), timestamp=datetime.utcnow(), description=output\n )\n embed.set_author(name=f\"COVID-19 Stats for {cntry}\")\n embed.set_thumbnail(url=logoUrl)\n await ctx.send(embed=embed)", "def get_best_urn(word, metadata=None):\n metadata['word'] = word\n if not ('next' in metadata or 'neste' in metadata):\n metadata['next'] = 600\n if not 'year' in metadata:\n metadata['year'] = 1500\n r = requests.get('https://api.nb.no/ngram/best_urn', json=metadata)\n return r.json()", "def get_county() -> Dict:\n model = get_data_model()\n\n chart_ids = {\n \"cases\": \"Eq6Es\",\n \"deaths\": \"bSxdG\",\n \"age\": \"zSHDs\",\n \"gender\": \"FEciW\",\n \"race_eth\": \"aBeEd\",\n \"tests\": \"7sHQq\",\n }\n # The time series data for negative tests is gone, so I've just scraped positive test data using the new chart referenced above.\n\n with MarinDashboardPage() as page:\n model['name'] = \"Marin County\"\n model['update_time'] = datetime.now(tz=timezone.utc).isoformat()\n model[\"meta_from_baypd\"] = \"\"\n model['source_url'] = page.url\n model['meta_from_source'] = get_chart_meta(page, chart_ids.values())\n\n model[\"series\"][\"cases\"] = get_series_data(page, chart_ids[\"cases\"], ['Date', 'Total Cases', 'Total Recovered*'], \"cumul_cases\", 'Total Cases', 'cases')\n model[\"series\"][\"deaths\"] = get_series_data(page, chart_ids[\"deaths\"], ['Event Date', 'Total Hospitalizations', 'Total Deaths'], \"cumul_deaths\", 'Total Deaths', 'deaths', date_column='Event 
Date')\n\n model[\"series\"][\"tests\"] = get_test_series(page, chart_ids[\"tests\"])\n model[\"case_totals\"][\"age_group\"], model[\"death_totals\"][\"age_group\"] = get_breakdown_age(page, chart_ids[\"age\"])\n model[\"case_totals\"][\"gender\"], model[\"death_totals\"][\"gender\"] = get_breakdown_gender(page, chart_ids[\"gender\"])\n model[\"case_totals\"][\"race_eth\"], model[\"death_totals\"][\"race_eth\"] = get_breakdown_race_eth(page, chart_ids[\"race_eth\"])\n\n return model", "def get_country_by_region(region) -> tuple:\n try:\n url = URL_COUNTRY_API + f'/{region}'\n response, error = request_get(url)\n if not error:\n if (total := len(response)) > 0:\n # Random country\n country = response[randint(0, total - 1)]\n return {'country': country}, False\n else:\n return {'msg': 'Region without countries',\n 'error': 'Region without countries'}, True\n else:\n return response, error\n pass\n except Exception as ex:\n return {'msg': 'General Error in get_country_by_region',\n 'error': f'{ex}'}, True", "def pull_latest_rcp(out_file : str = \"~/Data/gt_vis_project/clean_data/polls.csv\"):\n# \n\n rcp_jsons = {\"North Carolina\" : \"https://www.realclearpolitics.com/epolls/json/6744_historical.js?1602635842295&callback=return_json\",\n \"Wisconsin\" : \"https://www.realclearpolitics.com/epolls/json/6849_historical.js?1602638476165&callback=return_json\",\n \"Florida\" : \"https://www.realclearpolitics.com/epolls/json/6841_historical.js?1602638538981&callback=return_json\",\n \"Michigan\": \"https://www.realclearpolitics.com/epolls/json/6761_historical.js?1602638561657&callback=return_json\",\n \"Pennsylvania\" : \"https://www.realclearpolitics.com/epolls/json/6861_historical.js?1602638584280&callback=return_json\",\n \"Arizona\" : \"https://www.realclearpolitics.com/epolls/json/6807_historical.js?1602638600473&callback=return_json\",\n \"Ohio\": \"https://www.realclearpolitics.com/epolls/json/6765_historical.js?1602638729359&callback=return_json\",\n \"Minnesota\" : \"https://www.realclearpolitics.com/epolls/json/6966_historical.js?1602638770015&callback=return_json\",\n \"Iowa\": \"https://www.realclearpolitics.com/epolls/json/6787_historical.js?1602638787908&callback=return_json\",\n \"Texas\" : \"https://www.realclearpolitics.com/epolls/json/6818_historical.js?1602638819149&callback=return_json\",\n \"Georgia\" : \"https://www.realclearpolitics.com/epolls/json/6974_historical.js?1602638840551&callback=return_json\",\n # Nothing from Virgina or Nevada or Colorado, or new mexico.\n \"New Hampshire\" : \"https://www.realclearpolitics.com/epolls/json/6779_historical.js?1602638879306&callback=return_json\",\n \"Maine\" : \"https://www.realclearpolitics.com/epolls/json/6922_historical.js?1602638900859&callback=return_json\",\n \"National\" :\"https://www.realclearpolitics.com/epolls/json/6247_historical.js?1602638622255&callback=return_json\"\n }\n final_df = pd.DataFrame()\n for state, url in rcp_jsons.items():\n print(state)\n # Pull in raw json\n rv = requests.get(url)\n rv = rv.content.decode('utf-8') # byte to string\n # 12:2 is to remove the return_json() characters.\n print(\"converting to dict\")\n raw_poll_json = json.loads(rv[12:-2]) # string to dict.\n \n # Convert to df\n print(\"converting to df\")\n tmp_df = poll_to_df(raw_poll_json)\n tmp_df['state'] = state\n final_df = final_df.append(tmp_df)\n final_df[\"state\"] = [us_state_abbrev[state] if state in us_state_abbrev.keys() else state for state in final_df[\"state\"]]\n final_df[\"date\"] = pd.to_datetime(final_df[\"date\"], 
utc = True).dt.date\n final_df.to_csv(out_file, index = False)\n\n return final_df", "def countries(self, unit=None, units=None, timezone=None,\r\n rollup=None, limit=None, unit_reference_ts=None):\r\n params = base.get_params(None, locals())\r\n return self._get('countries', params)", "def get_country_code(self):\n\n try:\n sub_div = next(sub_div for sub_div in pycountry.subdivisions if sub_div.name == self.location)\n country = next(country for country in pycountry.countries if country.alpha_2 == sub_div.country_code)\n return country.alpha_3\n except StopIteration as exc:\n print(\"Cannot find subdivision in\" + str(exc))\n return 'XXX'", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def get_state_pop(api_key, year = str(), state_fip = str(), map = bool()):\n try:\n pop_url = f'http://api.census.gov/data/{year}/pep/population?get=POP&for=COUNTY&in=state:*&key={api_key}'\n r = requests.get(pop_url)\n data = json.loads(r.content) \n pop_df = pd.DataFrame(data[1:], columns=data[0]).\\\n rename(columns={\"POP\": \"Pop_Count\", \"state\": \"STATEFP\", \"county\": \"COUNTYFP\"})\n pop_df['Pop_Count'] = pop_df['Pop_Count'].astype(str).astype(int)\n pop_df = pop_df[pop_df.STATEFP == state_fip]\n geodata_url = f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/20m/{year}/county.json\"\n geo_df = gpd.read_file(geodata_url)\n geo_df = geo_df[geo_df.STATEFP == state_fip]\n geo_df = geo_df.merge(pop_df, on = 'COUNTYFP')\n geo_df.drop(geo_df.filter(regex='_y$').columns.tolist(),axis=1, inplace=True)\n geo_df = geo_df.rename(columns = {'STATEFP_x':'STATE_FIP'})\n if map == True:\n return geo_df.plot(column = 'Pop_Count')\n else:\n return geo_df\n r.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'An error occured. All parameters must exist in the Census GeoJSON database and API. 
Please check https://github.com/uscensusbureau/citysdk/tree/master/v2/GeoJSON: {err}')", "def get_image(self, country: str) -> PngImageFile:\n url = self.__url_dict[country]\n if self.__high_res:\n url = self.__get_high_res_url(country)\n\n file_path = f\"flag_cache/{self.__res_str}/{country}.png\"\n try:\n return Image.open(file_path)\n except IOError:\n print(f\"> Getting Flag of {country}: {url}\")\n return self.get_image_from_url(url, file_path)", "def do_fetch(postal):\n from x84.bbs import echo, getch, getterminal\n import StringIO\n term = getterminal()\n disp_msg('fEtChiNG')\n resp = requests.get(u'http://apple.accuweather.com'\n + u'/adcbin/apple/Apple_Weather_Data.asp',\n params=(('zipcode', postal),))\n if resp is None:\n disp_notfound()\n return None\n if resp.status_code != 200:\n # todo: logger.error\n echo(u'\\r\\n')\n echo(term.bold_red(u'StAtUS COdE: %s' % (resp.status_code,)))\n echo(u'\\r\\n\\r\\n')\n echo(repr(resp.content))\n echo(u'\\r\\n\\r\\n' + 'PRESS ANY kEY')\n getch()\n return None\n xml_stream = StringIO.StringIO(resp.content)\n tree = ET.parse(xml_stream)\n return tree.getroot()", "def fetch_wdi() -> None:\n\n log.info(\"Started fetching WDI.\")\n url = \"http://databank.worldbank.org/data/download/WDI_csv.zip\"\n common.fetch_source_simply(name=\"wdi\", url=url)\n log.info(\"Finished fetchign WDI.\")", "def get_country_data():\n\n parser = argparse.ArgumentParser(\n description='Retrieve aggregated stats by aggregation type, metric, and region.',\n )\n parser.add_argument(\n '--aggregation',\n required=True,\n choices=[\n 'avg',\n 'count',\n 'max',\n 'min',\n 'sum',\n ],\n help='Aggregation type',\n )\n parser.add_argument(\n '--field',\n required=True,\n choices=[\n 'area',\n 'borders',\n 'countries',\n 'currencies',\n 'gini',\n 'languages',\n 'latlng',\n 'population',\n ],\n help='Metric to aggregate',\n )\n parser.add_argument(\n '--by',\n required=True,\n choices=[\n 'region',\n 'subregion',\n ],\n help='Field to group aggregates by',\n )\n\n args = parser.parse_args()\n params = {\n 'aggregation': args.aggregation,\n 'field': args.field,\n 'by': args.by,\n }\n return process_aggregation_request(params)", "def vi_fetch_voting_info_from_scb(self):\r\n\r\n # map counties and county codes using 'GET' request\r\n response = requests.get(self.url)\r\n if response.status_code == requests.codes.ok:\r\n try:\r\n data = json.loads(response.text)\r\n for code, county in zip(data['variables'][0]['values'], \\\r\n data['variables'][0]['valueTexts']):\r\n self.counties_codes[code] = county\r\n # self.vi_log_msg(self.counties_codes)\r\n except Exception as e:\r\n self.vi_log_msg('CRITICAL_ERROR : ', str(e), '. Exiting!!')\r\n sys.exit()\r\n else:\r\n self.vi_log_msg('\"GET\" request failed. 
Received error code:', \\\r\n response.status_code)\r\n\r\n # 'POST' required query (json query) to SCB url & get relevant info\r\n # json_query must state response format as json\r\n response = requests.post(self.url, json=self.json_query)\r\n if response.status_code == requests.codes.ok:\r\n try:\r\n json_response_obj = json.loads(codecs.encode(response.text, \\\r\n 'utf-8'))\r\n\r\n # json_response_obj['data'] is in below format\r\n # {\"key\":[county_code, voting_year],\"values\":\\\r\n # [voting_percentage]}\r\n # Eg: {\"key\":[\"01L\",\"1973\"],\"values\":[\"90.0\"]}\r\n for voting_data in json_response_obj['data']:\r\n county_code = voting_data['key'][0]\r\n voting_year = int(voting_data['key'][1])\r\n voting_percentage = voting_data['values'][0]\r\n # voting_percentage not available\r\n if voting_percentage == '..':\r\n voting_percentage = 0.0\r\n else:\r\n voting_percentage = float(voting_percentage)\r\n # get county name from county code\r\n county_name = self.counties_codes[county_code]\r\n \r\n # map voting information with voting year & county, i.e,\r\n # voting_information[voting_year][county] = \\\r\n # voting_percentage\r\n # Eg: voting_information[1973]['Stockholm county council'] \\\r\n # = 90.0\r\n if voting_year not in self.voting_information.keys():\r\n self.voting_information[voting_year] = {}\r\n self.voting_information[voting_year][county_name] = \\\r\n voting_percentage\r\n except Exception as e:\r\n self.vi_log_msg('CRITICAL_ERROR : ', str(e), '. Exiting!!')\r\n sys.exit()\r\n else:\r\n self.vi_log_msg('\"POST\" request failed. Received error code:', \\\r\n response.status_code)", "def get_country_status_timebased(country, start_time, end_time):\n api = \"country/{}?from={}T00:00:00Z&to={}T00:00:00Z\".format(\n country, start_time, end_time)\n return _get_data(api)", "def spout(c=None, y=None, detail=0):\n if c is None and y is None:\n r = Response(response='What? 
No country and year specified.',\n status=404,\n mimetype=\"application/json\")\n r.headers[\"Content-Type\"] = \"text/json; charset=utf-8\"\n return r\n\n if detail == 0:\n # Returns a list of all the years for a specified country.\n if y is None:\n jStr = df[df['Country Name'] == c][\n ['Year', 'Forest Land Percent']].to_json(orient='table',\n index=False)\n j1 = json.loads(jStr)['data']\n return(jsonify(j1))\n # Or if year is specified: give just that year's forest land %.\n elif y is not None:\n f = float(df[(df['Country Name'] == c) & (df['Year'] == int(y))]['Forest Land Percent']) # noqa\n # Returns Flask.Response object\n return jsonify({'Forest Coverage Percent': f})\n else:\n return 1\n\n elif detail == 1:\n # Returns a list of all the details for a specified country\n # for all the years.\n if y is None:\n jStr = df[df['Country Name'] == c][\n ['Year',\n 'Forest Land Percent',\n 'Agriculture Land Percentage',\n 'Population',\n 'GDP Per Capita (2019 USD)']].to_json(orient='table',\n index=False)\n j1 = json.loads(jStr)['data']\n return(jsonify(j1))\n\n # Returns a list of all the details for specified country\n # and specific year.\n elif y is not None:\n filtered = df[(df['Country Name'] == c) & (df['Year'] == int(y))]\n cn = filtered['Country Name'].to_string(index=False).strip()\n ct = filtered['Country Code'].to_string(index=False).strip()\n yr = int(filtered['Year'])\n ff = float(filtered['Forest Land Percent'])\n ap = float(filtered['Agriculture Land Percentage'])\n pp = float(filtered['Population'])\n my = float(filtered['GDP Per Capita (2019 USD)'])\n # Unfortunately must be in separate JSON thingies because\n # if not it'll be unpredictably unordered it seems like.\n return jsonify(\n {'Country Name': cn},\n {'Country Code': ct},\n {'Year': yr},\n {'Forest Land Percentage': ff},\n {'Agri Land Percentage': ap},\n {'Population': pp},\n {'GDP per Capita (2019USD)': my}\n )\n else:\n return 1", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def info_widget(loc_classes, switch, weather):\r\n\r\n try:\r\n if loc_classes[\"country_iso\"]:\r\n info = {}\r\n iso = loc_classes[\"country_iso\"]\r\n\r\n \"\"\"FX-rate function\"\"\"\r\n info = fx_rate(iso)\r\n\r\n \"\"\"Language differing titles/phrases\"\"\"\r\n #German\r\n if switch == \"German\" or loc_classes['language'] == 'german':\r\n info[\"country\"] = loc_classes[\"country_de\"].title()\r\n info[\"title_euro\"] = \"Wechselkurse Euroländer\"\r\n info[\"title\"] = \"Wechselkurse\"\r\n #English:\r\n else:\r\n info[\"country\"] = loc_classes[\"country_en\"].title()\r\n info[\"title_euro\"] = \"FX box Euro countries\"\r\n info[\"title\"] = \"FX box\"\r\n\r\n\r\n \"\"\"GDP and population\"\"\"\r\n #World Band database needs iso3 country code\r\n iso_3 = db.execute(\"SELECT iso316_1_alpha_3 FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"iso316_1_alpha_3\"]\r\n #Country population in millions\r\n pop = db.execute(\"SELECT * FROM world_bank 
WHERE (CountryCode=:iso \\\r\n AND (SeriesCode='SP.POP.TOTL'))\",\r\n iso=iso_3)[0][\"2019\"]\r\n pop = round(int(pop) / (1000 * 1000), 1)\r\n info[\"pop\"] = pop\r\n #GDP per capita\r\n gdp = db.execute(\"SELECT * FROM world_bank WHERE (CountryCode=:iso \\\r\n AND (SeriesCode='NY.GDP.PCAP.CD'))\",\r\n iso=iso_3)[0][\"2019\"]\r\n #Convert from USD to EUR\r\n gdp_raw = 0.0\r\n gdp_cur = 0\r\n #Try/except loop, if fx-rate not available at API\r\n try:\r\n gdp_raw = round(float(gdp) / info[\"eur_usd\"])\r\n gdp_cur = \"Euro\"\r\n\r\n except:\r\n gdp_raw = round(float(gdp))\r\n gdp_cur = \"USD\"\r\n\r\n #1000 , splitter for readability\r\n locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'\r\n gdp = f'{gdp_raw:n}'\r\n info[\"gdp\"] = gdp\r\n info[\"gdp_cur\"] = gdp_cur\r\n\r\n \"\"\"Capital, Internet domain, Country phone code\"\"\"\r\n #Capital\r\n capital = db.execute(\"SELECT capital FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"capital\"]\r\n info[\"capital\"] = capital\r\n #Internet domain\r\n internet = db.execute(\"SELECT tld FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"tld\"]\r\n info[\"internet\"] = internet\r\n #country phone code\r\n phone = db.execute(\"SELECT dial FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"dial\"]\r\n info[\"phone\"] = \"+\" + phone\r\n\r\n\r\n \"\"\"GMT time zone\"\"\"\r\n #Get time zone delta from weather dictionary\r\n time_zone = weather[0][\"hour_offset\"]\r\n zone = 0\r\n\r\n #Exception/error errorhandler\r\n if iso == \"cn\":\r\n gmt = \"+8\"\r\n\r\n else:\r\n if (int(time_zone) - time_zone) == 0:\r\n zone = round(time_zone)\r\n if zone > 0:\r\n gmt = \"+\" + str(zone)\r\n else:\r\n gmt = str(zone)\r\n else:\r\n zone = time_zone\r\n if zone > 0:\r\n gmt = \"+\" + str(zone)\r\n else:\r\n gmt = str(zone)\r\n\r\n info[\"time_zone\"] = gmt\r\n\r\n\r\n print(\"############\", info)\r\n return info\r\n\r\n except:\r\n print(\"######## ERROR #########\")\r\n return None", "def country(alpha_2_code: str) -> None:", "def country():\r\n\r\n cursor.execute('SELECT country_names FROM countries \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def getvversioncountryvalidationrulescountry(\n self, country, version, ms_correlation_id=None, ms_request_id=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.getvversioncountryvalidationrulescountry.metadata['url']\n path_format_arguments = {\n 'country': self._serialize.url(\"country\", country, 'str'),\n 'version': self._serialize.url(\"version\", version, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if ms_correlation_id is not None:\n header_parameters['MS-CorrelationId'] = self._serialize.header(\"ms_correlation_id\", ms_correlation_id, 'str')\n if ms_request_id is not None:\n header_parameters['MS-RequestId'] = self._serialize.header(\"ms_request_id\", ms_request_id, 'str')\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", 
self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.get(url, query_parameters)\n response = self._client.send(request, header_parameters, stream=False, **operation_config)\n\n if response.status_code not in [200, 201, 400, 401, 403, 404, 500]:\n exp = CloudError(response)\n exp.request_id = response.headers.get('x-ms-request-id')\n raise exp\n\n deserialized = None\n\n if response.status_code in [200, 201]:\n deserialized = self._deserialize('MicrosoftPartnerSdkContractsV1CountryValidationRules', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def hub_city(g):\n max_flights = None\n hub_cities = []\n for key in g.city_dict:\n if(len(g.city_dict[key].get_flights_in()) > max_flights):\n max_flights = len((g.city_dict[key]).flights_in)\n \n \n for key in g.city_dict:\n if(len(g.city_dict[key].get_flights_in()) == max_flights): \n hub_cities.append(g.city_dict[key].get_name()) \n \n \n return hub_cities", "def get_pop(api_key, year = str(), map = bool()):\n try:\n year = year\n pop_url = f'http://api.census.gov/data/{year}/pep/population?get=POP&for=state:*&key={api_key}'\n r = requests.get(pop_url)\n data = json.loads(r.content) \n pop_df = pd.DataFrame(data[1:], columns=data[0]).\\\n rename(columns={\"POP\": \"Pop_Count\", \"state\": \"STATEFP\"})\n pop_df['Pop_Count'] = pop_df['Pop_Count'].astype(str).astype(int)\n geodata_url = f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/20m/{year}/state.json\"\n geo_df = gpd.read_file(geodata_url)\n geo_df = geo_df.merge(pop_df, on = 'STATEFP')\n if map == True:\n return geo_df.plot(column = 'Pop_Count')\n else:\n return geo_df\n r.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'An error occured. All parameters must exist in the Census GeoJSON database. 
Please check https://github.com/uscensusbureau/citysdk/tree/master/v2/GeoJSON: {err}')", "def audience_simple(country):\r\n if country == 'US':\r\n return 'USA'\r\n elif country == 'IN':\r\n return 'India'\r\n else:\r\n return 'Other'", "def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None:\n # Where JHU stores their data\n url_template = (\"https://raw.githubusercontent.com/CSSEGISandData/\"\n \"COVID-19/master/csse_covid_19_data/\"\n \"csse_covid_19_time_series/time_series_covid19_%s_%s.csv\")\n\n # Scrape the data\n dfs = {}\n for region in ['global', 'US']:\n dfs[region] = {}\n for kind in ['confirmed', 'deaths', 'recovered']:\n url = url_template % (kind, region) # Create the full data URL\n try:\n df = pd.read_csv(url) # Download the data into a dataframe\n except HTTPError:\n print(\"Could not download data for %s, %s\" % (kind, region))\n else:\n if region == 'global':\n has_no_province = df['Province/State'].isnull()\n # Whole countries only; use country name as index\n df1 = df[has_no_province].set_index('Country/Region')\n more_dfs = []\n for country in ['China', 'Canada', 'Australia']:\n if country == 'Canada' and kind in 'recovered':\n continue\n is_c = df['Country/Region'] == country\n df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T\n df2['Country/Region'] = country\n df2 = df2.set_index('Country/Region')\n more_dfs.append(df2)\n df = pd.concat([df1] + more_dfs)\n elif region == 'US':\n # Use state name as index\n for k, v in us_state_abbrev.items(): # get US state abbrev\n if not us_state_abbrev[k].startswith('US_'):\n us_state_abbrev[k] = 'US_' + v # Add 'US_' to abbrev\n df.replace(us_state_abbrev, inplace=True)\n df = df.set_index('Province_State')\n df = df.groupby('Province_State').sum() # combine counties to create state level data\n\n df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns\n # 20 or 21 signifies 2020 or 2021\n dfs[region][kind] = df # Add to dictionary of dataframes\n\n # Generate a list of countries that have \"good\" data,\n # according to these criteria:\n good_countries = get_countries(dfs['global'], filter_=filter_)\n\n # For each \"good\" country,\n # reformat and save that data in its own .csv file.\n source = dfs['global']\n for country in tqdm(good_countries, desc='Countries'): # For each country\n if country in ['Diamond Princess', 'MS Zaandam', 'Samoa',\n 'Vanuatu', 'Marshall Islands', 'US', 'Micronesia']:\n print(\"Skipping {}\".format(country))\n continue\n # If we have data in the downloaded JHU files for that country\n if country in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover',\n 'new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[country].values\n df['cum_deaths'] = source['deaths'].loc[country].values\n df['cum_recover'] = source['recovered'].loc[country].values\n df[['new_cases', 'new_deaths', 'new_recover']] = \\\n df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n\n try:\n population = get_population_count(data_path, country)\n df['population'] = population\n except:\n pass\n\n # Fill NaN with 0 and convert to int\n dfs[country] = df.set_index('dates2').fillna(0).astype(int)\n dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country))\n\n else:\n print(\"No data for %s\" % 
country)\n\n source = dfs['US']\n states = source['confirmed'].index.tolist()\n us_recovery_data = covid_tracking_recovery(data_path)\n for state in tqdm(states, desc='US States'): # For each country\n if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']:\n print(\"Skipping {}\".format(state))\n continue\n # If we have data in the downloaded JHU files for that country\n if state in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'new_cases','new_deaths','new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[state].values\n df['cum_deaths'] = source['deaths'].loc[state].values\n\n df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff()\n\n # add recovery data\n df.set_index('dates2', inplace=True)\n df = df.merge(us_recovery_data[state], on='dates2', how='left')\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n try:\n population = get_population_count(data_path, state)\n df['population'] = population\n except:\n pass\n # Fill NaN with 0 and convert to int\n dfs[state] = df.fillna(0).astype(int)\n dfs[state].to_csv(data_path /\n ('covidtimeseries_%s.csv' % state))\n else:\n print(\"No data for %s\" % state)", "def topBrandsandCountries(df, countries_unique):\n top_countries = {}\n for x in countries_unique:\n if df[df.geo_country==x].device_brand_name.count() > 500:\n top_countries[x] = df[df.geo_country==x].device_brand_name.count()\n\n top_3_brands = ['Apple','Samsung','Huawei']\n\n apple = []\n samsung = []\n huawei = []\n for x in top_countries.keys():\n apple.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[0]].device_brand_name.count())\n samsung.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[1]].device_brand_name.count())\n huawei.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[2]].device_brand_name.count()) \n\n return top_countries,apple,samsung,huawei", "def cli(database, city, state):\n\n # load API key\n key = wg.load_key(\"/home/curtis/etc/wunderground\")\n\n # build API request\n url = wg.build_request(key, database, city=city, state=state)\n\n # get results\n r = wg.get_request(url)\n\n # print results\n pprint(r)", "def update(self):\n url = '/weather/current/minutely' \\\n '?version=2&lat={}&lon={}&city={}&county={}&village={}' \\\n .format(self.lat, self.lon, self.city, self.county, self.village)\n self.result = self.api.get(url)['weather']['minutely'][0]", "def mostOfficial(api, newapi):\n\n if api[-3:] == 'KHR':\n return api\n if newapi[-3:] == 'KHR':\n return newapi;\n if api[-3:] == 'EXT':\n return api\n if newapi[-3:] == 'EXT':\n return newapi;\n return api", "def get_shapes4country(country='South Africa'):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-cultural-vectors\"\n URL += \"/10m-admin-1-states-provinces/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n shapefiles = 'ne_10m_admin_1_states_provinces_lakes'\n# shapefiles = 'ne_10m_admin_1_states_provinces'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n states = geopandas.read_file(folder)\n # Just select state of interest\n choosen_states = states.query(\"admin == '{}'\".format(country))\n choosen_states = choosen_states.reset_index(drop=True)\n # Get the shapes\n shapes = 
zip(choosen_states.geometry, range(len(choosen_states)))\n return shapes", "def test_preferred_zip_precedence(self):\n subscriber = Subscriber.objects.get(id=4)\n create_subscriber_in_session(self, subscriber)\n factory = RequestFactory()\n self.assemble_session(self.session) \n request = factory.get('/hudson-valley/', follow=True) \n request.session = self.session\n site = Site.objects.get(id=3)\n preferred_zip = get_preferred_zip(request, site)\n self.assertEqual(preferred_zip, '27604')", "def get_region_pop(api_key, year = str(), map = bool()):\n try:\n year = year\n pop_url = f'http://api.census.gov/data/{year}/pep/population?get=POP&for=REGION:*&key={api_key}'\n r = requests.get(pop_url)\n data = json.loads(r.content) \n pop_df = pd.DataFrame(data[1:], columns=data[0]).\\\n rename(columns={\"POP\": \"Pop_Count\", \"region\": \"REGIONCE\"})\n pop_df['Pop_Count'] = pop_df['Pop_Count'].astype(str).astype(int)\n geodata_url = f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/20m/{year}/region.json\"\n geo_df = gpd.read_file(geodata_url)\n geo_df = geo_df.merge(pop_df, on = 'REGIONCE')\n if map == True:\n return geo_df.plot(column = 'Pop_Count')\n else:\n return geo_df\n r.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'An error occured. All parameters must exist in the Census GeoJSON database and API. Please check https://github.com/uscensusbureau/citysdk/tree/master/v2/GeoJSON: {err}')", "def get_country_from_hal(docid) -> str:\n url = BaseConfig.HAL_URL_STRUCTURE\\\n + \"?q=docid:{}&fl=country_s\".format(docid)\n r = requests.get(url)\n try:\n r = requests.get(url, timeout=2)\n if 'fr' == json.loads(r.text)['response']['docs'][0]['country_s']:\n return 'France'\n else:\n return json.loads(r.text)['response']['docs'][0]['country_s']\n except Exception:\n logger.debug(\"error in hal getting country for docid {}\".format(docid))\n return 'unknown'", "async def _timein_country(self, country_code):\n\t\t\n\t\tapiKey = self.settings['api_key']\n\t\tif \".com\" in apiKey:\n\t\t\tawait self.bot.say(\"You have to set your API key, see data/timein/settings.json for details\")\n\t\t\treturn\n\t\t\n\t\turl = 'http://api.timezonedb.com/v2/list-time-zone?key=' + apiKey + '&format=xml'\n\t\tflag = ':flag_'\n\n\t\tif country_code.lower() == 'use':\n\t\t\turl += '&country=US&zone=*New_York*'\n\t\t\tflag += 'us: EAST '\n\t\telif country_code.lower() == 'usw':\n\t\t\turl += '&country=US&zone=*Los_Angeles*'\n\t\t\tflag += 'us: WEST '\n\t\telif country_code.lower() == 'test':\n\t\t\turl += '&zone=*auckland*'\n\t\t\tflag += 'nz: '\n\t\telif len(country_code) != 2 or ' ' in country_code == False:\n\t\t\tawait self.bot.say(\"Country code must be 2 letters and from this list https://timezonedb.com/country-codes\")\n\t\t\treturn\n\t\telse:\n\t\t\tif country_code == 'UK' or country_code == 'uk':\n\t\t\t\tcountry_code = 'GB'\n\t\t\turl += '&country=' + country_code\n\t\t\tflag += country_code.lower() + ': '\n\t\t\t\n\t\tasync with aiohttp.get(url) as response:\n\t\t\tsoupObject = BeautifulSoup(await response.text(), \"html.parser\")\n\t\tmessage = ''\n\t\t\n\t\tstatus = soupObject.find('status').get_text()\n\t\tif status != 'OK':\n\t\t\tmessage += 'Request failed. 
Details:\\n```'\n\t\t\tmessage += status + '\\n'\n\t\t\tmessage += soupObject.find('message').get_text()\n\t\t\tmessage += '```\\nMake sure country code is from the list at https://timezonedb.com/country-codes'\n\t\telse:\n\t\t\tzones = soupObject.find_all('zone')\n\t\t\tfor zone in zones:\n\t\t\t\tnewmessage = ''\n\t\t\t\tnewmessage += flag\n\t\t\t\tnewmessage += zone.find('countryname').get_text() + '\\n'\n\t\t\t\tnewmessage += zone.find('zonename').get_text() + '\\n'\n\t\t\t\tunixtime = zone.find('timestamp').get_text()\n\t\t\t\tprettyTime = datetime.datetime.fromtimestamp(int(unixtime)).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\t\tnewmessage += prettyTime + '\\n'\n\t\t\t\tmessage += newmessage + '\\n'\n\t\t\n\t\tawait self.bot.say(message)", "def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None", "def get_country_mask(country='South Africa', res='2x2.5'):\n # Get AC_tools location, then set example data folder location\n import os\n import xarray as xr\n import inspect\n filename = inspect.getframeinfo(inspect.currentframe()).filename\n path = os.path.dirname(os.path.abspath(filename))\n folder = path+'/data/LM/LANDMAP_LWI_ctm_0125x0125/'\n # Get coords from LWI 0.125x0.125 data and remove the time dimension\n ds = xr.open_dataset(folder+'ctm.nc')\n ds = ds.mean(dim='time')\n # Add a raster mask for a country\n ds = add_raster_of_country2ds(ds, test_plot=True, country=country)\n # Only include states in the assignment\n ds = ds[['states']]\n # rrgrid to coarser resolution (e.g. 2x2.5)\n ds = regrid2coarse_res(ds, res=res)\n return ds", "def test_country_unknown(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n\n 'question_text': 'ou812?',\n 'variation_id': '1',\n 'country': 'unknown'\n }\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n ans = Answer.objects.latest('id')\n assert ans.country == 'UNK'", "def get_covid_stats_by_county(state, county):\n url = \"https://corona.lmao.ninja/v2/jhucsse/counties/\" + county\n response = requests.get(url)\n data = response.json()\n counties = []\n for res in data:\n if res[\"province\"] == state:\n county1 = res[\"county\"]\n updatedAt = res[\"updatedAt\"]\n stats = res[\"stats\"]\n confirmed = stats[\"confirmed\"]\n deaths = stats[\"deaths\"]\n recovered = stats[\"recovered\"]\n counties.append(\n CountyStats(state, county1, updatedAt, confirmed, deaths, recovered)\n )\n # return CountyStats(state,county,updatedAt,confirmed,deaths,recovered)\n return counties", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def retrieval():\n try:\n if request.method == 'GET':\n country = request.args.get('country') # If no key then null\n year = request.args.get('year') # If no key then null\n return spout(country, year)\n except Exception as e:\n # Unfortunately I'm not going to wrap this in indv. 
strings\n r = Response(response=error_msg+str(e),\n status=404,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r", "def nwLanguage(cType):\n\n nwa = config['netwitness/nw_concentrator']\n base_uri = \"/sdk?msg=language&\"\n params_dic = {'force-content-type': cType, 'expiry': 0, 'size': 200}\n\n enc_params = urllib.urlencode(params_dic)\n full_url = nwa + base_uri + enc_params\n nwu, nwp = get_creds()\n\n return get_http(full_url, nwu, nwp)", "def get_cgrt():\n\n cgrt = pd.read_csv(\n \"https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv\",\n low_memory=False\n )\n\n if \"RegionCode\" in cgrt.columns:\n cgrt = cgrt[cgrt.RegionCode.isnull()]\n\n cgrt = cgrt[[\"CountryName\", \"Date\", \"StringencyIndex\"]]\n\n cgrt.loc[:, \"Date\"] = pd.to_datetime(cgrt[\"Date\"], format=\"%Y%m%d\").dt.date.astype(str)\n\n country_mapping = pd.read_csv(os.path.join(INPUT_DIR, \"bsg/bsg_country_standardised.csv\"))\n\n cgrt = country_mapping.merge(cgrt, on=\"CountryName\", how=\"right\")\n\n missing_from_mapping = cgrt[cgrt[\"Country\"].isna()][\"CountryName\"].unique()\n if len(missing_from_mapping) > 0:\n raise Exception(f\"Missing countries in OxCGRT mapping: {missing_from_mapping}\")\n\n cgrt = cgrt.drop(columns=[\"CountryName\"])\n\n rename_dict = {\n \"Country\": \"location\",\n \"Date\": \"date\",\n \"StringencyIndex\": \"stringency_index\"\n }\n\n cgrt = cgrt.rename(columns=rename_dict)\n\n return cgrt", "def getTopPopulationRegion(self):\n\t\tdata = {}\n\t\tfor iProvince in range(con.iNumRegions):\n\t\t\tdata[iProvince] = 0\n\t\tfor iLoopPlayer in range(con.iBarbarian + 1):\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tdata[pCity.GetCy().plot().getRegionID()] += pCity.getPopulation()\n\t\tkey = -1\n\t\tfor key, value in sorted(data.iteritems(), key=lambda (k,v): (v,k)):\n\t\t\tpass\n\t\treturn key", "def get_3letter(countries):\n url = URL(\"http://www.worldatlas.com/aatlas/ctycodes.htm\")\n html = url.download()\n dom = DOM(html)\n country_table = dom.by_tag('table.tableWrap')\n iso2_list = []\n iso3_list = []\n density_list = []\n\n for table in country_table:\n\n # Check if the ISO-2 Code is in our wikipedia dictionary, than add every value to a separate list.\n for country in table('tr')[1:]:\n iso2_code = country.by_tag('td.cell02')[0].content.strip()\n iso3_code = country.by_tag('td.cell03')[0].content.strip()\n print iso2_code, iso3_code\n if iso2_code in countries:\n iso2_list.append(iso2_code)\n iso3_list.append(iso3_code)\n density_list.append(countries[iso2_code])\n\n # A check to see if we miss countries from the wikipedia page.\n for iso2 in iso2_list:\n if iso2 in countries:\n pass\n else:\n print 'MISSING', iso2\n\n f1 = open(\"countrycodes.txt\", \"w\")\n # Reformat the data to fit the json.dump function.\n json_d = []\n for i in range(len(iso2_list)):\n json_d.append({'iso2': iso2_list[i], 'iso3': iso3_list[i], 'density': density_list[i]})\n f1.write(\"{0},{1}\\n\".format(iso2_list[i], iso3_list[i]))\n with open('densities.txt', 'a') as f:\n json.dump(json_d, f, indent=4)", "def get_velocloud_costs(self, nodePair, site, accessories):\n global apac_countries\n global latam_countries\n bundleNode = nodePair.get_bundleNode()\n countryNode = nodePair.get_countryNode()\n core = self.core\n bundle = {\n \"hardware\" : 0,\n \"software\" : 0,\n \"support\" : 0,\n \"license\" : 0,\n \"accessories\": 0,\n \"BOM\" : []\n }\n\n additionalDiscount = 0\n 
if site[\"Contract term\"] >= 36:\n additionalDiscount = 0.03\n\n\n if \"Feature set\" in site.keys():\n featureSet = site[\"Feature set\"].split(\"-\")\n\n supportDelay = \"NDD\"\n if site[\"On-site maintenance\"] == \"Silver\":\n supportDelay = \"4H5\"\n elif site[\"On-site maintenance\"] == \"Gold\":\n supportDelay = \"4H7\"\n\n bundle[\"hardware\"] = core.get_attribute(bundleNode, \"cost\") * (1 - core.get_attribute(bundleNode, \"discount\") - additionalDiscount)\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=core.get_attribute(bundleNode, \"vendorCode\"),description=core.get_attribute(bundleNode, \"description\"), quantity=site[\"Device quantity\"], discount=core.get_attribute(bundleNode, \"discount\") + additionalDiscount,unit_list=core.get_attribute(bundleNode, \"cost\")))\n bundle[\"weight\"] = core.get_attribute(bundleNode, \"weight\")\n if latam_countries is None and apac_countries is None:\n with open(\"./imports/LATAMcountries.csv\", \"r\") as f:\n latam_countries = f.read().split(\";\")\n with open(\"./imports/APACcountries.csv\", \"r\") as f:\n apac_countries = f.read().split(\";\")\n parts = core.load_children(bundleNode)\n contract_term = str(int(site[\"Contract term\"])) + \"P\"\n\n bandwidth = 0\n if site[\"Order type\"] == \"Bandwidth based\":\n if int(site[\"Bandwidth category\"])//1000 == 0:\n if int(site[\"Bandwidth category\"]) < 100:\n bandwidth = \"0\" + str(site[\"Bandwidth category\"]) + \"M\"\n else:\n bandwidth = str(site[\"Bandwidth category\"]) + \"M\"\n else:\n bandwidth = str(int(site[\"Bandwidth category\"]) // 1000) + \"G\"\n\n for part in parts:\n if contract_term in core.get_attribute(part, \"name\"):\n if core.get_attribute(part, \"type\") == \"Support\" and supportDelay in core.get_attribute(part, \"name\"):\n core.set_registry(part, \"color\", \"#00FF00\")\n bundle[\"support\"] = core.get_attribute(part, \"cost\") * (1 - core.get_attribute(bundleNode, \"discount\") - additionalDiscount) + core.get_attribute(part, \"upgradeMargin\") * bundle[\"hardware\"]\n if site[\"Contract term\"] >= 36:\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=core.get_attribute(part, \"vendorCode\"),description=core.get_attribute(part, \"description\"), quantity=site[\"Device quantity\"], discount=core.get_attribute(bundleNode, \"discount\") + additionalDiscount,unit_list=core.get_attribute(part, \"cost\")))\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=\"VC-SUP-UPG-\" + supportDelay + \"-12P\",description=core.get_attribute(part, \"upgradeDescription\"), quantity=site[\"Device quantity\"], discount=core.get_attribute(bundleNode, \"discount\") + additionalDiscount,unit_list=core.get_attribute(part, \"upgradeMargin\") * core.get_attribute(bundleNode, \"cost\")))\n elif bandwidth != 0 :\n if site['Device quantity'] == 1:\n quantity = 1\n else:\n quantity = site['Device quantity'] // 2\n i = 0\n for i in range(len(featureSet)):\n if featureSet[i] not in core.get_attribute(part,\"name\") :\n break\n elif i == len(featureSet) - 2:\n if \"G\" not in featureSet[2] and \"G\" in core.get_attribute(part,\"name\"):\n break\n upgradeMargin = 0\n if featureSet[-1] == \"PROD\":\n upgradeCode = \"VC-PROD-UPG-\" + str(site[\"Contract term\"]) + \"P\"\n upgradeMargin = 0.05\n upgradeDescription = \"VMware SD-WAN support upgrade to Production, Subscription for \" + str(int(site[\"Contract term\"] / 12)) + \" year, 
Prepaid\"\n elif featureSet[-1] == \"PREM\":\n upgradeCode = \"VC-PREM-UPG-\" + str(site[\"Contract term\"]) + \"P\"\n upgradeMargin = 0.07\n upgradeDescription = \"VMware SD-WAN support upgrade to Premier, Subscription for \" + str(int(site[\"Contract term\"] / 12)) + \" year, Prepaid\"\n \n if core.get_attribute(part, \"type\") == \"Software\" and bandwidth in core.get_attribute(part, \"name\"):\n core.set_registry(part, \"color\", \"#00FF00\")\n bundle[\"software\"] = core.get_attribute(part, \"cost\") * (1 - core.get_attribute(part, \"discount\") - additionalDiscount) + upgradeMargin * core.get_attribute(part, \"cost\") * (1 - core.get_attribute(part,\"discount\") - additionalDiscount)\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=core.get_attribute(part, \"vendorCode\"),description=core.get_attribute(part, \"description\"), quantity=quantity, discount=core.get_attribute(part, \"discount\") + additionalDiscount,unit_list=core.get_attribute(part, \"cost\")))\n if featureSet[-1] != \"BAS\":\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=upgradeCode,description=upgradeDescription, quantity=quantity, discount=core.get_attribute(part, \"discount\") + additionalDiscount, unit_list= upgradeMargin * core.get_attribute(part, \"cost\")))\n if core.get_attribute(part, \"type\") == \"License\" and featureSet[1] == \"HO\" and featureSet[2] == \"HG\" and bandwidth in core.get_attribute(part, \"vendorCode\") and ((core.get_attribute(countryNode, \"name\") in apac_countries and \"APAC\" in core.get_attribute(part,\"name\")) or (core.get_attribute(countryNode, \"name\") in latam_countries and \"LATAM\" in core.get_attribute(part,\"name\"))):\n core.set_registry(part, \"color\", \"#00FF00\")\n bundle[\"license\"] = core.get_attribute(part, \"cost\") * (1 - core.get_attribute(bundleNode, \"discount\") - additionalDiscount)\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=core.get_attribute(part, \"vendorCode\"),description=core.get_attribute(part, \"description\"), quantity=quantity, discount=core.get_attribute(part, \"discount\") + additionalDiscount,unit_list=core.get_attribute(part, \"cost\")))\n for accessory in accessories:\n for part in parts:\n if core.get_attribute(part, \"type\") == \"Accessory\" and accessory['code'] == core.get_attribute(part, \"vendorCode\"):\n core.set_registry(part, \"color\", \"#00FF00\")\n bundle[\"accessories\"] += core.get_attribute(part, \"cost\") * (1 - core.get_attribute(bundleNode, \"discount\") - additionalDiscount) * accessory['quantity']\n bundle[\"BOM\"].append(create_BOM_row(siteID=str(site[\"Site ID 1\"]) + str(site[\"Site ID 2\"]),code=core.get_attribute(part, \"vendorCode\"), description=core.get_attribute(part, \"description\"), quantity=accessory['quantity'], discount=core.get_attribute(bundleNode, \"discount\") + additionalDiscount, unit_list=core.get_attribute(part, \"cost\")))\n return bundle", "def get_house_est(api_key, year = str(), map = bool()):\n try:\n house_url = f'http://api.census.gov/data/{year}/pep/housing?get=HUEST&for=state:*&key={api_key}'\n r = requests.get(house_url)\n data = json.loads(r.content) \n house_df = pd.DataFrame(data[1:], columns=data[0]).\\\n rename(columns={\"HUEST\": \"Housing_Estimates\", \"state\": \"STATEFP\"})\n house_df['Housing_Estimates'] = house_df['Housing_Estimates'].astype(str).astype(int)\n geodata_url = 
f\"https://raw.githubusercontent.com/uscensusbureau/citysdk/master/v2/GeoJSON/20m/{year}/state.json\"\n geo_df = gpd.read_file(geodata_url)\n geo_df = geo_df.merge(house_df, on = 'STATEFP')\n if map == True:\n return geo_df.plot(column = 'Housing_Estimates')\n else:\n return geo_df\n r.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n except Exception as err:\n print(f'An error occured. All parameters must exist in the Census GeoJSON database and API. Please check https://github.com/uscensusbureau/citysdk/tree/master/v2/GeoJSON: {err}')", "def get_country_details(self,country):\n try:\n country_obj = pycountry.countries.get(name=country)\n if country_obj is None:\n c = pycountry.countries.search_fuzzy(country)\n country_obj = c[0]\n continent_code = pc.country_alpha2_to_continent_code(country_obj.alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj.alpha_3, continent\n except:\n if 'Congo' in country:\n country = 'Congo'\n elif country == 'Diamond Princess' or country == 'Laos' or country == 'MS Zaandam' or country == 'Holy See' or country == 'Timor-Leste':\n return country, country\n elif country == 'Korea, South' or country == 'South Korea':\n country = 'Korea, Republic of'\n elif country == 'Taiwan*':\n country = 'Taiwan'\n elif country == 'Burma':\n country = 'Myanmar'\n elif country == 'West Bank and Gaza':\n country = 'Gaza'\n else:\n return country, country\n country_obj = pycountry.countries.search_fuzzy(country)\n continent_code = pc.country_alpha2_to_continent_code(country_obj[0].alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj[0].alpha_3, continent", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n 
results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def get(test_url, headless, tab_concurrency, browser_concurrency, limit, selector, source_num, geo, bin_path, chrome_args, debug):\n chrome_args = chrome_args.split(',')\n _args = []\n for arg in chrome_args:\n if len(arg) > 0:\n if not arg.startswith('--'):\n arg = '--{}'.format(arg)\n _args.append(arg)\n client = proxytools.Client(debug=True)\n results = client.get_proxies(test_url,\n headless=headless,\n tab_concurrency=tab_concurrency,\n browser_concurrency=browser_concurrency,\n limit=limit,\n selector=selector,\n source_num=source_num,\n bin_path=bin_path,\n chrome_args=chrome_args)\n if geo:\n wait = 1 # seconds between WHOIS request\n for result in results:\n proxy = proxytools.proxy.Proxy.from_string(result['proxy'])\n country = proxy.country()\n result['country'] = country\n time.sleep(wait)\n print(json.dumps(results, indent=4))", "def query_api(term, location):\n response = search(term, location)\n\n businesses = response.get('businesses')\n\n if not businesses:\n print 'No businesses for {0} in {1} found.'.format(term, location)\n return\n\n business_id = businesses[0]['id']\n \n print '{0} businesses found, querying business info for the top result \"{1}\" ...'.format(\n len(businesses),\n business_id\n )\n \n response=[]\n for biz in range(len(businesses)):\n response.append(get_business(businesses[biz]['id']))\n #response = get_business(business_id)\n return response", "def test_get_country_states(self):\n pass", "def ServiceQuery(self,org,dst):\n \n\n res=requests.get(\"http://data.fixer.io/api/latest?access_key=8694056806013f8123e3ec7c9569ce1c&symbols={},{}\".format(dst,org))\n try:\n dst_rate=res.json()['rates'][dst] #de euro a dst\n org_rate=res.json()['rates'][org] #de euro a org\n 
except KeyError:\n return False\n return (Decimal(org_rate)/Decimal(dst_rate))", "def Get_LonghurstProvinceName4Num(input):\n LonghurstProvinceDict = {\n 'ALSK': 'AlaskaDownwellingCoastalProvince',\n 'ANTA': 'AntarcticProvince',\n 'APLR': 'AustralPolarProvince',\n 'ARAB': 'NWArabianUpwellingProvince',\n 'ARCH': 'ArchipelagicDeepBasinsProvince',\n 'ARCT': 'AtlanticArcticProvince',\n 'AUSE': 'EastAustralianCoastalProvince',\n 'AUSW': 'AustraliaIndonesiaCoastalProvince',\n 'BENG': 'BenguelaCurrentCoastalProvince',\n 'BERS': 'N.PacificEpicontinentalProvince',\n 'BPLR': 'BorealPolarProvince(POLR)',\n 'BRAZ': 'BrazilCurrentCoastalProvince',\n 'CAMR': 'CentralAmericanCoastalProvince',\n 'CARB': 'CaribbeanProvince',\n 'CCAL': 'CaliforniaUpwellingCoastalProvince',\n 'CHIL': 'ChilePeruCurrentCoastalProvince',\n 'CHIN': 'ChinaSeaCoastalProvince',\n 'CHSB': 'CheasapeakeBayProvince',\n 'CNRY': 'CanaryCoastalProvince(EACB)',\n 'EAFR': 'E.AfricaCoastalProvince',\n 'ETRA': 'EasternTropicalAtlanticProvince',\n 'FKLD': 'SWAtlanticShelvesProvince',\n 'GFST': 'GulfStreamProvince',\n 'GUIA': 'GuianasCoastalProvince',\n 'GUIN': 'GuineaCurrentCoastalProvince',\n 'INDE': 'E.IndiaCoastalProvince',\n 'INDW': 'W.IndiaCoastalProvince',\n 'ISSG': 'IndianS.SubtropicalGyreProvince',\n 'KURO': 'KuroshioCurrentProvince',\n 'LAKE': 'CaspianSea,AralSea',\n 'MEDI': 'MediterraneanSea,BlackSeaProvince',\n 'MONS': 'IndianMonsoonGyresProvince',\n 'NADR': 'N.AtlanticDriftProvince(WWDR)',\n 'NASE': 'N.AtlanticSubtropicalGyralProvince(East)(STGE)',\n 'NASW': 'N.AtlanticSubtropicalGyralProvince(West)(STGW)',\n 'NATR': 'N.AtlanticTropicalGyralProvince(TRPG)',\n 'NECS': 'NEAtlanticShelvesProvince',\n 'NEWZ': 'NewZealandCoastalProvince',\n 'NPPF': 'N.PacificPolarFrontProvince',\n 'NPSE': 'N.PacificSubtropicalGyreProvince(East)',\n 'NPSW': 'N.PacificSubtropicalGyreProvince(West)',\n 'NPTG': 'N.PacificTropicalGyreProvince',\n 'NWCS': 'NWAtlanticShelvesProvince',\n 'OCAL': 'OffshoreCaliforniaCurrentProvince',\n 'PEQD': 'PacificEquatorialDivergenceProvince',\n 'PNEC': 'N.PacificEquatorialCountercurrentProvince',\n 'PSAE': 'PacificSubarcticGyresProvince(East)',\n 'PSAW': 'PacificSubarcticGyresProvince(West)',\n 'REDS': 'RedSea,PersianGulfProvince',\n 'SANT': 'SubantarcticProvince',\n 'SARC': 'AtlanticSubarcticProvince',\n 'SATL': 'SouthAtlanticGyralProvince(SATG)',\n 'SPSG': 'S.PacificSubtropicalGyreProvince',\n 'SSTC': 'S.SubtropicalConvergenceProvince',\n 'SUND': 'SundaArafuraShelvesProvince',\n 'TASM': 'TasmanSeaProvince',\n 'WARM': 'W.PacificWarmPoolProvince',\n 'WTRA': 'WesternTropicalAtlanticProvince'\n }\n return LonghurstProvinceDict[input]", "def fetch(self, query: str) -> str:\n params = {\n 'utf8': '✓',\n 'country_code_facet': 'US',\n 'title': query,\n 'term-require-all': \"true\"\n }\n headers = {\n 'User-Agent': f\"{self.bot_name}\"\n }\n response = requests.get(self.base_url, params=params, headers=headers)\n return response.text", "def country_codes():\n\n iso_sel = [\n Freedom_short.iso_code,\n Freedom_short.year,\n Freedom_short.country,\n Freedom_short.region,\n Freedom_short.hf_score,\n Freedom_short.hf_rank,\n Freedom_short.hf_quartile,\n ]\n\n # Use Pandas to perform the sql query\n #Grab 2017 Data Only for Dropdown\n codes_stmt = db.session.query(*iso_sel).filter(Freedom_short.year == 2017).order_by(Freedom_short.iso_code).statement\n codes_df = pd.read_sql_query(codes_stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(codes_df[\"iso_code\"]))", "def 
_extract_channels_from_sb_country(html_rsp):\n soup = BeautifulSoup(html_rsp, 'html.parser')\n channel_list = list()\n for link in soup.find_all('a'):\n url = link.get('href')\n if '/youtube/user/' in url:\n channel_list.append('https://socialblade.com' + url + '/monthly') # Get the detailed statistics page.\n return channel_list", "def getActiveCurrencies():", "def search_using_magento_code(cls, code):\n countries = cls.search([('code', '=', code)])\n\n if not countries:\n return cls.raise_user_error(\n \"country_not_found\", error_args=(code, )\n )\n\n return countries[0]", "def lookup_country(latitude, longitude):\n r = requests.get(\"https://api.opencagedata.com/geocode/v1/json?q={}+{}&key=1a43cea9caa6420a8faf6e3b4bf13abb\".format(latitude, longitude))\n if r.status_code != 200:\n print(\"Error accessing OpenCage API: {}\".format(r.content))\n return \"Unknown\"\n result = r.json()\n if not \"results\" in result.keys() or len(result[\"results\"]) < 1:\n print(\"No results found\")\n return \"Unknown\"\n components = result[\"results\"][0][\"components\"]\n if not \"country\" in components.keys():\n print(\"Couldn't locate {}N {}E to a country\".format(latitude, longitude))\n return \"Unknown\"\n return components[\"country\"]", "def get_channels_by_country(self, url, proxies=None):\n country_id = url.split('/')[-1] # The country id iso code is always last on sb country urls.\n html_rsp = self._get_url_wrapper(url, proxies=proxies)\n if not html_rsp:\n return False\n channel_list = self._extract_channels_from_sb_country(html_rsp)\n return {country_id: channel_list}" ]
[ "0.56177616", "0.54850656", "0.5471012", "0.5399314", "0.526083", "0.5193768", "0.51925", "0.51359487", "0.50710964", "0.5054442", "0.49891022", "0.49824235", "0.4921711", "0.488063", "0.48740438", "0.48602745", "0.48566684", "0.48416945", "0.48135594", "0.480547", "0.47955135", "0.47747287", "0.477403", "0.47249746", "0.47089475", "0.47083277", "0.47021592", "0.46934733", "0.46914035", "0.46864453", "0.46743658", "0.46706754", "0.46591914", "0.46504796", "0.46466294", "0.4645888", "0.46284685", "0.46227163", "0.46214092", "0.46166328", "0.46108064", "0.46106035", "0.45976636", "0.45955083", "0.45888755", "0.45832273", "0.45832026", "0.45651856", "0.45622844", "0.45596817", "0.45592043", "0.45572886", "0.45568708", "0.45494118", "0.45446157", "0.45401597", "0.4537291", "0.45168737", "0.4514555", "0.45142528", "0.45066154", "0.44999325", "0.44909266", "0.44778854", "0.44676718", "0.4462517", "0.44612232", "0.44597074", "0.4448542", "0.4441078", "0.44329214", "0.4429985", "0.44287372", "0.44284818", "0.44253188", "0.44163874", "0.44137958", "0.44086143", "0.44005427", "0.43919387", "0.43862376", "0.4386224", "0.43852422", "0.43845627", "0.43832043", "0.43828908", "0.43815035", "0.43804592", "0.43799704", "0.43743837", "0.43706954", "0.43695462", "0.436902", "0.43677703", "0.43668118", "0.43639016", "0.43626627", "0.43557447", "0.43531513", "0.43454883" ]
0.45403993
55
Method to get consumption data of Gujarat
def fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None, logger=getLogger('IN-GJ')):
    session = session or requests.session()
    if target_datetime:
        raise NotImplementedError(
            'This parser is not yet able to parse past dates')

    value_map = fetch_data(zone_key, session, logger=logger)

    data = {
        'zoneKey': zone_key,
        'datetime': value_map['date'].datetime,
        'consumption': value_map['total consumption'],
        'source': 'sldcguj.com'
    }

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data", "def getStockData():\n pass", "def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))", "def get_data(self, gauge_name, date_key):\n pass", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def get(self):\n return self._measurementController.getMeasurements(), 200", "def _get_consumption(self, url, start, end, aggregation):\n start = self._to_milliseconds(start)\n end = self._to_milliseconds(end)\n\n headers = {\"Authorization\": \"Bearer {}\".format(self.access_token)}\n params = {\n \"aggregation\": aggregation,\n \"from\": start,\n \"to\": end\n }\n r = requests.get(url, headers=headers, params=params)\n r.raise_for_status()\n return r.json()", "def get_data():\n loopstate = get_loopstate()\n loudness = grovepi.analogRead(LOUDNESS_SENSOR)\n [temp, hum] = grovepi.dht(TEMP_HUM_SENSOR, module_type=0)\n return [loopstate, loudness, temp, hum]", "def getData(self):\n self.ser.write(b'g')\n readString = self.ser.readline()\n print(readString)\n readString = readString.decode(\"utf-8\")\n splittedString = readString.split('\\t')\n for i, num in enumerate(splittedString):\n try:\n splittedString[i] = int(float(num))\n except ValueError:\n pass\n self.accString.set('Accleration\\nX: %.5f\\nY: %.5f\\nZ: %.5f' %\n (splittedString[0], splittedString[1],\n splittedString[2]))\n self.logFile.write(readString)\n self.comJob = root.after(10, self.getData)", "def collect_data():\n\n \"Aqui va el codigo de alberto para recoger los datos que puede venir en forma de diccionario\"\n #TODO: Función para recoger los datos de los bms y meterlos en diccionarios (Alberto jr.)\n\n bms1 = dict()\n bms2 = dict()\n bms3 = dict()\n general = dict()\n\n\n # Ejemplos de datos para meter en los diccionarios\n\n temperature = 35.5\n voltage1 = 15.2\n voltage2 = 14.8\n date = time.strftime(\"%Y-%m-%d\") # Current date\n t = time.strftime(\"%H:%M:%S\") # Current time\n\n return bms1, bms2, bms3, general", "def get_data():\n pass", "def getdata(self):\n return self.cwt", "def consumption():\n # Query all consumption data by state\n Consumption = engine.execute(\"SELECT * FROM cons\").fetchall()\n \n return jsonify({'Consumption': [dict(row) for row in Consumption]})", "def getData(dig, pipe, event, pulses):\n logging.info(\"Started getData\")\n start_time = time.time()\n for pulse in range(pulses):\n samples = dig.get_data_raw()\n# logging.info(\"GetData retrieved: %d\", len(samples))\n pipe.put(samples)\n end_time = time.time()\n elapsed = end_time - start_time\n samplesProcessed = (pulses * len(samples[0]) * len(samples))\n logging.info(\"getData processed %d Msamples in %.3f s\",\n samplesProcessed / 1e6,\n elapsed)\n logging.info(\"getData rate: %.3f Msa/s in lumps of %d samples\",\n samplesProcessed / elapsed / 1e6,\n dig.pointsPerCycle)", "def get_gga_data(port):\n \n \n # Wait for GGA message :\n gga = port.readline().decode(\"utf-8\")\n while not 'GGA' in gga:\n if gga: print(\"Wait for GGA : \", gga)\n gga = port.readline().decode(\"utf-8\")\n \n \n t = np.float(gga[7:16])\n \n # Print messages :\n print(\"Heading antenna frame :\")\n print(\" GGA: \",gga)\n \n # Quality check :\n if not 'GGA' in gga:\n print(\"Issue with GGA frame decoding !\\nMessage:\\nGGA:{0}\\n\".format(gga))\n gga, t = get_gga_data(port)\n \n return gga, t", "def sample_consumption():\n product = Product(\"NameA\", 15, 
17.85, 0.07, \"oak\", 0.08, \"ENplusA1\",\n \"Pelletics.cz\", date(2020, 12, 20))\n delivery = Delivery(product, \"Pellets2Home\", 7350, 42500,\n date(2020, 12, 20))\n some_datetime = datetime(2020, 11, 20, 14, 22, 46, 0)\n consumption = Consumption(some_datetime, delivery, 30, \"30 kgs\")\n return consumption", "def get_all(self):\n try:\n return self.current_data\n except:\n print('No data received from sensor')", "def get_data():\n return", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data(self):", "def get_usage_data(self):\n with self._lock:\n data_copy = self._data.copy()\n return data_copy", "def get_data(self, calc_throughput=False):\n n_timestamp = datetime.datetime.now()\n delta = n_timestamp - self.timestamp\n\n try:\n ret = ds_client().call('bridge\\n')\n except socket.error:\n return self.data\n\n self.timestamp = n_timestamp\n\n for brdata in ret:\n name = brdata[u'name']\n req = 'bridge ' + name.encode() + ' stats\\n'\n res = ds_client().call(req)[0]\n\n if calc_throughput == True:\n try:\n lps = calc_pps(res[u'flow-lookup-count'],\n self.data[brdata[u'name']][u'flow-lookup-count'],\n delta)\n mps = calc_pps(res[u'flow-matched-count'],\n self.data[brdata[u'name']][u'flow-matched-count'],\n delta)\n res.update({u'lookup_per_sec':lps, u'match_per_sec':mps})\n except KeyError:\n res.update({u'lookup_per_sec':0, u'match_per_sec':0})\n try:\n c_hit = (res[u'flowcache-hit']\n - self.data[brdata[u'name']][u'flowcache-hit'])\n lookup = (res[u'flow-lookup-count']\n - self.data[brdata[u'name']][u'flow-lookup-count'])\n c_rate = c_hit / lookup\n res.update({u'cache_hitrate':str(c_rate)})\n except (KeyError, ZeroDivisionError):\n res.update({u'cache_hitrate':'-'})\n self.data[res[u'name']] = res\n return self.data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def data(self):\n return self._fastqc_data", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def data(self):\n\t\tself.dworker()\n\t\treturn self.d", "def get_data(self):\r\n pass", "def getBatteryInfo():\n \n # WMIC Win32_Battery fields to query\n battery_vars = ['BatteryStatus', 'EstimatedChargeRemaining', 'EstimatedRunTime',\n 'Status']\n\n # Start data with timestamp\n result = datetime.now().strftime(DATE_FORMAT)\n\n for var in battery_vars:\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_Battery\", \"get\", var, \"/value\"]) \n \n result += ', ' + temp.strip()\n \n result = '[' + result + ']'\n \n return result", "def get_dtc(self):\n r = self.sensor(1)\n num = r[0]\n # get all DTC, 3 per mesg response\n self.send_command(GET_DTC_COMMAND)\n #for i in range(0, ceil(num/3.0)):\n res = self.get_result()\n print res\n return res\n # fixme: finish", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data(self):\n return self._beta", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()", "def get_data(self):\n return read_sensor(bus=self.bus,\n address=self.address)", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def _get_data(self):\n raise NotImplementedError()", "async def get_device_data(self):\n pass", "def get_metric_info(self):\n metric_data_object = self.client.get_metric_data(\n MetricDataQueries=[\n {\n \"Id\": \"cdbdata_invocations\",\n 
\"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Invocations\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_errors\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Errors\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_throttles\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"Throttles\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n },\n {\n \"Id\": \"cdbdata_concurrentexec\",\n \"MetricStat\": {\n \"Metric\": {\n \"Namespace\": \"AWS/Lambda\",\n \"MetricName\": \"ConcurrentExecutions\",\n \"Dimensions\": [\n {\n \"Name\": \"FunctionName\",\n \"Value\": self.function_name\n }\n ]\n },\n \"Period\": 60,\n \"Stat\": \"Sum\"\n },\n \"ReturnData\": True\n }\n ],\n StartTime=self.start_timestamp,\n EndTime=self.end_timestamp,\n ScanBy='TimestampDescending'\n )\n\n metric_data_points = metric_data_object[DataPointsCollector.RESPONSE_KEY]\n\n return metric_data_points", "def measurements():\n print(\"server received request for precipitation data...\")\n return jsonify(measurements_data)", "async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n JSON = {\"startDate\": str(lastBilledDate.strftime(\"%m%d%Y\"))}\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.post(\n URL_APPLIANCE_USAGE.format(account=account), json=JSON\n )\n if response.status == 200:\n electric = (await response.json())[\"data\"][\"electric\"]\n\n full = 100\n for e in electric:\n rr = round(float(e[\"percentageDollar\"]))\n if rr < full:\n full = full - rr\n else:\n rr = full\n data[e[\"category\"].replace(\" \", \"_\")] = rr\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"energy_percent_by_applicance\": data}", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: 
#we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def get_usage_and_cost(self, meter_id, year=2015, month=1, day=1):\n date_format = '%Y-%m-%d'\n start_date = datetime.datetime(year, month, day)\n start_date_string = datetime.datetime.strftime(start_date, date_format)\n\n resource = '{0}/meter/{1}/consumptionData'.format(self.domain,meter_id)\n\n usage_and_cost = []\n page = 1\n # Initialize the URL\n url = '{0}?page={1}&startDate={2}'.format(resource, page, start_date_string)\n\n while url:\n self.logger.debug(\"Pulling data from {0}\".format(url))\n page += 1\n\n response = self.session.get(url)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n\n # Set URL to none to stop loop if no more links\n url = None\n\n data = response.text\n root = Et.fromstring(data)\n for element in root.findall(\"meterConsumption\"):\n month_data = dict()\n # Get the usage and cost data\n month_data[\"DATE\"] = element.find(\"endDate\").text\n month_data[\"USAGE\"] = float(element.find(\"usage\").text)\n try:\n month_data[\"COST\"] = float(element.find(\"cost\").text)\n except AttributeError:\n month_data[\"COST\"] = 0\n\n # append to usage\n usage_and_cost.append(month_data)\n\n for element in root.findall(\"meterDelivery\"):\n month_data = dict()\n # Get the usage and cost data\n month_data[\"DATE\"] = element.find(\"deliveryDate\").text\n month_data[\"USAGE\"] = float(element.find(\"quantity\").text)\n try:\n month_data[\"COST\"] = float(element.find(\"cost\").text)\n except AttributeError:\n month_data[\"COST\"] = 0\n\n # append to usage\n usage_and_cost.append(month_data)\n # Get the next URL\n for element in root.find(\"links\"):\n if element.get(\"linkDescription\") == \"next page\":\n url = \"{0}{1}\".format(self.domain, element.get(\"link\"))\n # Return the cost for the time period\n return usage_and_cost", "def read_measurement(self):\n return self.execute(SdpI2cCmdReadMeasurement())", "def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)", "def fetch(self):\n return read_voltage()", "def read_consumption(api, config_entry):\n # read the configuration data\n utility = UTILITY_ELECTRICITY\n daily_update = DEFAULT_DAILY_UPDATE\n start = None\n end = None\n\n # check options\n if config_entry.options:\n utility = config_entry.options.get(CONF_UTILITY)\n daily_update = config_entry.options.get(CONF_DAILY_UPDATE)\n if not daily_update:\n start = 
config_entry.options.get(CONF_START)\n end = config_entry.options.get(CONF_END)\n\n # get power consumption data\n data = None\n try:\n data = api.read_consumption(utility, start, end)\n _LOGGER.info(f\"[READ_CONSUMPTION] Grabbed consumption data: ({start}-{end})\")\n except ValueError as ex:\n _LOGGER.warning(f\"[READ_CONSUMPTION] Error: {str(err)}\")\n finally:\n return data", "def get_meter_info(apt_no):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n payload = (\"select uuid, Metadata/Instrument/SupplyType \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=payload)\n # logger.debug (\"%s\",r)\n payload_body = r.json()\n # logger.debug (\"Payload:\\n%s\", payload_body)\n\n meters = []\n for i in range(0, len(payload_body)):\n meter = payload_body[i]\n\n meters.append({'uuid': meter['uuid'], 'type': meter[\n 'Metadata']['Instrument']['SupplyType']})\n\n return meters", "def get_usage_data(username, password):\n usage_req = XfinityUsage(username, password, browser_name=\"firefox-headless\")\n return usage_req.run()", "def get_gauge_data(self, gauge_name, min_date_key=None, max_date_key=None):\n pass", "async def get_pressure(self) -> float: # type: ignore\n ...", "def read_core_vbat(self) -> float:", "def AcquiredData (self, arguments=None) :\n\t\tself.OODriver.Wrapper_getSpectrum(self.wrapperHandle,self.spectrometerIndex,self.bufferHandle)\n\t\t\n\t\tif self.OODriver.Wrapper_isSaturated(self.wrapperHandle,self.spectrometerIndex) :\n\t\t\tprint \"Warning: OcenOptics spectrometer is saturated!\"\n\t\t\t\n\t\ttry : return self.buffer[self.spectral_interval]\n\t\texcept AttributeError : return self.buffer", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def get_gear_data(self):\n\n return self.data", "def readData(self):\n if (self.model == 'GDS'):\n self.write(':ACQ'+str(ch)+':MEM?\\n')\n elif (self.model == 'TDS'):\n self.write('CURVe?\\n')\n\n # Check for the initial '#'; if not present, raise error.\n if (self.read(1) != '#'):\n raise Exception, \"Expected header not present\"\n\n # Read the data length indicator\n dataSize = int(self.read(int(self.read(1))))\n\n # extra steps for GDS\n if (self.model == 'GDS'):\n # subtract the 8 bytes we will read.\n dataSize -= 8\n # Read the sampling period\n hstep = struct.unpack('>f', self.read(4))[0]\n # also, fix hoff so it corresponds with that for TDS\n # FIXME: check with the scope at some point.\n hoff = hoff - float(dataSize/4) * hstep\n # Read 4 bytes to advance to the actual data: first byte\n # contains the channel and the three are not used,\n # according to the GDS800 manual.\n self.read(4)\n \n # Read data; TDS expects a 1-byte data, GDS expects 2-byte one.\n if (self.model == 'TDS'):\n data = list(struct.unpack('>'+str(dataSize)+'b',\n self.read(dataSize)))\n # TDS has a trailing '\\n' that should be drained.\n self.read(1)\n elif (self.model == 'GDS'):\n data = list(struct.unpack('>'+str(dataSize/2)+'h',\n self.read(dataSize)))\n\n return data", "def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n self.cache[index][CACHE_INDEX_DATA] = values", "def data():\n return volumes_fetchers.get_json_data()", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def stats(self):", "def GetData(self):\r\n if self.Error == False:\r\n Extra = {}\r\n try:\r\n result = {}\r\n temp = self.ScrapeMainWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters1Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeParameters2Webpage()\r\n if temp != None:\r\n result.update(temp)\r\n temp = self.ScrapeStatusWebpage()\r\n if temp != None:\r\n result.update(temp)\r\n sqlArray = {}\r\n sqlArray[self.deviceDescr] = {}\r\n sqlArray[self.deviceDescr][self.devNumber] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"General\"] = result\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"] = {}\r\n sqlArray[self.deviceDescr][self.devNumber][\"_ExtractInfo\"][\"ExtractTime\"] = time.time()\r\n sqlArray[\"ReadError\"] = False \r\n return sqlArray\r\n \r\n except Exception as e: \r\n self.log.printError(\"ERROR in Retreiving Seatel VSAT Data,%s Module Error\" % sys._getframe().f_code.co_name) \r\n self.log.printError( str(e))\r\n self.Error = True\r\n Extra[\"ReadError\"] = True\r\n return Extra\r\n else:\r\n self.log.printWarning(\"%s skipped due to previous failure\" % sys._getframe().f_code.co_name)\r\n return None", "def import_measurements():\n\n print('Receive a transfer...')", "def getAllMeasurement(self): \n return self.measurement", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return 
{\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}", "def get_statistics(self):\n\t\treturn Job(SDK.PrlSrv_GetStatistics(self.handle)[0])", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def readOneData(self):\n\n\t\tif self._mt5Client is not None:\n\t\t\tdatas = self._mt5Client.getData()\n\n\t\t\tif datas is not None:\n\t\t\t\tPERIOD = int(self._config['data']['predict'])\n\t\t\t\tHALF_PERIOD = int(PERIOD/2)\n\n\t\t\t\tdata = []\n\n\t\t\t\t#Time Got\n\t\t\t\tself._LAST_PERIOD_PREDICTED_END = datas['time']\n\n\t\t\t\t#time open high low close tick_volume spread real_\n\t\t\t\t#Switch the price type calucation\n\n\t\t\t\tw_p = self._config['data']['price']\n\t\t\t\tv = 0\n\n\t\t\t\tif(w_p == CHART_PRICES_TYPE['O']):\n\t\t\t\t\tv = float(datas['open']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['C']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['close']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['H']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['high'])\n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['L']):\n\n\t\t\t\t\tv = float(datas['low']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['HL/2']):\n\t\t\t\t\tv = ( float(datas['low']) + float(datas['high']) ) /2\n\t\t\t\t\n\t\t\t\tself.notify(msg={\n\t\t\t\t\t\t\t\t\t'prices': {\n\t\t\t\t\t\t\t\t\t\t'values': {\n\t\t\t\t\t\t\t\t\t\t\t'RP': str(v)\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} \n\t\t\t\t\t\t\t\t} \n\t\t\t\t)\n\n\t\t\t\tdata.append(100000 * v ) \n\n\t\t\t\tself._TEMPORARY_GLOBAL_DATA.append(data[-1])\n\n\t\t\t\tself._GLOBAL_DATA.append(data[-1])\n\n\t\t\t\treturn data", "def data(self):\n tmp = self.acceleration()\n tmp.append(self.temperature())\n return tuple(tmp)", "def get_data(self):\n if self.ser.in_waiting:\n data_string = self.ser.readline().decode().strip()\n if not data_string: return self.data\n self.data = [\n float(element) for element in data_string.split()\n ]\n self.ser.reset_input_buffer()\n return self.data", "def get(self) -> list[float]:", "def producer_get(self):\r\n return self.__sample", "def getData(self):\n data = self.rx.getNData()\n\n return (data)", "def get_metrics(self):\n return None", "def get_grating_blaze(self):\r\n msg = struct.pack('>2B', 56, 3)\r\n response = self.query(msg)\r\n return struct.unpack('>H', response[:2])[0]", "def 
get_data(self):\n return self.read_sample_win()", "def _get_network_utilization(self):\n options = self.scenario_cfg[\"options\"]\n interval = options.get('interval', 1)\n count = options.get('count', 1)\n\n cmd = \"sudo sar -n DEV %d %d\" % (interval, count)\n\n raw_result = self._execute_command(cmd)\n result = self._filtrate_result(raw_result)\n\n return result", "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def charging_current_and_voltage(self):\n done, data = self._request('GG')\n if done:\n milliamps = float(data[0])\n millivolts = float(data[1])\n return {\n 'amps': float(milliamps) / 1000 if milliamps > 0 else 0.0,\n 'volts': float(millivolts) / 1000 if millivolts > 0 else 0.0\n }\n\n raise EvseError", "def Take_Voltage_Measurement(self):\n self._CHK(nidaq.DAQmxStartTask(self.task_handle))\n samples_read = int32()\n data = numpy.zeros((self.num_samples,),dtype=numpy.float64)\n self._CHK(nidaq.DAQmxReadAnalogF64(self.task_handle,uInt32(self.num_samples),float64(self.timeout),\n DAQmx_Val_GroupByChannel,data.ctypes.data,\n self.num_samples,ctypes.byref(samples_read),None))\n if self.task_handle.value != 0:\n nidaq.DAQmxStopTask(self.task_handle)\n nidaq.DAQmxClearTask(self.task_handle)\n if samples_read.value != self.num_samples:\n return 'error'\n else:\n return data", "def Take_Voltage_Measurement(self):\n self._CHK(nidaq.DAQmxStartTask(self.task_handle))\n samples_read = int32()\n data = numpy.zeros((self.num_samples,),dtype=numpy.float64)\n self._CHK(nidaq.DAQmxReadAnalogF64(self.task_handle,uInt32(self.num_samples),float64(self.timeout),\n DAQmx_Val_GroupByChannel,data.ctypes.data,\n self.num_samples,ctypes.byref(samples_read),None))\n if self.task_handle.value != 0:\n nidaq.DAQmxStopTask(self.task_handle)\n nidaq.DAQmxClearTask(self.task_handle)\n if samples_read.value != self.num_samples:\n return 'error'\n else:\n return data", "async def fetch_logs(self) -> bytes:\n host = \"127.0.0.1\"\n port = 42000\n dt = datetime.now(pytz.timezone(\"Europe/Amsterdam\"))\n request = {\"id\": 1, \"method\": \"getstat\"}\n\n point = TCP4ClientEndpoint(reactor, host, port)\n try:\n connected_p = await connectProtocol(\n point, EWBFProtocol()) # type: EWBFProtocol\n response = await connected_p.make_request(request)\n except Exception as e:\n print(\"couldn't connect. 
{}\".format(e))\n return b\"\"\n else:\n rl = []\n t = 0 # type: int\n power = speed = accept = reject = 0\n for idx, data in enumerate(response['result']):\n rl.append(\"GPU{0}_SPEED: {1} H/s\".format(\n idx, data['speed_sps']))\n rl.append(\"GPU{0}_POWER: {1}\".format(\n idx, data['gpu_power_usage']))\n t = data['start_time']\n power += data['gpu_power_usage']\n speed += data['speed_sps']\n accept += data['accepted_shares']\n reject += data['rejected_shares']\n\n rl.append(\"Power: {0}\".format(power))\n rl.append(\"Total speed: {0} Sol/s\".format(speed))\n rl.append(\"Accepted share: {0}\".format(accept))\n rl.append(\"Rejected share: {0}\".format(reject))\n rl.append(\"Total GPUs: {0}\".format(len(response['result'])))\n rl.append(\"START_TIME: {0}\".format(int(t)))\n rl.append(\"CURRENT_TIME: {0}\".format(int(dt.timestamp())))\n rl.append(\"UPTIME: {0}\".format(int(dt.timestamp() - t)))\n return \";\".join(rl).encode('utf-8') + b\";\"", "def GetData(self):\r\n \r\n return self._data", "def get(self):\r\n return self.data_array", "def usage_metrics(self) -> Sequence['outputs.GetServiceQuotaUsageMetricResult']:\n return pulumi.get(self, \"usage_metrics\")", "def get_accumulated_data(self, topic, start_ts, end_ts, units):\n return self.manager.get_accumulated_data(topic, start_ts, end_ts, units)", "def AcquiredData (self) :\n\t\treturn self.run(\"AcquiredData\")", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1] # The most recent two hours of data\n# print([elem['avg_pot_5'] for elem in two_hours])\n# avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) / (int(key) / 100) for elem in two_hours]\n avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n# print(avg_pot_data[0][-5:])\n avg_pot_data = [[max(min(elem, 100),0) for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n avg_pot_data = [[elem if elem != 100 else 0 for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n# print(avg_pot_data[0][-5:])\n return avg_pot_data", "def inspect_instance(self):\n url = \"http://%s:1337/metrics\" % self.host\n r = requests.get(url)\n try:\n s = r.json()\n except TypeError:\n s = r.text\n return s", "def read_ag_data(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 14)\n temp = lsm9ds1.to_int16(data[0:2])\n gyro = lsm9ds1.to_vector_left_to_right_hand_rule(data[2:8])\n acc = lsm9ds1.to_vector_left_to_right_hand_rule(data[8:14])\n return temp, acc, gyro", "def get_data(self):\n data = {\n \"ts\": self.drone.pos[0][0],\n \"drone\": self.drone,\n \"subject\": self.subject,\n \"peds\": self.peds, # can be None\n \"objs\": self.objs # can be None\n }\n self.empty_bag()\n return data", "def data(self):", "def get(self, sid):\n\n data = SubscriptionManager.get_current_usage(sid)\n result = DataUsageSchema().dump(data)\n return result.data", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = 
self.parse_json_data(data)\n return self.data", "def gpus(self):\n return self.__gpus" ]
[ "0.658672", "0.63099957", "0.6295263", "0.62537384", "0.61207724", "0.6117239", "0.61045456", "0.606737", "0.6031213", "0.6023843", "0.5998073", "0.598272", "0.59735596", "0.59629476", "0.5935335", "0.5935141", "0.593349", "0.5888127", "0.58589953", "0.5831155", "0.5789991", "0.5786737", "0.5776419", "0.5776419", "0.57576096", "0.57457006", "0.57434857", "0.574034", "0.57112056", "0.57103306", "0.57064855", "0.5670855", "0.5670855", "0.5670855", "0.56617516", "0.5658524", "0.56456", "0.56439096", "0.56345975", "0.5629169", "0.56210726", "0.55950636", "0.5594366", "0.55895966", "0.55823064", "0.55791557", "0.55705225", "0.554894", "0.5546251", "0.55462176", "0.5535808", "0.5531965", "0.55261827", "0.5525294", "0.55201215", "0.5518369", "0.55156", "0.55086094", "0.55019003", "0.54969877", "0.54969454", "0.5496138", "0.54711497", "0.5470923", "0.54706603", "0.54668266", "0.54636747", "0.54630196", "0.54608333", "0.54588217", "0.54549223", "0.54530275", "0.54472625", "0.54469883", "0.54438764", "0.5437126", "0.54352957", "0.54311275", "0.54302734", "0.5428184", "0.54255086", "0.5420581", "0.5414753", "0.5412571", "0.5411705", "0.5411705", "0.5410178", "0.5396652", "0.53941804", "0.53877616", "0.5379582", "0.53770906", "0.53692657", "0.5364527", "0.5362626", "0.53587604", "0.5351225", "0.53511566", "0.5342506", "0.53405285", "0.5335379" ]
0.0
-1
Smoothed absolute function. Useful to compute an L1 smooth error.
def abs_smooth(net_loc, input_loc):
    x = net_loc - input_loc
    absx = tf.abs(x)
    minx = tf.minimum(absx, 1)
    r = 0.5 * ((absx - 1) * minx + absx)
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _l1_smooth_loss(self, y_true, y_predicted):\n absolute_value_loss = tf.abs(y_true, y_predicted) - 0.5\n square_loss = 0.5 * (y_true - y_predicted)**2\n smooth_l1_condition = tf.less(absolute_value_loss, 1.0)\n l1_smooth_loss = tf.select(smooth_l1_condition,\n square_loss,\n absolute_value_loss)\n return tf.reduce_sum(l1_smooth_loss, -1)", "def smooth_l1_loss(y_true, y_pred):\n\n # Take absolute difference\n x = K.abs(y_true - y_pred)\n\n # Find indices of values less than 1\n mask = K.cast(K.less(x, 1.0), \"float32\")\n\n # Loss calculation for smooth l1\n loss = (mask * (0.5 * x ** 2)) + (1 - mask) * (x - 0.5)\n return loss", "def apply_smoothstep(image):\n image_out = 3 * image**2 - 2 * image**3\n return image_out", "def get_smooth_loss(disp, img):\n grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])\n grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])\n\n grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)\n grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)\n\n grad_disp_x *= torch.exp(-grad_img_x)\n grad_disp_y *= torch.exp(-grad_img_y)\n\n return grad_disp_x.mean() + grad_disp_y.mean()", "def smooth(dest, f):\n if f.is_Constant:\n # Return a scaled version of the input if it's a Constant\n dest.data[:] = .9 * f.data\n else:\n Operator(Eq(dest, f.avg(dims=f.dimensions[-1])), name='smoother').apply()", "def compute_smooth_loss(disp, img):\n img = F.interpolate(img, disp.shape[2:], mode=\"bilinear\", align_corners=False)\n\n grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])\n grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])\n\n grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)\n grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)\n\n grad_disp_x *= torch.exp(-grad_img_x)\n grad_disp_y *= torch.exp(-grad_img_y)\n\n smooth_loss = grad_disp_x.mean() + grad_disp_y.mean()\n\n return smooth_loss", "def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights", "def m_step_x0(x_smooth):\n return x_smooth[0]", "def smooth(F, x_predicted, P_predicted, x_filtered, P_filtered, z):\n ntimesteps, nmatrices = get_observations_shape(z)\n x_smooth = [None] * ntimesteps\n P_smooth = [None] * ntimesteps\n L = [None] * ntimesteps\n # set mean and covariance at the end to the forward filtered data to start the smoother\n x_smooth[-1] = x_filtered[-1]\n P_smooth[-1] = P_filtered[-1]\n\n # Run the smoother backwards\n for t in reversed(range(ntimesteps - 1)):\n F_cur = pick_nth_step(F, t)\n x_smooth[t], P_smooth[t], L[t] = kalman_smoothing_step(F_cur, x_filtered[t], P_filtered[t], x_predicted[t + 1],\n P_predicted[t + 1], x_smooth[t + 1], P_smooth[t + 1])\n return np.array(x_smooth), np.array(P_smooth), np.array(L)", "def mean_absolute_error(x0, x1):\n return MeanAbsoluteError()(x0, x1)", "def smooth_al(data):\n wd = 5\n optimize = True\n DW_min = 5\n while optimize == True:\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, smooth)\n if abs(2 - DW) < DW_min:\n wd = wd + 2\n DW_min = abs(2 - DW)\n else:\n wd = wd - 2\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, 
smooth)\n break\n return smooth, wd", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def smoothed(self, box_pts=True):\n elong = self.copy()\n elong.ys = smooth_curve(self.ys, box_pts)\n return elong", "def abs_robust_loss(diff, eps=0.01, q=0.4):\n return torch.pow((torch.abs(diff) + eps), q)", "def smoothed_diff(\n data: np.ndarray,\n *,\n bias: float = 1e-4,\n damped: bool = True,\n linear: bool = False,\n fast_init: bool = False,\n retall: bool = False,\n **kwargs,\n) -> Union[np.ndarray, Result]:\n values = np.asarray(data)\n maximum_value = values.max()\n values = np.diff(values, prepend=0.0)\n\n # Guarantee that there are no zero values in the\n # dataset\n if not linear:\n values = np.maximum(values, 0.0)\n S = values.sum()\n values *= 1 - bias\n values += bias * S\n\n # Choose empirically validated values\n kwargs.setdefault(\"smoothing_level\", 1 / 5)\n\n # Holt exponential smoothing\n holt = sm.tsa.Holt(values, exponential=not linear, damped=damped)\n res = holt.fit(use_brute=not fast_init, **kwargs)\n out = res.fittedvalues\n out *= maximum_value / out.sum()\n return Result(out, res) if retall else out", "def smooth(self, xs, ys, x):\n w = np.sum(np.exp(np.multiply(np.square(np.divide(np.subtract(xs, x), self.sigma)), -0.5)))\n v = np.sum(np.multiply(ys, np.exp(np.multiply(np.square(np.divide(np.subtract(xs, x), self.sigma)), -0.5))))\n\n return v / w", "def abs_loss(model: tf.keras.Model,\n model_input: tf.Tensor,\n model_target: tf.Tensor\n ):\n y_ = model(model_input)\n return tf.losses.absolute_difference(labels=model_target,\n predictions=y_,\n reduction=\"none\"\n )", "def smooth_l1_loss(input, target, reduction='none', beta=1.0):\n delta = input - target\n mask = delta.abs() < beta\n delta[mask] = delta[mask].pow(2) / (2 * beta)\n delta[~mask] = delta[~mask].abs() - beta / 2\n if reduction == 'mean':\n return delta.mean()\n elif reduction == 'sum':\n return delta.sum()\n elif reduction == 'none':\n return delta\n else:\n raise ValueError(f\"reduction must be 'mean', 'sum' or 'none', but got '{reduction}'\")", "def smooth_l1_loss(input, target, reduction='none', beta=1.0):\n delta = input - target\n mask = delta.abs() < beta\n delta[mask] = (delta[mask]).pow(2) / (2 * beta)\n delta[~mask] = delta[~mask].abs() - beta / 2\n\n if reduction == 'mean':\n return delta.mean()\n elif reduction == 'sum':\n return delta.sum()\n elif reduction == 'none':\n return delta\n else:\n raise ValueError(f'reduction must be \\'mean\\', \\'sum\\' or '\n f'\\'none\\', but got \\'{reduction}\\'')", "def l1_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.abs(obs - actual) , 1)", "def smooth_with_function_and_mask(image, function, mask):\n bleed_over = function(mask.astype(float))\n masked_image = np.zeros(image.shape, image.dtype)\n masked_image[mask] = image[mask]\n smoothed_image = function(masked_image)\n output_image = smoothed_image / (bleed_over + np.finfo(float).eps)\n return output_image", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def smooth_signal(self, signal, window=7, order=3):\n\n from scipy.signal import savgol_filter\n try:\n signal_smoothed = 
savgol_filter(signal, window_length=window, polyorder=order)\n except:\n return signal\n return signal_smoothed", "def smooth_series(y,p = 6.25):\n cycle, trend = sm.tsa.filters.hpfilter(y, p)\n return trend", "def steffensen ( fun , x , fx = None , args = () ) :\n \n if fx is None : fx = float ( fun ( x , *args ) ) ## reuse if already calculated\n if fx : \n gx = ( fun ( x + fx , *args ) - fx ) / fx\n if gx : return x - fx / gx", "def smooth(o_l, o_r, c_l, c_r, AMT):\n l = o_l * AMT + (1-AMT) * c_l\n r = o_r * AMT + (1-AMT) * c_r\n return (l, r)", "def _lidstone_smooth(prob, smoothing, observations, outcomes):\n return (prob + smoothing) / (observations + (smoothing * outcomes))", "def compute_smooth_loss(self, preds, targets):\n losses = []\n for disp, img in zip(preds, targets):\n grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])\n grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])\n\n grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)\n grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)\n\n grad_disp_x *= torch.exp(-grad_img_x)\n grad_disp_y *= torch.exp(-grad_img_y)\n\n losses.append(grad_disp_x.mean() + grad_disp_y.mean())\n\n return sum(losses)", "def single_exponential_smoothing(self, series, horizon, alpha=0.5):\n result = [0, series[0]]\n for i in range(1, len(series) + horizon - 1):\n if i >= len(series):\n result.append((series[-1] * alpha) + ((1-alpha) * result[i]))\n else:\n result.append((series[i] * alpha) + ((1-alpha) * result[i]))\n return result[len(series):len(series)+horizon]", "def absolute_value(x):\n x_star = x.clone()\n x_star[1] *= -1\n return elementwise_mult(x, x_star)[0].sqrt_()", "def fabs(x):\n return 0.0", "def absolute_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n if current_value > predicted_value:\n slope += learning_rate*predictor\n bias += learning_rate\n else:\n slope -= learning_rate*predictor\n bias -= learning_rate\n return slope, bias", "def scipyols(array):\n\t# +++++ Get the OLS +++++\n\ttry:\n\t\tslope, intercept, r_value, p_value, std_err = stats.linregress(array)\n\t\t# +++++ calculate the total change +++++\n\t\t# change = (slope*array.shape[0])\n\t\t# +++++ return the results +++++\n\t\treturn slope #p.array([change, slope, intercept, r_value**2, p_value, std_err])\n\texcept:\n\t\tipdb.set_trace()\n\t\treturn np.NAN", "def hyperopt_loss_function(results: DataFrame, trade_count: int,\n *args, **kwargs) -> float:\n total_profit = results['profit_abs'].sum()\n return -1 * total_profit", "def soft_absolute(u):\n\tepsilon = 1e-8\n\treturn np.sqrt(epsilon + u * u)", "def smoothL1(input_flow, target_flow, size_average = True):\n target_valid = (target_flow < 192) & (target_flow > 0) \n return F.smooth_l1_loss(input_flow[target_valid], target_flow[target_valid], size_average=size_average)", "def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)", "def kalman_smoothing_step(F, x_filtered, P_filtered, x_predicted, P_predicted, x_smooth, P_smooth):\n # \"Kalman-like matrix to include predicted\"\n L = matrix_matrix_product(matrix_matrix_product(P_filtered, transpose_tensor(F)), special_inv(P_predicted))\n x_smooth = x_filtered + matrix_vector_product(L, (x_smooth - x_predicted))\n P_smooth = P_filtered + matrix_matrix_product(matrix_matrix_product(L, (P_smooth - P_predicted)),\n transpose_tensor(L))\n return x_smooth, P_smooth, L", "def 
modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):\n sigma2 = sigma * sigma\n\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n\n return outside_mul", "def _residual(function, p, x, y, y_err):\n return (y - function(p, x)) / y_err", "def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)", "def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)", "def spline(self, smoothing=None):\r\n from lsst.analysis import utils\r\n return utils.fitspline(self.points, self.z, smoothing)", "def lapserate(t, z, sigma, lat):\n import numpy as np\n dT = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n dz = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n for i in range(np.ma.size(sigma, axis=0)-1):\n dT[i, :] = t[i+1, :] - t[i, :]\n for i in range(np.ma.size(sigma, axis=0)-1):\n dz[i, :] = z[i+1, :] - z[i, :]\n lapse = -1000 * dT[0:-1] / dz[0:-1]\n # zonalplot(lapse, sigma[0:-1], lat, 'Lapse rate')\n return lapse", "def lapserate(t, z, sigma, lat):\n import numpy as np\n dT = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n dz = np.zeros((np.ma.size(sigma), np.ma.size(lat)))\n for i in range(np.ma.size(sigma, axis=0)-1):\n dT[i, :] = t[i+1, :] - t[i, :]\n for i in range(np.ma.size(sigma, axis=0)-1):\n dz[i, :] = z[i+1, :] - z[i, :]\n lapse = -1000 * dT[0:-1] / dz[0:-1]\n # zonalplot(lapse, sigma[0:-1], lat, 'Lapse rate')\n return lapse", "def smoothness(self, range_x):\n step = np.diff(range_x)\n diff_val_square = np.square(np.diff(self(range_x)))\n res = np.sum(np.array(diff_val_square / step))\n res = res/(range_x[-1] - range_x[0])\n return res", "def FN2(lam):\n return 1.034 + 3.17 *1e-4 *lam**(-2)", "def zzX_abs(f):\n if poly_univariate_p(f):\n return zzx_abs(f)\n else:\n return [ zzX_abs(coeff) for coeff in f ]", "def track_smooth(self, filtered_track):\n\n estimates = self._get_estimates(filtered_track)\n\n penultimate_index = len(filtered_track) - 2\n\n smoothed_track = copy.deepcopy(filtered_track)\n\n # Iterating backwards from the penultimate state, to the first state.\n for t in range(penultimate_index, -1, -1):\n smoothed_track[t] = self.smooth(filtered_track[t],\n estimates[t+1],\n smoothed_track[t+1])\n\n return smoothed_track", "def smooth(inputdata, w, imax):\n data = 1.0*inputdata\n data = data.replace(np.nan, 1)\n data = data.replace(np.inf, 1)\n \n smoothed = 1.0*data\n normalization = 1\n for i in range(-imax, imax+1):\n if i==0:\n continue\n smoothed += (w**abs(i))*data.shift(i, axis=0)\n normalization += w**abs(i)\n smoothed /= normalization\n return smoothed", "def smooth_image(self, image, mask):\n \n filter_size = self.smoothing_filter_size.value\n if filter_size == 0:\n return image\n sigma = filter_size / 2.35\n #\n # We not only want to smooth using a Gaussian, but we want to limit\n # the spread of the smoothing to 2 SD, partly to make things happen\n # locally, partly to make things run faster, partly to try to match\n # the Matlab behavior.\n #\n filter_size = max(int(float(filter_size) / 2.0),1)\n f = (1/np.sqrt(2.0 * np.pi 
) / sigma * \n np.exp(-0.5 * np.arange(-filter_size, filter_size+1)**2 / \n sigma ** 2))\n def fgaussian(image):\n output = scipy.ndimage.convolve1d(image, f,\n axis = 0,\n mode='constant')\n return scipy.ndimage.convolve1d(output, f,\n axis = 1,\n mode='constant')\n #\n # Use the trick where you similarly convolve an array of ones to find \n # out the edge effects, then divide to correct the edge effects\n #\n edge_array = fgaussian(mask.astype(float))\n masked_image = image.copy()\n masked_image[~mask] = 0\n smoothed_image = fgaussian(masked_image)\n masked_image[mask] = smoothed_image[mask] / edge_array[mask]\n return masked_image", "def __abs__(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()", "def calculate_abs(self):\n ref_spectra_raw = np.array(self.raw_data['spectrum_0'].attrs['reference'])\n self.ref_spectra_arr = np.subtract(ref_spectra_raw,self.back_spectra_arr)\n abs=-np.log10(self.pre_proc_data.div(self.ref_spectra_arr))\n self.abs_data=abs\n return self.abs_data", "def _smooth(self):\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()", "def myTwistFunctionAirliner(Epsilon):\n return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)", "def smooth(self, mri_data):\n\n \"\"\"DEBUG\n import matplotlib.pyplot as plt\n self = rtp_smooth\n\n \"\"\"\n\n # image dimension\n nx, ny, nz = mri_data.img_data.shape\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Copy function image data and get pointer\n fim0 = mri_data.img_data\n if fim0.dtype != np.float32:\n fim_arr = fim0.astype(np.float32)\n else:\n fim_arr = fim0\n fim_arr = np.moveaxis(np.moveaxis(fim_arr, 0, -1), 0, 1).copy()\n fim_p = fim_arr.ctypes.data_as(ctypes.POINTER(ctypes.c_float))\n\n self.rtp_smooth(fim_p, nx, ny, nz, dx, dy, dz, self.mask_byte_p,\n self.blur_fwhm)\n fim_arr = np.moveaxis(np.moveaxis(fim_arr, 0, -1), 0, 1)\n\n return fim_arr", "def lapse(self):\n pass", "def stability_of_timeseries(returns):\n\n cum_log_returns = np.log1p(returns).cumsum()\n rhat = sp.stats.linregress(np.arange(len(cum_log_returns)),\n cum_log_returns.values)[2]\n\n return rhat", "def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) ->Tensor:\n return sum_abs_error / n_obs", "def test_temporal_smoothing_how(perfectModelEnsemble_initialized_control_1d_ym_cftime):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed_mean = pm.smooth({\"lead\": 4}, how=\"mean\")\r\n pm_smoothed_sum = pm.smooth({\"lead\": 4}, how=\"sum\")\r\n assert (\r\n pm_smoothed_sum.get_initialized().mean()\r\n > pm_smoothed_mean.get_initialized().mean() * 2\r\n )", "def clean_series(y,smooth = False,p = 6.25,logsmooth = True):\n\n # Remove null values in the middle of the series using interpolate\n # First null values are not interpolated but later filled by 0.0\n y = y.replace(0.0,np.NaN).interpolate().fillna(0.0)\n\n # Smooth using Hodrick Prescott filter with parameter p\n if smooth:\n y = smooth_series(y,p)\n y.loc[(y < 1) & (y > 0)] = 1\n\n if logsmooth:\n y = 
y.map(lambda x : np.log(1+x))\n y = smooth_series(y,p)\n y = y.map(lambda x : np.exp(x) - 1)\n y.loc[(y < 1) & (y > 0)] = 1\n y.loc[y < 0] = 0\n\n return y", "def smape(true, predictions):\n \n true = np.array(true)\n predictions = np.array(predictions)\n \n return np.mean(np.abs(true - predictions) * 2/ (np.abs(true) + np.abs(predictions))) * 100", "def _apply_smooth_update(self):\n self.print(\"SGD with Momentum: Applying smooth update...\", line_above=True)\n\n raw_update = self.get_h5_data(self.raw_update_path)\n update = self.get_h5_data(self.smooth_update_path)\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the smoothed update.\"\n \"Check the raw update and smoothing process.\"\n )\n\n max_upd = np.max(np.abs(update))\n print(f\"Max smooth model update: {max_upd}\")\n\n update_scaling_fac_alpha = self.alpha / max_upd\n\n self.print(\n f\"Recaling based on alpha: {update_scaling_fac_alpha},\"\n f\"New maximum update is: {max_upd * update_scaling_fac_alpha}\"\n )\n\n update *= update_scaling_fac_alpha\n\n # normalise theta and apply update\n theta_0 = self.get_h5_data(self._get_path_for_iteration(0, self.model_path))\n\n # Update parameters\n if max(self.roughness_decay_smoothing_length) > 0.0:\n theta_prev = self.get_h5_data(self.smoothed_model_path)\n\n # If relative perturbations are smoothed, make model physical\n if self.roughness_decay_type == \"relative_perturbation\":\n theta_prev = (theta_prev + 1) * theta_0\n else:\n theta_prev = self.get_h5_data(self.model_path)\n\n # Normalize the model and prevent division by zero in the outer core.\n theta_prev[theta_0 != 0] = theta_prev[theta_0 != 0] / theta_0[theta_0 != 0] - 1\n\n # Make sure that the model is only updated where theta is non_zero\n theta_new = np.zeros_like(theta_0)\n theta_new[theta_0 != 0] = (\n theta_prev[theta_0 != 0]\n - update[theta_0 != 0]\n - (1 - self.beta) * self.perturbation_decay * theta_prev[theta_0 != 0]\n )\n\n # Remove normalization from updated model and write physical model\n theta_physical = (theta_new + 1) * theta_0\n shutil.copy(\n self.model_path,\n self.tmp_model_path,\n )\n self.set_h5_data(\n self.tmp_model_path,\n theta_physical,\n )", "def _core_calc_sse2(self,arp) :\n\t\tprod = self._core_calc_prod(arp.bp,arp.Lp)\n\t\tdegrad = self._core_calc_degrad(arp.bd,arp.Ld)\n\t\te = arp.slopes - prod + degrad\n\t\tsse = np.dot(e,e)\t\t\t\n\t\treturn sse", "def real_attenuation(og, fg):\n\n oe = sg2plato(og)\n re = real_extract(og, fg)\n return (oe - re) / oe * 100.", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)", "def abs_(a):", "def abs(x):\n pass", "def smooth_curve(times, magnitudes):\n x = times[:,0]\n y = magnitudes[:,0]\n\n smoothed_times = np.linspace(np.min(x), np.max(x), 1000)\n\n itp = interp1d(x,y, kind='linear')\n window_size, poly_order = 101, 3\n smoothed_magnitudes = savgol_filter(itp(smoothed_times), window_size, poly_order)\n\n smoothed_times = smoothed_times.reshape(smoothed_times.size ,1)\n smoothed_magnitudes = smoothed_magnitudes.reshape(smoothed_magnitudes.size ,1)\n\n return (smoothed_times, smoothed_magnitudes)", "def masked_l1_loss(prediction, target, mask):\n abs_error = F.l1_loss(prediction, target, reduction='none')\n loss = weighted_mean(abs_error, mask)\n return loss", "def calc_smape(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass", "def absolute(x):\n return -x if x < 0 else x", "def SMAPE(pred, true):\n denom = torch.abs(true) + torch.abs(pred)\n smape = torch.where(denom == 0., 
torch.zeros_like(true), torch.abs(pred - true) / denom)\n mean_smape = smape.mean()\n return mean_smape * 200.", "def Psmooth(self, k):\n om = self.om\n ol = self.ol\n omz = self.cosmology.Om(self.z) # Omega matter at z\n olz = ol/np.square(self.cosmology.efunc(self.z)) # MBW Eqn 3.77\n g0 = 5/2*om/(np.power(om, 4/7) - ol + ((1+om/2)*(1+ol/70))) # Eqn 4.76\n gz = 5/2*omz/(np.power(omz, 4/7) - olz + ((1+omz/2)*(1+olz/70)))\n Dlin_ratio = gz / (1+self.z) / g0\n Psmooth = self.P0smooth * np.square(self.T0(k)) * \\\n np.power(k, self.ns) * np.square(Dlin_ratio)\n return Psmooth", "def slerp_gaussian(val, low, high):\n offset = norm.cdf(np.zeros_like(low)) # offset is just [0.5, 0.5, ...]\n low_gau_shifted = norm.cdf(low) - offset\n high_gau_shifted = norm.cdf(high) - offset\n circle_lerped_gau = slerp(val, low_gau_shifted, high_gau_shifted)\n epsilon = 0.001\n clipped_sum = np.clip(circle_lerped_gau + offset, epsilon, 1.0 - epsilon)\n result = norm.ppf(clipped_sum)\n return result", "def eml_add_smooth(yi, xi, eqml):\n return (eqml[yi][xi] + 1) / (sum(eqml[yi].values()) + train_set_size)", "def copy_abs(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def __loss(self, h, y):\n return (-y*np.log(h)-(1-y)*np.log(1-h)).mean()", "def _semi_relativistic_loss(eps):\n P = Pcoef * np.imag(1./eps) / np.real(Theta**2.+ThetaE**2.)\n return P", "def smape_loss(y_true, y_pred):\n # mask=tf.where(y_true,1.,0.)\n mask = tf.cast(y_true, tf.bool)\n mask = tf.cast(mask, tf.float32)\n sym_sum = tf.abs(y_true) + tf.abs(y_pred)\n condition = tf.cast(sym_sum, tf.bool)\n weights = tf.where(condition, 1. / (sym_sum + 1e-8), 0.0)\n return 200 * tnp.nanmean(tf.abs(y_pred - y_true) * weights * mask)", "def abs(array):\n return np.abs(array)", "def invert_smoothstep(image):\n image = image.clamp(0.0, 1.0)\n return 0.5 - torch.sin(torch.asin(1.0 - 2.0 * image) / 3.0)", "def _get_smooth(self, value):\n \n # We use deque here as it is more efficient for in/out behaviour than regular list/tuple\n if not self._last_readings:\n self._last_readings = deque((value, ) * self.SMOOTH_READINGS_NUMBER, self.SMOOTH_READINGS_NUMBER)\n else:\n self._last_readings.appendleft(value)\n \n # Average last temperature readings\n return sum(self._last_readings) / self.SMOOTH_READINGS_NUMBER", "def half_life(z):\r\n dz = (z - z.shift(1))[1:]\r\n prev_z = z.shift(1)[1:].values.reshape(-1, 1)\r\n fit_obj = LinearRegression().fit(prev_z - np.mean(z), dz)\r\n\r\n return - np.log(2) / fit_obj.coef_[0]", "def find_absolute_value(x):\n return math.fabs(x)", "def abs(value):\n return _abs(value)", "def smooth(*args, numiter=1) -> core.Smooth:\n X, Y, kws = util.parseargs(*args)\n return core.Smooth(X, Y, numiter=numiter)", "def SMAPE(y_true, y_pred):\n return smape(y_true, y_pred) / 2", "def f1_loss(y_true, y_pred):\n return 1.0 - f1_score(y_true, y_pred, average='weighted')", "def calc_loss(self, lambdas):\n # Rayleigh scattering [dB/km*um^4]\n # value 0.74 for SiO2 from Appl. Phys. Lett. 83, 5175 (2003)\n # value 2.33 for GeO2 from Appl. 
Optics 36(27) (1997)\n R = .74 + (2.33 - .74) * self.doping\n alphaR = R * lambdas**(-4) * 1e-3\n # measured fiber water peak\n alphaoh_1_38 = 2.43\n sigma_lambda = 0.030\n # Journal of Non-Crystalline Solids Volume 203 (1996)\n alphaoh = 0.00012 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.444) / (sigma_lambda))**2) + \\\n 0.00050 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.506) / (sigma_lambda))**2) + \\\n 0.00030 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.566) / (sigma_lambda))**2) + \\\n 0.00640 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.593) / (sigma_lambda))**2) + \\\n 0.00028 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.651) / (sigma_lambda))**2) + \\\n 0.00440 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.685) / (sigma_lambda))**2) + \\\n 0.07800 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.724) / (sigma_lambda))**2) + \\\n 0.00380 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.825) / (sigma_lambda))**2) + \\\n 0.08000 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.878) / (sigma_lambda))**2) + \\\n 1.6 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 0.943) / (sigma_lambda))**2) + \\\n 0.07 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 1.139) / (sigma_lambda))**2) + \\\n 2.7 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 1.246) / (sigma_lambda))**2) + \\\n alphaoh_1_38 * np.exp(-.5 * ((lambdas - 1.383) / (sigma_lambda))**2) + \\\n 0.84 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 1.894) / (sigma_lambda))**2) + \\\n 201 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 2.212) / (sigma_lambda))**2) + \\\n 10000 / 62.7 * alphaoh_1_38 * np.exp(-.5 * ((lambdas - 2.722) / (sigma_lambda))**2)\n # Hiroshi Murata, Handbook of optical fibers and cables (1996)\n alphaIR = 4.2e8 * np.exp(-47.5 / lambdas)\n a = (alphaoh + alphaR + alphaIR) / (10 / np.log(10))\n a[a > self.maxloss] = self.maxloss\n self.alpha = a", "def fit_loss(self, smoothing_factor=None, weight=None):\n\n if smoothing_factor is None:\n smoothing_factor = self.smoothing_factor\n\n # print \"smoothing_factor\", smoothing_factor\n spline = self.fit_univariate_spline(smoothingFactor=smoothing_factor, weight=weight)\n err = self.lossFunction(spline)\n\n return err", "def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])", "def angle_loss(self, preds_S, preds_T):\n angle_T = angle(preds_T)\n angle_S = angle(preds_S)\n return F.smooth_l1_loss(angle_S, angle_T)", "def mean_absolute_error(self):\n print('Mean absolute error regression loss: ' + str(mean_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def fitfunc(x_unshifted, p=default()):\n x = x_unshifted+p[4]\n xtr, ytr, gradtr = logcontinuity(p)\n if x < xtr:\n return logpeak(x, p)\n else:\n return logpowerlaw(x, p)", "def smooth_l1_loss(inputs, beta=1., reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['beta'] = float(args['beta'])\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.SmoothL1Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(\n beta=args['beta'],\n reduction=args['reduction'],\n ).apply(inputs)\n else:\n return op_lib.blend(**args)", "def backward_loss(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()", "def backward_loss(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n 
scaled_loss.backward()", "def solve_absolute_mde(self):\n e = FTestPower().solve_power(\n effect_size=None\n ,df_num=self.df_denom\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=(1 - self.beta)\n ,ncc=1\n )\n\n Y = (self.test_splits * self.absolute_effects).sum()\n num1 = np.square(self.absolute_effects - Y)\n num = (self.test_splits * num1).sum()\n\n a = f * self.sigma / np.sqrt(num)\n return a * self.absolute_effects" ]
[ "0.64517206", "0.58765614", "0.56680757", "0.5649169", "0.5616871", "0.55822164", "0.55743736", "0.5557006", "0.5528426", "0.5517026", "0.54865664", "0.5475101", "0.5430965", "0.54217964", "0.5417211", "0.53726065", "0.5364687", "0.5338996", "0.5331391", "0.5327666", "0.53195363", "0.53098166", "0.5308995", "0.5292608", "0.5287077", "0.5281952", "0.5276485", "0.52736074", "0.5268386", "0.5258025", "0.5257461", "0.5244968", "0.52437353", "0.5242464", "0.52297884", "0.5216768", "0.5213828", "0.5196201", "0.5193139", "0.5191394", "0.51887196", "0.51887196", "0.51818436", "0.5161574", "0.5161574", "0.51282495", "0.51265216", "0.5126231", "0.5121585", "0.5109806", "0.51094675", "0.51026875", "0.5102198", "0.5101264", "0.5091989", "0.50888085", "0.5082359", "0.50815403", "0.50776976", "0.50731707", "0.5068178", "0.5067786", "0.50525886", "0.50462043", "0.504136", "0.5038399", "0.5032527", "0.5030863", "0.5029336", "0.5028935", "0.5015947", "0.5014153", "0.50112927", "0.50032735", "0.50027996", "0.5001522", "0.5000468", "0.49958694", "0.4988259", "0.49780595", "0.49746427", "0.49729243", "0.49709994", "0.49603596", "0.49582198", "0.4956814", "0.49540225", "0.49456573", "0.49418002", "0.49416834", "0.49414796", "0.49398106", "0.49347508", "0.4934696", "0.4932942", "0.49275059", "0.49270055", "0.4911882", "0.4911882", "0.48930943" ]
0.5849999
2
if keys from dict occur in kw, pop them
def _kw_extract(kw,dict):
    for key,value in dict.items():
        dict[key]=kw.pop(key,value)
    return kw,dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mRemove(self, **kw):\n kw = copy_non_reserved_keywords(kw)\n for key, val in kw.items():\n # It would be easier on the eyes to write this using\n # \"continue\" statements whenever we finish processing an item,\n # but Python 1.5.2 apparently doesn't let you use \"continue\"\n # within try:-except: blocks, so we have to nest our code.\n try:\n orig = self._dict[key]\n except KeyError:\n # No existing variable in the environment, so just skip it\n pass\n else:\n try:\n # Most straightforward: just try to substract it.\n # But this will not work in most cases :-(\n self._dict[key] = orig - val\n except TypeError:\n try:\n # It orig and val is dictionaties:\n for k in val.keys():\n del orig[k]\n # May be some recursion ?\n except AttributeError:\n try:\n # Check if the original is a list.\n remove_from_orig = orig.remove\n except AttributeError:\n # Can't do nothing more\n pass\n else:\n # The original is a list, so remove\n # value from it.\n try:\n i = val[0]\n except TypeError:\n val = [ val ]\n for i in val:\n try:\n remove_from_orig(i)\n except ValueError:\n pass\n self.scanner_map_delete(kw)", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def removekwd(header, kwd):\n if kwd in header.keys():\n header.remove(kwd)\n return", "def pop_non_relevant_search_fields(data: Dict):\n keys_to_keep = [\"title\", \"published_at\", \"identifier\"]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)", "def popall(self, k, default=_MISSING):\n super_self = super(OrderedMultiDict, self)\n if super_self.__contains__(k):\n self._remove_all(k)\n if default is _MISSING:\n return super_self.pop(k)\n return super_self.pop(k, default)", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]", "def remove_by_keys(self, keys):\n return list(filter(lambda item: item.keyword not in set(keys), self._metadata))", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def pop_non_relevant_vuln_fields(data: Dict):\n keys_to_keep = [\n \"title\",\n \"description\",\n \"content_type\",\n \"published_at\",\n \"references\",\n \"severity\",\n \"solutions\",\n \"alternate_ids\",\n ]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)", "def deleteDistortionKeywords(hdr):\n # We need to use '.pop' to guard against the possibility, however remote,\n # that the keyword has already been removed before calling this function.\n for kw in DIST_KWS:\n hdr.pop(kw, None)\n\n # This can use 'del' since it will work even if the keywords\n # are missing altogether since the multi_kw uses wild-cards\n for multi_kw in DIST_MULTI_KWS:\n del hdr[multi_kw]", "def exclude(m, keys):\n return {k: v for k, v in m.items() if k not in keys}", "def _filter_kwargs(names, dict_):\n return {k: v for k, v in dict_.items() if k in names and v is not None}", "def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def test_remove_multiple_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": 
\"hello\"},\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key([\"funilrys\", \"Py\"])\n\n self.assertEqual(expected, actual)", "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def remove_from_multidict(d: MultiDict, key: str, item: typing.Any):\n # works by popping all, removing, then re-adding into\n i = d.popall(key, [])\n if item in i:\n i.remove(item)\n\n for n in i:\n d.add(key, n)\n\n return d", "def remove_var(b, exclude):\n return dict((k, v) for k, v in b.items() if param_name(k) not in exclude)", "def filter(self, dict):\n for (pos, hashKey) in enumerate(self._sequence):\n for (key, value) in dict.items():\n data = self.dictionary[hashKey]\n \n if not (data.has_key(key) and data[key].find(value) == 0):\n del self.dictionary[hashKey]\n self._sequence.pop(pos)", "def unset_queries(self, *args):\n for k in args:\n self._query_dict.pop(k, None)", "def get_anti_subdict(indict, phrase):\n\tsubdict = {}\n\tfor key in indict:\n\t\tif phrase not in key:\n\t\t\tsubdict[key] = indict[key]\n\treturn subdict", "def remove_from_dictionary(self,dictionary,*keys):\r\n for key in keys:\r\n if key in dictionary:\r\n value = dictionary.pop(key)\r\n logger.info(\"removed item with key '%s' and value '%s'\" %(key,value))\r\n else:\r\n logger.info(\"Key '%s' not found\" %(key))", "def keepers(d: dict) -> dict:\n keep = {k: v for k, v in d.items() if v is not None}\n return keep", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def removeDic(dic, key):\n pass", "def __delitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> None:\n self.contents = {i: self.contents[i] for i in self.contents \n if i not in more_itertools.always_iterable(key)}\n return", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def delete_keys_from_dict(dict_del, the_keys):\n # make sure the_keys is a set to get O(1) lookups\n if type(the_keys) is not set:\n the_keys = set(the_keys)\n for k, v in dict_del.items():\n if k in the_keys:\n del dict_del[k]\n if isinstance(v, dict):\n delete_keys_from_dict(v, the_keys)\n if isinstance(v, list):\n for item in v:\n if isinstance(item, dict):\n delete_keys_from_dict(item, the_keys)\n return dict_del", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None", "def remove_and_prune(a_dict, b_dict):\n for k in b_dict:\n if isinstance(b_dict[k], dict):\n if k in a_dict and type(a_dict[k]) is configobj.Section:\n remove_and_prune(a_dict[k], b_dict[k])\n if not a_dict[k].sections:\n a_dict.pop(k)\n elif k in a_dict:\n a_dict.pop(k)", "def filter_checkpoint_parameter_by_list(origin_dict, param_filter):\n for key in list(origin_dict.keys()):\n for name in param_filter:\n if name in key:\n print(\"Delete parameter from checkpoint: \", key)\n del origin_dict[key]\n break", "def sup_dicti(self, x, y):\n for key in self.dict_possiblity:\n if x in self.dict_possiblity[key]:\n 
self.dict_possiblity[key].remove(x)\n if y in self.dict_possiblity[key]:\n self.dict_possiblity[key].remove(y)\n del self.dict_possiblity[y]\n del self.dict_possiblity[x]", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def dict_cleanup(self, data):\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n if filter_value not in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n return data", "def discard(self, key):\r\n if key in self.map: \r\n key, prev, next = self.map.pop(key)\r\n prev[NEXT] = next\r\n next[PREV] = prev", "def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:\n keywords_to_scrub: List[str] = ['extra_arguments', 'kernel_id']\n scrubbed_kwargs = kwargs.copy()\n for kw in keywords_to_scrub:\n scrubbed_kwargs.pop(kw, None)\n\n return scrubbed_kwargs", "def f_remove(self, *args):\n for arg in args:\n arg = self.f_translate_key(arg)\n if arg in self._data:\n del self._data[arg]\n else:\n raise AttributeError(\n \"Your result `%s` does not contain %s.\" % (self.name_, arg)\n )", "def clearkeywords(self):\n self._kw = []", "def customization(record):\n for popkey in pop_list:\n if popkey in record:\n record.pop(popkey)\n return record", "def remove_some_extraneous_information(variant):\n for key in ['xpos','xstop','vep_annotations',]: variant.pop(key, None)", "def Exclude(*keys):\n\n def exclude(row):\n res = dict(row)\n for k in keys:\n if k in res:\n del res[k]\n return res\n\n return \"Exclude\" >> beam.Map(exclude)", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def remove_field(pl, key):\n\n if type(pl) is tuple:\n r = (remove_field(v, key) for v in pl)\n\n elif type(pl) is list:\n r = [remove_field(v, key) for v in pl]\n \n elif type(pl) is dict:\n r = {k: remove_field(v, key) for (k, v) in pl.items() if k != key}\n else: \n r = pl\n\n return r", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def remove_outlier(dict_object, keys):\r\n for key in keys:\r\n dict_object.pop(key, 0)", "def pop_non_relevant_module_fields(data: Dict):\n keys_to_keep = [\n \"title\",\n \"description\",\n \"content_type\",\n \"published_at\",\n \"references\",\n \"architectures\",\n \"authors\",\n \"rank\",\n \"reliability\",\n ]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)", "def filter_args_dict(self, args):\n return dict((k,v) for (k,v) in viewitems(args) if self.has_arg(k))", "def removekeywords(self, keywords):\n if isinstance(keywords, str):\n keywords = [keywords]\n for kw in keywords:\n self._kw.remove(kw)", "def del_quiet(dic, *keys):\n for key in keys:\n try:\n del dic[key]\n except KeyError:\n pass", "def _cleasing(series: Optional[dict], freq:list) -> dict:\n if series is not None:\n if series[\"freq\"] in freq:\n if series[\"final\"].year == 2021:\n if series[\"fonte\"] in fonte_in:\n if str(series[\"number\"]) not in remove['tickers']:\n return series", "def del_dict_keys(dict_in, keys):\n for key in keys:\n if key in dict_in:\n del dict_in[key]\n return dict_in", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def UnLoadDictKeys(dct, keys_lst):\n if not keys_lst:\n return dct\n SanityCheck.ValidateTypes(((dct, dict), (keys_lst, list)))\n\n new_dct = {}\n for key in dct:\n if key in keys_lst:\n continue\n new_dct[key] = dct[key]\n\n return new_dct", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def remove_outlier(dict_object, keys):\n for key in keys:\n dict_object.pop(key, 0)", 
"def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret", "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def remove_outlier(keys):\n for key in keys:\n data_dict.pop(key, 0)", "def dict_pop(d, key):\n return d.pop(key)", "def remove_naked_sets_from_candidates(c, *args, naked_sets=defaultdict(list)):\n for d in args:\n for k, v in d.items():\n for coord in v:\n c[coord] = [n for n in c[coord] if n not in k]\n naked_sets[coord].extend(list(k))\n return c, dict(naked_sets)", "def filter_out(path_data):\n for key in exclude_keywords:\n if key in path_data['tags']:\n return False\n return True", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n actual = Dict(self.test_subject).remove_key([\"Py\", \"test\"])\n\n self.assertEqual(expected, actual)", "def keyword_subset(keyword_arguments, allowed_keywords):\n keywords = set(keyword_arguments.keys())\n keyswords_to_extract = keywords.intersection(allowed_keywords)\n\n new_kw = {key: keyword_arguments[key] for key in keyswords_to_extract}\n\n return new_kw", "def test_remove_facet_dictionary(self):\n pass", "def _remove_data(things, lst_remove=None):\n\n for data in things:\n data.pop(\"_sa_instance_state\", None)\n data.pop(\"user_id\", None)\n\n if lst_remove is not None:\n for str_remove in lst_remove:\n if str_remove in data:\n data.pop(str_remove, None)\n\n return things", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def filter_dict(dictionary, pred):\n return dict((k, v) for k, v in dictionary.items() if pred(k, v))", "def remove(self, key):", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def exclude_keys(value, *exclude):\n\n if not isinstance(value, QueryDict):\n raise RuntimeError(\"getquerydict should be used with QueryDict instances only (e.g. 
request.GET)\")\n\n value = value.copy()\n for key in exclude:\n if key in value: del value[key]\n return value", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n return [{key: value for key, value in dic.items() if key not in redundant_keys} for dic in data]", "def select_features(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def critere_keys(key):\n critere = (key not in [\"input_observation\", \"y_true\", \"y_action\", \"y\"])\n critere = critere & (key[-3:] != \"_ph\") & (key[-7:] != \"_assign\")\n\n return critere", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def remove_item(self, item):\r\n\r\n for key in self._inner_dict:\r\n if item in self._inner_dict[key]:\r\n idx = self._inner_dict[key].index(item)\r\n del self._inner_dict[key][idx]", "def remove_unused_keys(cop):\n delete_these = [\n 'officer_atty',\n 'officer_atty_firm',\n 'case_id',\n 'cop_first_name',\n 'cop_middle_initial',\n 'cop_last_name',\n 'entered_by',\n 'entered_when',\n 'fact_checked_by',\n 'fact_checked_when',\n 'matched_by',\n 'matched_when'\n ]\n\n for key in delete_these:\n del cop[key]\n\n return cop", "def _removeKeywords(self,candidates,candidates_predictors):\r\n return candidatesUtils.removeKeywords(candidates,candidates_predictors)", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def sliceoffparams(pardict, parprefix):\n return {k: v.value for k, v in pardict.items() if not k.startswith(parprefix)}", "def drop_keys(d):\n if isinstance(d, dict):\n return {\n k: drop_keys(v)\n for k, v in d.items()\n if k not in [\"propNames\", \"package\"]\n and v is not None\n and not (k == \"children\" and v == \"\")\n }\n elif isinstance(d, list):\n return [drop_keys(x) for x in d]\n return d", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def delete_dict_entries(dictionary, entries):\n\n for key in entries:\n if key in dictionary:\n del dictionary[key]\n\n return dictionary\n # parameters = {key: parameters[key] for key in parameters if key not in del_parameter}", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def update_naked_set(ns, cpns):\n for k, v in cpns.items():\n if len(v) == 1:\n del ns[v.pop()]\n else:\n if len(set(v)) < 3:\n for coord in set(v):\n del ns[coord]\n return ns", "def _delete_volatile_keys(self, solr_dict):\n\n def delete(del_solr_dict, path_list):\n k = path_list[0]\n if k in del_solr_dict:\n if len(path_list) > 1:\n delete(del_solr_dict[k], path_list[1:])\n else:\n del del_solr_dict[k]\n\n delete(solr_dict, ['response', 'maxScore'])\n delete(solr_dict, ['responseHeader', 'QTime'])", "def delete(self, keyword, key):", "def pop(self, k, d=None): # real signature unknown; restored from __doc__\n pass", "def remove_keys(_dict, _keys):\n if isinstance(_keys, str):\n if _keys in _dict:\n del _dict[_keys]\n else:\n for _key in _keys:\n _dict = remove_keys(_dict, _key)\n 
return _dict", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def uniqueKwdTheme( theme_kw ) :\n return { k : theme_kw[k][0] for k in theme_kw if len( theme_kw[k] )==1 }", "def unused_kwargs(kw):\n fn_kw = dict(base_class=None,\n base_name=None, name=None, base_arg=None, base_kw=None, parent=None,\n infer_kw=None, in_shape='BCD', base_shape=None, out_shape='BCD', tuple_out=False,\n forward_arg=None, forward_kw=None, initialization=None, activation=None, )\n return {k:v for k, v in kw.items() if k not in fn_kw}", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0" ]
[ "0.659709", "0.658242", "0.64829236", "0.6343324", "0.62005615", "0.602767", "0.59886026", "0.5980594", "0.5973243", "0.59430367", "0.5851196", "0.5803663", "0.578763", "0.5769544", "0.5725139", "0.5696665", "0.564784", "0.56437635", "0.5620172", "0.56135494", "0.5593906", "0.55902684", "0.55674213", "0.5549724", "0.5547823", "0.55158055", "0.5481601", "0.5479546", "0.54738075", "0.5465834", "0.5461114", "0.5443207", "0.5436585", "0.5431623", "0.5430098", "0.5429108", "0.5426874", "0.54239815", "0.540124", "0.53969955", "0.5391651", "0.5385963", "0.53826046", "0.5381219", "0.53651375", "0.53523576", "0.5348112", "0.5335766", "0.53330475", "0.531364", "0.5312409", "0.5301968", "0.53005624", "0.52984226", "0.52885795", "0.52881575", "0.52871656", "0.5285951", "0.5281875", "0.52801126", "0.52681273", "0.52662784", "0.52641827", "0.52463704", "0.5244862", "0.52445334", "0.52384526", "0.522424", "0.5218794", "0.52087665", "0.5202075", "0.5200198", "0.5199806", "0.51888776", "0.5172103", "0.51679844", "0.5166264", "0.51606137", "0.51535505", "0.51520336", "0.51508975", "0.51434636", "0.51410717", "0.51378965", "0.51361257", "0.5120769", "0.51109165", "0.51077384", "0.5097419", "0.509735", "0.50932777", "0.5087732", "0.50704503", "0.50570685", "0.5048468", "0.50429004", "0.50419676", "0.5038371", "0.5036402", "0.5031462" ]
0.73164487
0
plot(f) allows f to be (x,y) tuple
def plot(f,**kw):
    if type(f)==tuple:
        x,y=f
        pl.plot(x,y,**kw)
    else:
        pl.plot(f,**kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_plot(x,y):", "def plot():\n pass", "def plot_f(self, *args, **kwargs):\r\n kwargs['plot_raw'] = True\r\n self.plot(*args, **kwargs)", "def __plot(self, x: list, y:list):\r\n # clear the figure\r\n self.figure.clear()\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(111)\r\n # plot data\r\n self.canvas.axes.plot(x, y, self.primaryColor, label=self.inputFunction)\r\n # refresh canvas\r\n self.canvas.draw()", "def plot(f):\n def wrapper(*args, **kwargs):\n logging.info('generating plot: %s', f.__name__)\n f(*args, **kwargs)\n\n fig = plt.gcf()\n\n plotfile = plotdir / '{}.png'.format(f.__name__)\n fig.savefig(str(plotfile))\n logging.info('wrote %s', plotfile)\n plt.close(fig)\n\n plot_functions[f.__name__] = wrapper\n\n return wrapper", "def show_trace_2d(f, results):\n plt.close()\n # draw input points\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n # get the field of figure\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n # draw the contour of function using x1,x2 as step\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()", "def plot(self, *args, **kwargs):\n pass", "def plot(f, color):\n turtle.penup()\n turtle.setposition(x_begin, f(x_begin))\n turtle.pencolor(color)\n turtle.pendown()\n x = x_begin\n while x < x_end:\n turtle.setposition(x, f(x))\n x += x_increment", "def plot(self, *args, **kwargs):\n raise NotImplementedError", "def display(f, x_min, x_max, delta=0.001):\n x = list(drange(x_min, x_max,delta))\n y = [f(i) for i in x]\n plt.title(f.__name__)\n plt.grid(True)\n plt.xlabel('X')\n plt.ylabel('Y= '+f.__name__ + '(X)')\n plt.plot(x,y, 'r')\n plt.show()", "def plot(\n self,\n function: Callable[[float], float],\n x_range: Sequence[float] | None = None,\n use_vectorized: bool = False,\n **kwargs,\n ):\n\n t_range = np.array(self.x_range, dtype=float)\n if x_range is not None:\n t_range[: len(x_range)] = x_range\n\n if x_range is None or len(x_range) < 3:\n # if t_range has a defined step size, increase the number of sample points per tick\n t_range[2] /= self.num_sampled_graph_points_per_tick\n # For axes, the third coordinate of x_range indicates\n # tick frequency. 
But for functions, it indicates a\n # sample frequency\n\n graph = ParametricFunction(\n lambda t: self.coords_to_point(t, function(t)),\n t_range=t_range,\n scaling=self.x_axis.scaling,\n use_vectorized=use_vectorized,\n **kwargs,\n )\n graph.underlying_function = function\n return graph", "def plot_response_surface(f, p, dims=[0,1]):\n import pylab\n if len(dims) == 1:\n xi = dims[0]\n x = pylab.linspace(-10,10,40) - p[xi]\n def value(v):\n p[xi] = v\n return f(p)\n z = [value(v) for v in x]\n pylab.plot(x,z)\n else:\n xi,yi = dims\n x = pylab.linspace(-10,10,40) - p[xi]\n y = pylab.linspace(-10,10,40) - p[yi]\n def value(pt):\n p[xi] = pt[0]\n p[yi] = pt[1]\n return f(p)\n z = np.array([[value((v,w)) for v in x] for w in y])\n pylab.pcolor(x,y,z)", "def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')", "def plot_scatter(x, y):\n\tplt.scatter(x, y)", "def plot_obj_func():\n X1 = [i for i in range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot(self, a=None, b=None):\n\n # === choose reasonable interval if [a, b] not specified === #\n if a is None:\n a = self.observations.min() - self.observations.std()\n if b is None:\n b = self.observations.max() + self.observations.std()\n\n # === generate plot === #\n x_vals = np.linspace(a, b, num=100)\n f = np.vectorize(self.__call__)\n plt.plot(x_vals, f(x_vals))\n plt.show()", "def tuple_grapher(arr):\n # Sets the column titles\n cols = [\"x\", \"y\"]\n # Creates a pandas DB using the list of tuples\n db = pandas.DataFrame.from_records(arr, columns=cols)\n # Plots the DB\n db.plot(kind='scatter', x=cols[0], y=cols[1])\n # Displays the graph\n return plt.show()", "def plot_xy(xy: np.ndarray, *args: Optional, **kwargs: Optional):\n trj = traja.from_xy(xy)\n trj.traja.plot(*args, **kwargs)", "def plot_xy(self, xpts, ypts):\n self.plot(np.asarray((xpts, ypts)).T)", "def my_plotter(ax, data1, data2, param_dict):\n out = ax.plot(data1, data2, **param_dict)\n return out", "def plotter(ax, data1, data2, param_dict):\n out = ax.plot(data1, data2, **param_dict)\n return out", "def plot(self, ax=..., *, name=..., **kwargs):\n ...", "def drawPairPlot(x, y1, y2, xlabel,ylabel, y1label, y2label):\n\tplt.plot(x, y1, label = y1label)\n\tplt.plot(x, y2, label = 
y2label)\n\tplt.xlabel(xlabel)\n\tplt.ylabel(ylabel)\n\tplt.legend()\n\tplt.show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def plot_xyh(self, plot='line', _x=0, _y=1, _hue=None, **kwargs) -> None:\n\n plot_call = getattr(seaborn, plot + 'plot', None)\n if not plot_call:\n raise UnknownPlotTypeError(f\"'{plot}' is not a known plot type in seaborn.\")\n\n # Seaborn needs column names, not index\n _x = self.headers()[_x]\n _y = self.headers()[_y]\n _hue = self.headers()[_hue] if _hue is not None else None\n\n df = self.get_df()\n plot_call(x=_x, y=_y, hue=_hue, data=df, **kwargs)\n plt.show()", "def plot(self, p=None):\n import pylab\n if self.dy is not None:\n pylab.errorbar(self.x, self.y, yerr=self.dy, fmt='x')\n else:\n pylab.plot(self.x, self.y, 'x')\n if p is None: p = self.start\n x,y = self.profile(p)\n pylab.plot(x,y)", "def plot(x, y, z):\n pylab.plot(x, y)\n pylab.plot(x, z)\n pylab.show()", "def showFromTuple(self, context, t=(0, 0), **kwargs):\n p = Point(*t)\n self.show(context, p, **kwargs)", "def builtin_plot(self, **kwargs):\n self.gp.plot(**kwargs)\n return", "def show_trace_2d(f, results): #@save\n d2l.set_figsize()\n d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1),\n d2l.arange(-3.0, 1.0, 0.1))\n d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n d2l.plt.xlabel('x1')\n d2l.plt.ylabel('x2')", "def plot(self, x_range=(0, 10), point_spacing=1.0, unit_factor_x=1.0, unit_factor_y=1.0):\n for func_map in self.functions:\n function = func_map[\"function\"]\n xs, ys = function.get_xy_vals(x_range=x_range, point_spacing=point_spacing)\n plt.plot([x * unit_factor_x for x in xs],\n [y * unit_factor_y for y in ys], func_map[\"style\"], label=func_map[\"label\"])\n plt.legend()\n plt.xlabel(self.x_label)\n plt.ylabel(self.y_label)\n plt.suptitle(self.title, fontsize=12)\n plt.show()", "def plotone(x,y,xlabel,ylabel,filename):\n fig=plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(x,y,linewidth=2.0)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n fig.savefig(filename)", "def plot(self,x,y):\r\n scalex,scaley = self.getScale()\r\n x = x*self.samplerate\r\n x = (x-scalex[0])/(scalex[1]-scalex[0])*self.w\r\n y = (y-scaley[0])/(scaley[1]-scaley[0])*self.h\r\n return (x,y)", "def make_scatter():\n x = np.linspace(4, 8, 6)\n y = np.sin(x)\n plt.plot(x, y, 'o', color='black');\n plt.show()", "def plot( # type: ignore[override]\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def plot( # type: ignore[override]\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def plot( # type: ignore[override]\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def plot(self, **kwargs) -> Line2D:\n if {'x','y','z'}.issubset(self.data.columns):\n ax = self.data.plot('x','y', **kwargs)\n else:\n logger.warning('x,y columns not available in this data')\n ax = None\n return ax", "def display_fit(x,y,p,func,fig=None):\n if 
fig is None:\n fig = plots.tac_figure('x','y','fitting')\n fig.plot(x,np.log(y),label='data')\n \n \n fig.plot(x,np.log(func(p,x)),'--x',label=func.__name__ + '('+\n ','.join(['%.1e'%k for k in p])+ ')')\n \n return fig", "def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n return self._plot(val, ax)", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def plot_data(self):", "def line_graph():\n fig = plt.figure()\n ax = plt.axes()\n x = [1, 2, 3]\n y = [5, 6, 7]\n plt.plot(x, y)\n plt.show()", "def plot_graph(\n x_label: str,\n y_label: str,\n points: {str: typing.Tuple[typing.List[float], typing.List[float], bool]},\n title: typing.Optional[str] = \"\",\n):\n\n legend = []\n\n for key, point_set in points.items():\n\n if point_set[2]:\n plt.plot(point_set[0], point_set[1])\n else:\n plt.scatter(point_set[0], point_set[1])\n\n legend.append(key)\n\n plt.legend(legend)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.suptitle(title)\n plt.show()", "def plot_graph(self) -> None:", "def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)", "def plot_figure(param1, param2):\n return 0", "def plot_data(x, t, new_figure=True, save_path=None):\r\n # Plot the binary data\r\n ma = ['o', 's', 'v']\r\n fc = ['r', 'g', 'b'] # np.array([0, 0, 0]), np.array([1, 1, 1])]\r\n tv = numpy.unique(t.flatten()) # an array of the unique class labels\r\n if new_figure:\r\n plt.figure()\r\n for i in range(tv.shape[0]):\r\n pos = (t == tv[i]).nonzero() # returns a boolean vector mask for selecting just the instances of class tv[i]\r\n plt.scatter(numpy.asarray(x[pos, 0]), numpy.asarray(x[pos, 1]), marker=ma[i], facecolor=fc[i])\r\n\r\n plt.xlabel('$x_1$')\r\n plt.ylabel('$x_2$')\r\n\r\n if save_path is not None:\r\n plt.savefig(save_path, fmt='png')", "def drawit(fignum=1,xlabel=\" \",ylabel=\" \",xvar=None,\n yvar=None,title=\" \",ylimit=None,\n xlimit=None):\n fig=plt.figure(fignum)\n fig.clf()\n ax1=fig.add_subplot(111)\n line=ax1.plot(xvar,yvar)\n ax1.set_xlim(xlimit)\n ax1.set_ylim(ylimit)\n ax1.set_title(title)\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n fig.tight_layout()\n fig.canvas.draw()\n return fig,ax1,line[0]", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot_line(m, b, xrange=None, yrange=None):\n if(G):\n line = Gnuplot.Func ('%f + (%f) * x' % (b, m))\n if xrange: G.set_range('xrange', xrange)\n if yrange: G.set_range('yrange', yrange)\n G.plot(line)\n wait_for_input()", "def coords_plot(self):\n self.load_coords()\n x = []\n y = []\n px = [] \n for item in self.coords:\n if item[1] >52.10 and item[1] <52.4 and item[2]>20.8 and item [2] <21.4:\n x.append(item[1])\n y.append(item[2])\n px.append(item[3])\n plt.scatter(x,y,c=px,s=150,alpha=0.3)\n plt.show()", "def fplot(xlab, 
ylab, ax=None, axargs=None, scales=None):\n if ax is None:\n ax = plt.gca()\n if scales is not None: # Could allow to accept only one arg\n ax.set_xscale(scales[0])\n ax.set_yscale(scales[1])\n ax.set_xlabel(xlab)\n ax.set_ylabel(ylab)\n if axargs is not None:\n ax.axis(axargs)\n # plt.ticklabel_format(useOffset=False, axis='x')\n # plt.ticklabel_format(useOffset=False, axis='y')", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot_data(x, y, Amp, freq, filename):\n plb.plot(x, y, 'b', linestyle = ':')\n y_fit = Amp * np.sin(freq * x)\n plb.plot(x, y_fit, 'r')\n plb.savefig(filename)", "def plot(self):\n pass", "def _apply_plot(self, *args, cmap=None, values=None, **kwargs):\n # Deprecated functionality\n if cmap is not None:\n warnings._warn_proplot(\n 'Drawing \"parametric\" plots with ax.plot(x, y, values=values, cmap=cmap) '\n 'is deprecated and will be removed in the next major release. Please use '\n 'ax.parametric(x, y, values, cmap=cmap) instead.'\n )\n return self.parametric(*args, cmap=cmap, values=values, **kwargs)\n\n # Plot line(s)\n method = kwargs.pop('_method')\n name = method.__name__\n sx = 'y' if 'x' in name else 'x' # i.e. plotx\n objs = []\n args = list(args)\n while args:\n # Support e.g. x1, y1, fmt, x2, y2, fmt2 input\n # NOTE: Copied from _process_plot_var_args.__call__ to avoid relying\n # on public API. ProPlot already supports passing extra positional\n # arguments beyond x, y so can feed (x1, y1, fmt) through wrappers.\n # Instead represent (x2, y2, fmt, ...) 
as successive calls to plot().\n iargs, args = args[:2], args[2:]\n if args and isinstance(args[0], str):\n iargs.append(args[0])\n args = args[1:]\n\n # Call function\n iobjs = method(self, *iargs, values=values, **kwargs)\n\n # Add sticky edges\n # NOTE: Skip edges when error bars present or caps are flush against axes edge\n lines = all(isinstance(obj, mlines.Line2D) for obj in iobjs)\n if lines and not getattr(self, '_no_sticky_edges', False):\n for obj in iobjs:\n data = getattr(obj, 'get_' + sx + 'data')()\n if not data.size:\n continue\n convert = getattr(self, 'convert_' + sx + 'units')\n edges = getattr(obj.sticky_edges, sx)\n edges.append(convert(min(data)))\n edges.append(convert(max(data)))\n\n objs.extend(iobjs)\n\n return tuple(objs)", "def xyPlot(xPlotFunc\n\t\t\t,yPlotFunc\n\t\t\t,table\n\t\t\t,filterList\n\t\t\t,ax\n\t\t\t,legendLabel=None\n\t\t\t,labelFunc=None\n\t\t\t,title=None\n\t\t\t,commonConstraints=[completed]\n\t\t\t,codeList=['ro--','gx--','b^--','ms--','y*--','ko--','co--','ro:','gx:','b^:','ms:','y*:','ko:','co:','ro-','gx-','b^-','ms-','y*-','ko-','co-']):\n\txys=[]\n\tfor i,constraintList in enumerate(filterList):\t\n\t\txs = [xPlotFunc.func(*x) for x in plotQuery(table,xPlotFunc.cols,constraintList+commonConstraints)]\n\t\tys = [yPlotFunc.func(*y) for y in plotQuery(table,yPlotFunc.cols,constraintList+commonConstraints)]\n\t\tif labelFunc is not None: \n\t\t\tlabel = [labelFunc.func(*l) for l in plotQuery(table,labelFunc.cols,constraintList+commonConstraints)]\n\t\ttry: \n\t\t\txy= sorted(zip(xs,ys)) #order the pairs\n\t\t\tx,y = zip(*xy)\n\t\t\tax.plot(x,y,codeList[i%len(codeList)],label='' if legendLabel is None else legendLabel[i])\n\t\t\tif labelFunc is not None: \n\t\t\t\tfor i in range(len(x)):\tax.annotate(label[i],xy=(x[i],y[i]),fontsize=9)\n\t\texcept ValueError: print \"Warning, no data found for constraint #\"+str(i+1)\n\t\txys.append(xy)\n\tif title is not None: ax.set_title(title)\n\n\tax.set_xlabel(xPlotFunc.axisLabel)\n\tax.set_ylabel(yPlotFunc.axisLabel)\n\t\n\tif legendLabel is not None: \n\t\tlegend = ax.legend(loc='best', shadow=True)\n\t\tlegend.get_frame().set_facecolor('#00FFCC')\n\t\tlegend.draggable()\n\treturn xys", "def plot_scatter_points(self):\n self.plot(1)", "def plot_data(array_list, params):\n\tkey = array_list[0]\n\tvals = array_list[1]\n\tprint key\n\tprint len(vals)\n\tfigure_id = 1\n\tfor item in params:\n\t\tx_axis = get_by_keys(item['x_keys'], key, vals)\n\t\ty_axis = get_by_keys(item['y_keys'], key, vals)\n\t\tplt.figure(figure_id)\n\t\tplt.plot(x_axis, y_axis)\n\t\tfigure_id = figure_id + 1\n\tplt.show()", "def plot_matrix(loc_list):\n x_list = [x[0] for x in loc_list]\n y_list = [y[1] for y in loc_list]\n\n # print(x_list, y_list)\n # plt.figure()\n\n plt.plot(x_list, y_list)", "def plot_three_functions(values, sin_values, cos_values, complex_function_values):\n\n # Cambio la escala del eje x a una trigonometrica\n # En el docstring de la funcion indico de donde copio esta funcion\n # Esta funcion ESTA COPIADA DE INTERNET como especifico en la documentacion\n set_x_axis_scale_to_pi()\n\n # Pongo un titulo al grafico\n plt.title(\"Gráfica de las tres funciones\")\n\n # En verde, con lineas discontinuas\n plt.plot(values, sin_values, \"--g\")\n\n # En negro, con lineas discontinuas\n plt.plot(values, cos_values, \"--k\")\n\n # En rojo, con lineas discontinuas\n plt.plot(values, complex_function_values, \"--r\")\n\n plt.show()\n wait_for_user_input()", "def plot(self, center=0, xmin=-1, xmax=1):\n n = 200./self.eps\n x 
= concatenate(\n linspace(xmin, center-self.eps, 21),\n linspace(center-self.eps, center+self.eps, n+1),\n linspace(center+self.eps, xmax, 21))\n y = self(x)\n return x, y", "def evaluate(self, plot):", "def plot_data(f_name):\n # The load_data_from_csv function is a utility function that will dump our\n # csv data into an array called data.\n x, data = load_data_from_csv(f_name)\n # plt.subplots is a way of initializing matplotlib so you can plot\n fig, ax = plt.subplots()\n # ax.errorbar is the main plotting function call.\n # The `fmt`, `capsize`, `elinewidth`, `color` and `label` keyword\n # arguments are there to style the plot -- they are not instrumental.\n std_err = np.std(data,axis=1)/np.sqrt(data.shape[1])\n ax.errorbar(x,np.mean(data, axis=1),yerr=std_err,\n fmt='o',capsize=3, elinewidth=1, color='green',\n label=\"Some description of data\")\n # Set the text on the x axis.\n ax.set_xlabel(\"Simulated Independent Variable (units)\")\n # Set the text on the y axis.\n ax.set_ylabel(\"Simulated Dependent Variable, (units)\")\n # Set the text for the title.\n ax.set_title(\"Some Noisy Data with a linear trend\")\n # Turn on the legend box that appears on the plot figure.\n ax.legend()\n # Turn on grid lines\n ax.grid(True)\n # Create a window with the plot. You can click the save icon to\n # save it to file. Alternatively, you can uncomment the\n # `fig.savefig(\"sample_data_plot.png\")` line to save directly.\n # plt.show()\n fig.savefig(\"sample_data_plot.png\")", "def plot_tseries(*args, **kwargs) :\n data = kwargs.pop('data')\n return data.dropna().plot(x=args[0], y=args[1], **kwargs)", "def plotPoints(x,y):\n display = PacmanPlot(x,y)\n display.takeControl()", "def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n val = val if val is not None else self.compute_all()\n fig, ax = plot_single_or_multi_val(\n val,\n ax=ax,\n name=self.__class__.__name__,\n )\n return fig, ax", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def plot_pmf(self, **options):\n xs, ps = zip(*sorted(self.items()))\n plt.plot(xs, ps, **options)", "def plot_pointing(self, *args, coord=\"tl\", **kwargs):\n # TODO: Generalize that function\n warnings.warn(\"Deprecated function needs update.\", DeprecationWarning)\n self.__check_attributes([\"F_{}_az\".format(coord), \" F_{}_el\".format(coord)])\n return kids_plots.checkPointing(self, *args, **kwargs)", "def plot_2D(df):\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,6))\n fig.clf()\n #Get the current Axes instance on the current figure matching the given \n #keyword args, or create one.\n ax = fig.gca()\n df.plot(kind = 'scatter', x = 'x', y = 'y', ax = ax, alpha = 0.5)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('X vs. 
Y')\n return 'Done'", "def plot_states(F, U, X_hat, x0=np.array([-0.72, -0.64])):\n n = len(U)\n\n X = [x0]\n for i in range(n):\n X.append(F(X[-1], u(i)))\n X = np.array(X)\n\n fig, ax = plt.subplots()\n ax.plot(X[:, 0], X[:, 1], '.', color='blue')\n ax.plot(X_hat[:, 0], X_hat[:, 1], '+', color='black')\n ax.set_xlim(-2, 1)\n ax.set_ylim(-2, 1)\n\n return fig, ax", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_data(x_plot, X_train, X_test, y_train, y_test, low, high):\n s = 15\n plt.plot(x_plot, ground_truth(x_plot), alpha=0.5, label='ground truth')\n plt.scatter(X_train, y_train, s=s, alpha=0.2)\n plt.scatter(X_test, y_test, s=s, alpha=0.2, color='red')\n plt.xlim((low, high))\n plt.ylabel('y')\n plt.xlabel('x')\n plt.legend(loc='upper left')\n plt.show()", "def plot_line(ax, p1, p2, *args, **kwargs):\n ax.plot(*zip(p1, p2), *args, **kwargs)", "def line_plot():\n # generate data\n x = np.arange(0, 4 * np.pi, 0.1) # x in [0, 4* pi)\n y_cos = np.cos(x)\n\n plt.figure()\n plt.plot(x, y_cos)\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.title('Cosine function in $[0, 4\\pi)$ with line plot')\n plt.show()\n\n return None", "def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n val = val or self.compute()[0]\n return self._plot(val, ax)", "def exercise11():\n x = np.arange(0, 10, 0.01);\n y = np.sin(x)\n\n plt.figure()\n plt.xlabel(\"x values\")\n plt.ylabel(\"sin(x)\")\n plt.title(\"Sine Function for x from 0.0 to 10.0\")\n plt.plot(x, y)\n plt.show()", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def graph():\r\n \r\n function = input(\"Enter a function f(x):\\n\")\r\n \r\n for y_axis in range(10,-11,-1):\r\n for i in range (-10,11):\r\n x = i\r\n y = round(eval(function))\r\n if (y - math.floor(y)) >= 0.25:\r\n y= math.ceil(y)\r\n else:\r\n y = math.floor(y)\r\n if y_axis == y:\r\n print('o',end=\"\")\r\n elif y_axis == i == 0 and y!=0:\r\n print(\"+\",end=\"\")\r\n elif y_axis == 0 and y!=0:\r\n print(\"-\",end=\"\")\r\n elif i == 0:\r\n print(\"|\",end=\"\")\r\n else:\r\n print(\" \",end=\"\")\r\n print()", "def graph(x, y, xlabel = \"\", ylabel = \"\", legend = \"\", color = \"\"):\n plt.plot(x, y, color, label = legend)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.legend(loc = 'best')\n plt.grid()", "def plot_single_variogram(tup,sill,title,opath):\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(tup[1],tup[0],color='green',s=220)\n plt.title(title)\n plt.plot([0,tup[1].max()],[sill,sill],color='red',linewidth=3)\n plt.grid()\n plt.xlabel('Distancia (h)')\n plt.ylabel('semi-variograma')\n plt.xlim(0,tup[1].max())\n plt.ylim(0,sill+0.05*sill)\n plt.savefig(opath+'.png')\n plt.close()", "def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf", "def plot(self, x_coords, y_coords=None, options=''):\n # TODO\n # add options for line width, marker size\n\n # If y-vector is str, then only x vector given\n if isinstance(y_coords, str):\n options = y_coords\n 
y_coords = None\n\n one_set_data = False\n # Set y-vector to default if None\n if y_coords is None:\n one_set_data = True\n y_coords = [*range(0, len(x_coords))]\n\n # Verify x, y coords have same length\n if len(x_coords) != len(y_coords):\n raise ValueError(\n \"Number of X coordinates does not equal \"\n \"number of Y coordinates.\")\n\n # Verify options given (and save settings to be applied)\n verified_options = self.__verify_plot_options(options)\n\n if one_set_data:\n # Draw plot for one list of data\n self.__draw_path(\n y_coords, # Y is default x-coords in one data set\n x_coords, # User input\n verified_options[0], # Line\n verified_options[1], # Marker\n verified_options[2], # Colour\n )\n else:\n # Draw plot for two lists of data\n self.__draw_path(\n x_coords, # User input\n y_coords, # User input\n verified_options[0], # Line\n verified_options[1], # Marker\n verified_options[2], # Colour\n )", "def plot_piecewise_functions(x, a, name=\"piecewise_functions\", save=False,\n dirname=DEFAULT_DIR):\n plt.figure()\n plt.xlabel(\"$x$\")\n plt.ylabel(\"$f(x)$\")\n plt.plot(x, v_function(x, a), label=\"$u(x)$\")\n plt.plot(x, step_function(x, a), label=\"$u'(x)$\")\n plt.legend()\n if save:\n os.makedirs(dirname, exist_ok=True)\n plt.savefig(os.path.join(dirname, f\"{name}.png\"), dpi=300)\n else:\n plt.show()", "def showGraph(self,function,window,color=None):\n if not color: color=window.randomColor()\n wsx,wsy=window.size\n points=[]\n for X in range(0,wsx):\n x,y=self.getFromScreen([X,0],window)\n X,Y=self.getToScreen([x,function(x)],window)\n points.append((X,Y))\n window.draw.lines(window.screen,color,False,points,1)", "def plot(self, *, ax=..., n_cols=..., line_kw=..., contour_kw=...):\n ...", "def add_plot(self, state, data, y_axis, function=lambda x: x, x_axis='freq', ax=None, title=None, log_axis='x', save=True, show=False):\n\n functions_dict = {'x': plt.semilogx, 'y': plt.semilogx, 'both': plt.loglog, 'none': plt.plot}\n #TODO: Unfinished\n\n if ax is None:\n fig = plt.figure()\n ax = plt.gca()\n\n if title is None:\n title = y_axis\n\n # import IPython\n sweep_kwrds = data['sweep_params'][y_axis]\n sweep_kwrds = [kwrd for kwrd in sweep_kwrds if kwrd != x_axis]\n # combos = itertools.product(*(list(range(len(data[swp_kwrd]))) for swp_kwrd in sweep_kwrds))\n\n # IPython.embed()\n # if combos:\n # for index in combos:\n # functions_dict[log_axis](data[x_axis], np.abs(data[y_axis][index, :]))\n # else:\n functions_dict[log_axis](data[x_axis], function(data[y_axis]))\n plt.ylabel(y_axis)\n plt.xlabel(x_axis)\n ax.grid()\n if save:\n fname = os.path.join(self.data_dir, title + \".png\")\n if os.path.isfile(fname):\n os.remove(fname)\n plt.savefig(fname, dpi=200)\n plt.close()\n if show:\n plt.show()", "def _plot_model(params, label, range_=None):\n b, a = params\n if range_ is None:\n x = np.linspace(0,1)\n else:\n u, v = range_\n x = np.linspace(u,v)\n y = a*x + b\n return plt.plot(x, y, label=label)", "def plot_do(cls, plot_func, *args, **kwargs):\n pf_too=kwargs.pop(\"pf_too\", False)\n plotter=kwargs.pop(\"plotter\", None)\n if plotter is None:\n plotter=kwargs.pop(\"pl\", None)\n if plotter is None:\n plotter=Plotter()\n elif isinstance(plotter, basestring):\n if plotter in cls.agent_dict:\n plotter=cls.agent_dict[plotter]\n else:\n plotter=Plotter(name=plotter)\n pf=getattr(plotter, plot_func)(*args, **kwargs)\n if pf_too:\n return plotter, pf\n return plotter", "def main():\r\n plot = Plotter(0.5, 1.2)\r\n plot.plot_func()", "def plot(self, routePoints=None):\n return 
plot(routePoints, self.profiles)", "def plot(self, center=0, xmin=-1, xmax=1):\n if self.eps == 0:\n return [xmin, center, center, xmax], [0, 0, 1, 1]\n else:\n n = 200./self.eps\n x = concatenate(\n linspace(xmin, center-self.eps, 21),\n linspace(center-self.eps, center+self.eps, n+1),\n linspace(center+self.eps, xmax, 21))\n y = self(x)\n return x, y" ]
[ "0.7233913", "0.67269236", "0.6719971", "0.643809", "0.63938314", "0.63545954", "0.62653416", "0.61198306", "0.6113964", "0.6108317", "0.60705554", "0.6022874", "0.5939718", "0.59299445", "0.59191865", "0.5891549", "0.58682585", "0.58335316", "0.5833347", "0.5831018", "0.58123714", "0.5807514", "0.5777953", "0.57212555", "0.5707899", "0.570736", "0.56929064", "0.568099", "0.5675708", "0.56659776", "0.56654936", "0.56389785", "0.55813867", "0.5580435", "0.55796975", "0.5579652", "0.55762017", "0.55762017", "0.55762017", "0.55683565", "0.55621886", "0.55532426", "0.55532426", "0.55532426", "0.5547005", "0.5535817", "0.5517697", "0.5517046", "0.5507649", "0.5503572", "0.5502629", "0.549891", "0.5467793", "0.5461849", "0.54546404", "0.54522234", "0.543873", "0.5435471", "0.5432868", "0.5432093", "0.5431587", "0.54270554", "0.54247004", "0.54126084", "0.54028255", "0.53950256", "0.53913736", "0.5390109", "0.5383792", "0.53615135", "0.5361476", "0.5354461", "0.53536344", "0.5353633", "0.5352048", "0.53513813", "0.5348467", "0.5344265", "0.5334655", "0.5326121", "0.5315932", "0.5314248", "0.52962404", "0.52946156", "0.52871424", "0.52760506", "0.52737755", "0.526605", "0.52634925", "0.52596873", "0.5259562", "0.52592653", "0.525926", "0.52575713", "0.52544844", "0.5245874", "0.524448", "0.52409893", "0.523931", "0.52377" ]
0.8709098
0
Plot values along direction dir={0,1,2}, through point pt=[x,y,z]
def plot_values_along(pp,pt=[0.5,0.5,0.5],**kw):
    kv = {'dir':0, 'verbose':0, 'all':False, 'iv':0, 'i4':0, 'var':None}
    kw,kv = _kw_extract(kw,kv)
    plot(ds.values_along(pp,pt,iv=kv['iv'],dir=kv['dir'],all=kv['all']),**kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_point(axis, pnt3d, color):\n xy = bc2xy(pnt3d)\n axis.scatter(xy[0], xy[1], c=color, marker='o')", "def plot_direction(ax, phi_deg, psi_deg, \n color='black', marker='d', markersize=3,\n label=None, label_position='right', weight='normal'):\n phi_rad = phi_deg*np.pi/180 + 0.001\n psi_stereo = 2*np.tan(psi_deg*np.pi/180 /2) # Stereographic projection\n psi_stereo_annotate = psi_stereo if psi_stereo > 0.1 else 0.1 # Bug... ?\n\n if label:\n va = 'center' if label_position == 'right' else 'baseline'\n ha = 'center' if label_position == 'center' else 'left'\n \n ax.annotate(label, (phi_rad, psi_stereo_annotate),\n textcoords='offset points', xytext=(0, 5),\n rotation=0, alpha=0.9, color=color, family='sans-serif',\n horizontalalignment=ha, va=va, weight=weight)\n\n ax.plot(phi_rad, psi_stereo, marker, color=color, markersize=markersize)", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot(X, colors=None, l=1, origin=None, ax=None, linestyle='-'):\n if origin is not None: pts = origin\n else: pts = np.array([[0, 0, 1], [l, 0, 1], [0, l, 1]]) # 3 x 3\n if ax is None: ax = plt.gca()\n # x = inv(X).dot(pts.T).T\n x = (X.dot(pts.T)).T\n\n # x = X.dot(pts.T).T\n\n if colors is None:\n white = np.array([1, 1, 1])\n alpha = 0.3\n red = alpha * np.array([1, 0, 0]) + (1-alpha) * white\n blue = alpha * np.array([0, 0, 1]) + (1-alpha) * white\n red = np.maximum([0, 0, 0], np.minimum(red, [1,1,1]))\n blue = np.maximum([0, 0, 0], np.minimum(blue, [1,1,1]))\n colors = np.stack((red,blue))\n\n ax.arrow(x[0,0], x[0,1], x[1,0]-x[0,0], x[1,1]-x[0,1], color=colors[0], linestyle=linestyle)\n ax.arrow(x[0,0], x[0,1], x[2,0]-x[0,0], x[2,1]-x[0,1], color=colors[1], linestyle=linestyle)\n plt.scatter(x[:,0], x[:,1], s=0)\n\n ax.set_aspect('equal', 'box')", "def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n 
wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def plot_points(points, ax=None, style={'marker': 'o', 'color': 'b'}, label=False):\n if ax == None:\n ax = plt.gca()\n for ind, p in enumerate(points):\n ax.plot(p.real, p.imag, **style)\n if label:\n ax.text(p.real, p.imag, s=ind, horizontalalignment='center', verticalalignment='center')\n ax.set_xlim(-1.1, 1.1)\n ax.set_ylim(-1.1, 1.1)", "def userToPlot(x=0,y=0,z=0):\n return dislin.nxposn(x), dislin.nyposn(y), dislin.nzposn(z)", "def plotPoints(x,y):\n display = PacmanPlot(x,y)\n display.takeControl()", "def plot_meas_points(self, ang, run, pos=None):\n dats = self.get_dats(ang, run, pos=pos)\n if pos is not None:\n self.plot_points(dats[1])\n else:\n for _pos, _pts in dats:\n self.plot_points(_pts)", "def plot_sources(azimuth, elevation, distance=1.6):\n ax = Axes3D(plt.figure())\n azimuth = np.deg2rad(azimuth)\n elevation = np.deg2rad(elevation-90)\n x = distance * np.sin(elevation) * np.cos(azimuth)\n y = distance * np.sin(elevation) * np.sin(azimuth)\n z = distance * np.cos(elevation)\n ax.scatter(x, y, z, c='b', marker='.')\n ax.scatter(0, 0, 0, c='r', marker='o')", "def plot_points(self, _pts, color='b', marker='o'):\n xs, ys, zs = _pts[:,0], _pts[:,1], _pts[:,2]\n self.fig_ax.scatter(xs, ys, zs, color=color, marker=marker)\n plt.draw()", "def plotProperty(self, x, y, z = [], idx = None, col = 1, row = 1, N = 1, ax = None,\\\n save = False, dpi = 100, format = \"pdf\", verbose = 1, handle = False,\\\n translation = None, title = None, other = None, ab = [],\\\n m = \"o\", ms = 2, leg = True, ylim = None, xlim = None, xscale = \"linear\",\\\n yscale = \"linear\", **kwargs):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n \n if type(x) == str: x = [x]\n if type(y) == str: y = [y]\n if type(z) == str: z = [z]\n if len(x) != len(y):\n string = \"Length x (%i) and y (%i) must be the same\" % (len(x), len(y))\n ut.infoPrint(string)\n return\n\n if len(z) > 0 and len(x) != len(z):\n string = \"Length x (%i) and y (%i) and z (%i) must be the same\"\\\n % (len(x), len(y), len(z))\n ut.infoPrint(string)\n return\n\n m = kwargs.pop(\"marker\", m)\n ls = kwargs.pop(\"linestyle\", \"none\")\n ms = kwargs.pop(\"markersize\", ms)\n\n if len(m) == 1: m = m * len(x)\n if isinstance(ab, (int, np.integer)): ab = [ab]\n\n x_data, x_lbl, x_leg = self.getData(idx = idx, var = x, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n y_data, y_lbl, y_leg = self.getData(idx = idx, var = y, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n if len(x_data) != len(y_data): 
return\n\n if len(z) > 0:\n z_data, z_lbl, z_leg = self.getData(idx = idx, var = z, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n\n if len(x_data) != len(y_data) != len(z_data) or z_data == []: return\n else:\n z_data = None\n\n hP = []\n if not handle:\n hFig = plt.figure()\n hAx = plt.subplot(row, col, N)\n else:\n hAx = ax\n\n if z_data is None:\n\n kwargs.pop(\"vmin\", None)\n kwargs.pop(\"vmax\", None)\n kwargs.pop(\"colormap\", None)\n\n for i in range(len(x_data)):\n\n tP = hAx.plot(x_data[i].T, y_data[i].T, linestyle = ls, marker = m[i],\\\n markersize = ms, **kwargs)\n\n [hP.append(lines) for lines in tP]\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 5: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 5: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n\n else:\n zmin = np.min([np.min(i) for i in z_data])\n zmax = np.max([np.max(i) for i in z_data])\n\n cm = kwargs.pop(\"colormap\", \"plasma\")\n cmap = plt.cm.get_cmap(cm)\n vmin = kwargs.pop(\"vmin\", zmin)\n vmax = kwargs.pop(\"vmax\", zmax)\n c = kwargs.pop(\"color\", 'b')\n lw = kwargs.pop(\"linewidth\", 1.2)\n\n\n for i in range(len(x_data)):\n\n if np.ndim(x_data[i]) == 1: x_data[i] = x_data[i][None, :]\n if np.ndim(y_data[i]) == 1: y_data[i] = y_data[i][None, :]\n if np.ndim(z_data[i]) == 1: z_data[i] = z_data[i][None, :]\n\n if (np.shape(z_data[i]) != np.shape(x_data[i])) and\\\n (np.shape(z_data[i]) != np.shape(y_data[i])) and\\\n (z_data[i].shape[0] != 1):\n string = \"Ambiguous z data %s with x %s and y %s\"\\\n % (np.shape(z_data[i]), np.shape(x_data[i]), np.shape(y_data[i]))\n ut.infoPrint(string)\n return\n \n j,k,l = (0, 0, 0)\n for ii, t in enumerate(translation):\n\n tP = hAx.scatter(x_data[i][j, :], y_data[i][k, :], c = z_data[i][l, :],\\\n vmin = vmin, vmax = vmax, cmap = cmap, marker = m[i],\\\n label = \"\", s = ms, linewidth = lw, **kwargs)\n\n hP.append(tP)\n\n if np.shape(x_data[i])[0] > 1: j += 1\n if np.shape(y_data[i])[0] > 1: k += 1\n if np.shape(z_data[i])[0] > 1: l += 1\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 4: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 4: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n \n if not handle: plt.colorbar(hP[0], label = z_lbl[0])\n\n if ylim is not None:\n hAx.set_ylim(bottom = ylim[0], top = ylim[1])\n if xlim is not None:\n hAx.set_xlim(left = xlim[0], right = xlim[1])\n\n hAx.set_yscale(yscale)\n hAx.set_xscale(xscale)\n hAx.set_xlabel(x_lbl[0])\n hAx.set_ylabel(y_lbl[0])\n if title is None:\n hAx.set_title(self.filename)\n else:\n hAx.set_title(title)\n\n if handle: \n return\n\n \"\"\"Annotating plot marker\"\"\"\n hP[0].set_pickradius(2)\n anP = hAx.plot([], [], marker = 'o', ms = 6, color = 'k', mew = 2, mfc = 'None',\\\n linestyle = 'None')\n\n plt.tight_layout()\n\n \"\"\"Function to allow clickable points to display information\"\"\"\n def click(event):\n if event.inaxes == hAx:\n\n for line in hP:\n cont, ind = line.contains(event)\n if cont:\n break\n\n if cont:\n if z_data is not None:\n x = line.get_offsets()[:, 0]\n y = line.get_offsets()[:, 1]\n else:\n x, y = line.get_data()\n\n xSel = x[ind[\"ind\"]]\n ySel = y[ind[\"ind\"]]\n\n pPos = hAx.transData.transform((xSel, ySel))\n pDist = np.linalg.norm(pPos - [[event.x, event.y]], axis = 1)\n index = ind[\"ind\"][np.argmin(pDist)]\n anP[0].set_data(x[ind[\"ind\"]], y[ind[\"ind\"]])\n for n, i in enumerate(ind[\"ind\"]):\n string = \"Idx: %i (%.4f, %.4f) | Nr Points: %i\"\\\n 
% (idx[i], x[i], y[i], len(ind[\"ind\"]))\n\n if n == 0: \n print(\"=\" * len(string))\n print(string)\n if n == len(ind[\"ind\"]) - 1: \n print(\"=\" * len(string))\n\n hFig.canvas.draw_idle()\n else:\n anP[0].set_data([], [])\n hFig.canvas.draw_idle()\n\n if save:\n if save is True:\n ut.save_fig(filename = \"PropertyPlot.%s\" % format, format = format,\\\n dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n hFig.canvas.mpl_connect(\"button_release_event\", click)\n plt.show()", "def draw_point(self, pos: Vec3, properties: Properties) -> None:\n raise NotImplementedError", "def plot_xyzt(grbdir,ax, x, y, z, t):\n global runconf\n\t\n colors = ['blue', 'gray', 'red', 'black']\n names = ['X', 'Y', 'Z', grbdir]\n zdirs = ['x', 'y', 'z', None]\n\n mkffile = runconf['mkffile']\n trigtime = runconf['trigtime']\n ra_tran = runconf['ra']\n dec_tran = runconf['dec']\n mkfdata = fits.getdata(mkffile, 1)\n window = 10\n sel = abs(mkfdata['time'] - trigtime) < window\t\n \n earthx = -np.median(mkfdata['posx'][sel])\n earthy = -np.median(mkfdata['posy'][sel]) \n earthz = -np.median(mkfdata['posz'][sel]) \n \n earth_vec_mag = np.sqrt(earthx**2 + earthy**2 + earthz**2)\n \n earth = coo.SkyCoord(earthx, earthy, earthz, frame='icrs', representation='cartesian')\n \t\t\t\n ax.set_xlim(-1.2,1.2)\n ax.set_ylim(-1.2,1.2)\n ax.set_zlim(-1.2,1.2)\n\n for count, dirn in enumerate([x, y, z, t]):\n xx, yy, zz = dirn.cartesian.x.value, dirn.cartesian.y.value, dirn.cartesian.z.value\n ax.quiver(0, 0, 0, xx, yy, zz, color=colors[count])\n ax.text(xx, yy, zz, names[count], zdirs[count])\n\t\n ax.quiver(0,0,0,earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,color='green') \n ax.text(earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,'Earth')\n \n #ax.set_xlabel(\"RA = 0\")\n #ax.set_zlabel(\"Pole\")\n return", "def draw_waypoints(world, waypoints, z=0.5):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.3, life_time=1.0)", "def draw_waypoints(world, waypoints, z=0.01):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.1, life_time=1.0)", "def show_traces(x_data, z_data, fig=100, direction='h', title=None):\n plt.figure(fig)\n plt.clf()\n if direction == 'v' or direction == 'vertical':\n for ii, l in enumerate(z_data.T):\n c = []\n c = plt.cm.jet(float(ii) / z_data.shape[1])\n plt.plot(x_data, l, '', color=c)\n if title is None:\n title = 'Blue: left vertical lines, red: right lines'\n plt.title(title)\n else:\n for ii, l in enumerate(z_data):\n c = []\n c = plt.cm.jet(float(ii) / z_data.shape[0])\n plt.plot(x_data, l, '', color=c)\n if title is None:\n title = 'Blue: top lines, red: bottom lines'\n plt.title(title)\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Signal (a.u.)')", "def point (p, direction: str):\n def wrap (magnitude: int):\n change = changes [direction]\n return (\n p [0] + (change [0] * magnitude),\n p [1] + (change [1] * magnitude),\n )\n return wrap", "def plot_vector(c, color='k', start=0, linestyle='-'):\n return plt.arrow(np.real(start), np.imag(start), np.real(c), 
np.imag(c),\n linestyle=linestyle, head_width=0.05,\n fc=color, ec=color, overhang=0.3, length_includes_head=True)", "def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n 
ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def mri_point_plot(self, vcol=1):\n img = self.voxels\n points = self.point_position \n ax = []\n fig = plt.figure(figsize=(9, 8))\n # TODO make this setable in the function call\n columns = 3\n rows = 2\n\n for i in range(points.shape[0]):\n im_slice = int(np.round(points[i, vcol]))\n if vcol == 0:\n im = img[im_slice, :, :]\n elif vcol == 1:\n im = img[:, im_slice, :]\n else:\n im = img[:, :, im_slice]\n ax.append( fig.add_subplot(rows, columns, i+1))\n ax[-1].set_title(\"Image depth: \"+str(im_slice)) # set title\n plt.imshow(im)\n plot_cols = np.array([0, 1, 2])\n plot_cols = plot_cols[plot_cols != vcol]\n plt.plot(points[i, min(plot_cols)], points[i, max(plot_cols)], 'ro')\n\n plt.show()", "def SpeedDirPlot(t,u,v,convention='current',units='m s^{-1}',color1='b',color2='r'):\r\n import airsea\r\n \r\n Dir, Spd = airsea.convertUV2SpeedDirn(u,v,convention=convention)\r\n \r\n \r\n ax = range(2)\r\n h = range(2)\r\n fig = plt.gcf()\r\n ax[0] = fig.gca()\r\n \r\n \r\n # Left axes\r\n h[0] = ax[0].fill_between(t, Spd, color=color1,alpha=0.7)\r\n # Make the y-axis label and tick labels match the line color.\r\n ax[0].set_ylabel('Speed [$%s$]'%units, color=color1)\r\n for tl in ax[0].get_yticklabels():\r\n tl.set_color(color1)\r\n\r\n #Right axes\r\n ax[1] = ax[0].twinx() # This sets up the second axes\r\n ax[1].plot(t, Dir, '.',color=color2)\r\n ax[1].set_ylabel(\"Dir'n [$\\circ$]\", color=color2)\r\n ax[1].set_ylim([0,360])\r\n ax[1].set_yticks([0,90,180,270])\r\n ax[1].set_yticklabels(['N','E','S','W'])\r\n for tl in ax[1].get_yticklabels():\r\n tl.set_color(color2)\r\n \r\n plt.setp( ax[0].xaxis.get_majorticklabels(), rotation=17 )\r\n \r\n return ax, h", "def vect_fig():\r\n ax = move_spines()\r\n \r\n ax.set_xlim(-5, 5)\r\n ax.set_ylim(-5, 5)\r\n ax.grid()\r\n vecs = [[2, 4], [-3, 3], [-4, -3.5]] # lista de vectores\r\n for v in vecs:\r\n ax.annotate(\" \", xy=v, xytext=[0, 0],\r\n arrowprops=dict(facecolor=\"blue\",\r\n shrink=0,\r\n alpha=0.7,\r\n width=0.5))\r\n ax.text(1.1 * v[0], 1.1 * v[1], v)", "def xyplane(draw, r, x, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [x,-extent,0],\n [x,extent,0],\n [x,extent,extent*2],\n [x,-extent,extent*2]\n ]\n )\n pln = np.dot(pln,np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def plot(self,\n name: str,\n G_list: list = None,\n V_goal_list: list = None,\n opt_path: list = None):\n colorscales = ['Reds', 'Greens', 'Blues', 'Magentas']\n color = ['red', 'green', 'blue', 'magenta']\n pd = []\n\n if self.d == 3:\n X = []\n Y = []\n Z = []\n if opt_path:\n for i, path in enumerate(opt_path):\n X.clear(), Y.clear(), Z.clear()\n for state in path:\n X += [state[0]]\n Y += [state[1]]\n Z += [state[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, marker=dict(color=color[i], size=5), name='Path_M' + str(i)))\n\n if G_list:\n X.clear(), Y.clear(), Z.clear()\n for G in G_list:\n for e in G.E.values():\n X += [G.V[e.node_a].value[0], G.V[e.node_b].value[0], None]\n Y += [G.V[e.node_a].value[1], G.V[e.node_b].value[1], None]\n Z += 
[G.V[e.node_a].value[2], G.V[e.node_b].value[2], None]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='lines', showlegend=True,\n line=dict(color='rgb(125,125,125)', width=0.5),\n hoverinfo='none', name='Tree'))\n pd.append(go.Scatter3d(x=[self.start[0]], y=[self.start[1]], z=[self.start[2]],\n mode='markers', marker=dict(color='red', size=5), name='Start'))\n\n if V_goal_list:\n X.clear(), Y.clear(), Z.clear()\n for i, V in enumerate(V_goal_list):\n for j in V:\n X += [G_list[i].V[j].value[0]]\n Y += [G_list[i].V[j].value[1]]\n Z += [G_list[i].V[j].value[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='markers',\n marker=dict(color='magenta', size=5),\n name='Intersection nodes'))\n\n if self.name in ['3d_point_wo_obstacles', '3d_point_w_obstacles']:\n for i, m in enumerate(self.manifolds):\n limits = [self.lim_lo[0], self.lim_up[0], self.lim_lo[1], self.lim_up[1]]\n X_m, Y_m, Z_m = m.draw(limits=limits)\n\n if m.draw_type == \"Scatter\":\n pd.append(go.Scatter3d(x=X_m, y=Y_m, z=Z_m, showlegend=False, mode='markers',\n marker=dict(color=color[i], size=5)))\n elif m.draw_type == \"Surface\":\n pd.append(go.Surface(x=X_m, y=Y_m, z=Z_m, opacity=0.8, showscale=False,\n colorscale=colorscales[i]))\n\n for obs in self.obstacles:\n plot_box(pd=pd, pos=np.array([0., 0., obs[0]]), quat=np.array([0., 0., 0., 1.]), size=np.array(obs[1:]))\n\n fig = go.Figure(data=pd, layout=go.Layout(yaxis=dict(scaleanchor=\"x\", scaleratio=1)))\n plot(fig, filename='plots/task_' + self.name + '_' + name + '.html', auto_open=True)", "def plot_points(points,**kw):\n try:\n fmt = kw.pop('fmt')\n except:\n fmt='k'\n try:\n label = kw.pop('label')\n except:\n label = None\n npts = len(points)\n if npts == 0: return\n xy = num.zeros((npts,2))\n for j in range(npts):\n v = points[j]\n xy[j,0] = v[0]\n xy[j,1] = v[1]\n idx = num.argsort(xy[:,0])\n xy = xy[idx]\n for j in range(len(xy)):\n if j < npts - 1:\n pyplot.plot([0.,xy[j,0]],[0,xy[j,1]],fmt,**kw)\n else:\n pyplot.plot([0.,xy[j,0]],[0,xy[j,1]],fmt,label=label,**kw)", "def plotTimeDepthInteract(d2,d3,v1,v2,v3):\n d = np.array((0.,d2,d3), dtype=float)\n v = np.array((v1,v2,v3), dtype=float)\n plotTimeDepth(d,v)", "def plot_3d_vector(pa):\n\n # Changeable variables\n al = 0.01 # arrow length\n rgba = (0.3, 0.3, 0.3, 0.8) # rgba for panels\n lw = 1.5 # changes thickness of arrow\n\n X, Y, Z, U, V, W = zip(*pa)\n A = np.sqrt(np.power(X, 2) + np.power(Y, 2))\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n q = ax.quiver(X[::peo3], Y[::peo3], Z[::peo3], U[::peo3], V[::peo3], W[::peo3], A,\n length=al, lw=lw)\n q.set_array(np.random.rand(10))\n plt.colorbar(q)\n ax.w_xaxis.set_pane_color(rgba)\n ax.w_yaxis.set_pane_color(rgba)\n ax.w_zaxis.set_pane_color(rgba)\n ax.set_zlabel(\"Height\")\n ax.set_title(r\"$\\mu$-PIV vector plot, %s, %s\" % (shark_species, sample_area))\n\n plt.show()\n return", "def plot_traj(ax,traj,style,color,label,mode, timestep = None):\n x = []\n y = []\n i = 0.0\n # traj = traj +zs [traj[0]]\n for co in traj:\n rotX, rotY, rotZ = quaternion_to_euler(co[4], co[5], co[6], co[7])\n if mode == 'xy':\n x.append(co[0+1])\n y.append(co[1+1])\n if mode == 'xz':\n x.append(co[0+1])\n y.append(co[2+1])\n if mode == 'yz':\n x.append(co[1+1])\n y.append(co[2+1])\n\n if mode == 'rotx':\n x.append(i)\n y.append(rotX)\n if mode == 'roty':\n x.append(i)\n y.append(rotY)\n if mode == 'rotz':\n x.append(i)\n y.append(rotZ)\n\n if mode == 'ax':\n x.append(i)\n y.append(co[1])\n if mode == 'ay':\n x.append(i)\n y.append(co[2])\n if mode == 'az':\n 
x.append(i)\n y.append(co[3])\n i += timestep\n\n ax.plot(x,y,style,color=color,label=label)", "def path(T = 100, period = 1):\n #source\n x, y, z=source \n\n answer=[[0, 0, 0]]\n X=[x]\n Y=[y]\n Z=[z]\n \n # plotting\n fig=plt.figure()\n ax=Axes3D(fig)\n \n # plotting (axes)\n ax.set_xlabel('XX')\n ax.set_ylabel('YY')\n ax.set_zlabel('ZZ')\n \n \n d=0\n x1, y1, z1=x0, y0, z0\n \n #TODO: do while runs < nps\n #TODO: do while E > e_cutoff\n for t in xrange(T):\n \n # direction cossines\n costheta_x=random.uniform(-1, 1)\n costheta_y=random.uniform(-1, 1)\n costheta_z=random.uniform(-1, 1) \n \n dx=(1/mu)*costheta_x\n dy=(1/mu)*costheta_y\n dz=(1/mu)*costheta_z\n# print dx, mu\n# print dy, mu\n# print dz, mu \n \n \n x+=dx\n y+=dy\n z+=dz\n \n d+=distance(x1, y1, z1, x, y, z)\n \n x1, y1, z1=x, y, z\n \n answer.append([t+1, x, y, z])\n X.append(x)\n Y.append(y)\n Z.append(z) \n\n \n ax.scatter3D(X[1:2], Y[1:2], Z[1:2], c = 'r', marker = 's', linewidths = 1)\n ax.scatter3D(X, Y, Z, c = 'b', marker = 'o', linewidths = 1)\n \n d=distance(x, y, z, x0, y0, z0) \n \n print \"Initial position: \", \"(%.2f,%.2f,%.2f)\"%(X[0], Y[0], Z[0])\n print \"Final position: \", \"(%.2f,%.2f,%.2f)\"%(X[-1], Y[-1], Z[-1])\n print \"N� steps: \", t+1\n print \"Total path length: \", d\n \n \n fig=plt.figure()\n ax=fig.add_subplot(111)\n ax.plot(X[0], Y[0], 's', color = 'g', ms = 9)\n ax.plot(X, Y, '-o', color = 'b', ms = 4)\n ax.plot(X[-1], Y[-1], 's', color = 'r', ms = 9)\n plt.show()\n \n #summary() \n \n \n return answer", "def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()", "def PointDir(point, dir):\n\n point = arg.getvector(point, 3)\n dir = arg.getvector(dir, 3)\n # self.P = B;\n # self.Q = A+B;\n \n return Plucker(np.r_[np.cross(dir, point), dir])", "def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,", "def PlotAntsPlane():\n\n # load and shrink airplane\n airplane = vtkInterface.PolyData(planefile)\n airplane.points /= 10\n # pts = airplane.GetNumpyPoints() # gets pointer to array\n # pts /= 10 # shrink\n\n # rotate and translate ant so it is on the plane\n ant = vtkInterface.PolyData(antfile)\n ant.RotateX(90)\n ant.Translate([90, 60, 15])\n\n # Make a copy and add another ant\n ant_copy = ant.Copy()\n ant_copy.Translate([30, 0, -10])\n\n # Create plotting object\n plobj = vtkInterface.PlotClass()\n plobj.AddMesh(ant, 'r')\n plobj.AddMesh(ant_copy, 'b')\n\n # Add airplane mesh and make the color equal to the Y position\n plane_scalars = airplane.points[:, 1]\n plobj.AddMesh(airplane, scalars=plane_scalars, stitle='Plane Y\\nLocation')\n plobj.AddText('Ants and Plane Example')\n plobj.Plot()", "def draw_point(env, point, size=10, color=(0, 1, 0)):\n iktype = orpy.IkParameterizationType.Translation3D\n ikparam = orpy.IkParameterization(point, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, linewidth=size, coloradd=color)\n return h", "def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()", "def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n 
**self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):\n\n # set up data\n dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)\n varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)\n\n colors = utils.get_nrelcolors()\n \n fig = plt.figure(figsize=(8,2.5))\n ax = fig.add_subplot(111)\n\n ax.scatter(metdat[dircol], metdat[varcol], marker='o',facecolor='w',color='k',lw=0.5,alpha=0.7)\n ax.set_xlim([0,360])\n\n for ii in range(len(exclude_angles)):\n ax.axvspan(exclude_angles[ii][0], exclude_angles[ii][1], alpha=0.1, color=colors[basecolor][0])\n ax.set_title(r'$z={}$ m'.format(vertloc))\n ax.set_xlabel(r'Wind Direction [$^\\circ$]')\n ax.set_ylabel(catinfo['labels'][category])\n \n return fig, ax#, leg", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def move_point(p, direction, d=1):\n direction_guard(direction)\n x, y = p\n dx, dy = directions[direction]\n return (x + dx * d, y + dy * d)", "def translate_point(pt, length, direction):\n if isinstance(direction,float):\n # direction is a float (in radians)\n return (pt[0]+length*np.cos(direction), pt[1]+length*np.sin(direction))\n elif str(direction)==\"NORTH\":\n return (pt[0], pt[1]+length)\n elif str(direction)==\"SOUTH\":\n return (pt[0], pt[1]-length)\n elif str(direction)==\"WEST\":\n return (pt[0]-length, pt[1])\n elif str(direction)==\"EAST\":\n return (pt[0]+length, pt[1])", "def plot_points(L, color):\r\n \r\n X = list()\r\n Y = list()\r\n for p in L:\r\n X.append(p[0])\r\n Y.append(p[1])\r\n plt.scatter(X, Y, c=color)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def vis_points(data,f1,f2):\n if np.isnan(data).any():\n return\n \n plt.scatter(data[:,f1], data[:,f2], alpha=0.2, c='b')\n plt.xlim(lims)\n plt.ylim(lims)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def plot2DSet(desc, lab):\n positif = desc[lab == 1]\n negatif = desc[lab == -1]\n\n plt.scatter(positif[:,0], positif[:,1], marker='o', c='#0000FF')\n plt.scatter(negatif[:,0], negatif[:,1], marker='x', c='#FF0000')\n plt.grid(True)", "def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = 
self.points_radius\n p.conversion = False\n p.show(self.context)", "def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")", "def plot_line(unit_vect, point, array):\n x_vals = []\n y_vals = []\n z_vals = []\n for i in array:\n x_vals.append(unit_vect[0] * i + point[0])\n y_vals.append(unit_vect[1] * i + point[1])\n z_vals.append(unit_vect[2] * i + point[2])\n\n return [x_vals, y_vals, z_vals]", "def showPoints(surface, points):\n for point in points:\n point.show(surface)", "def plotPoints(img, points, radius = 3, color= (0, 0, 255)):\n\tfor pt in points:\n\t\tdrawCircle(img, pt, radius = radius, color = color)", "def plot_directory_numex(path, vals, param='density', outname=None, show=True,\n xscale=1e-3,yscale=1e2):\n #vals = arange(2300.0, 2800.0, 50.0)\n outdirs = np.sort(os.listdir(path))\n plt.figure()\n\n # Plot surface profiles for each parameter\n for val,outdir in zip(vals,outdirs):\n pointsFile = os.path.join(path, outdir, 'points.h5')\n print(pointsFile)\n x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output=True, adjustRadial=True)\n x_fem = x_fem / xscale\n ur_fem = ur_fem / yscale\n uz_fem = uz_fem / yscale\n l, = plt.plot(x_fem,uz_fem,'.-',label=str(val))\n plt.plot(x_fem,ur_fem,'.-',color=l.get_color())\n\n # Annotate\n plt.axhline(color='k') #zero displacement line\n plt.title(param)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n plt.legend()\n\n if outname: plt.savefig(outname)\n if show: plt.show()", "def plotVector(self, prop, graph, vec, rangex=[], rangey=[]):\r\n if (not self.doMPL):\r\n graph('set data style linespoints')\r\n miny = 0; maxy = 0; minx = 0; maxx = 0;\r\n for i in range(0, vec.__len__()):\r\n if (vec[i][0] < minx):\r\n minx = vec[i][0]\r\n elif (vec[i][0] > maxx):\r\n maxx = vec[i][0]\r\n if (vec[i][1] < miny):\r\n miny = vec[i][1]\r\n elif (vec[i][1] > maxy):\r\n maxy = vec[i][1]\r\n if (vec.__len__() == 1):\r\n if (rangex.__len__() == 0):\r\n graph.set_range('xrange', (minx-0.5, maxx+0.5))\r\n else:\r\n graph.set_range('xrange', (rangex[0], rangex[1]))\r\n if (rangey.__len__() == 0):\r\n graph.set_range('yrange', (miny-0.5, maxy+0.5))\r\n else:\r\n graph.set_range('yrange', (rangey[0], rangey[1]))\r\n else:\r\n if (rangex.__len__() == 0):\r\n graph.set_range('xrange', (minx-0.1, maxx+0.1))\r\n else:\r\n graph.set_range('xrange', (rangex[0], rangex[1]))\r\n if (rangey.__len__() == 0):\r\n graph.set_range('yrange', (miny-0.1, maxy+0.1))\r\n else:\r\n graph.set_range('yrange', (rangey[0], rangey[1]))\r\n graph.plot(vec)\r\n else:\r\n figure(graph, (6,4))\r\n xlabel(self.graphLabelsX[graph])\r\n ylabel(self.graphLabelsY[graph])\r\n hh = 0\r\n datax = []\r\n datay = []\r\n while (hh < vec.__len__()):\r\n datax.append(vec[hh][0])\r\n datay.append(vec[hh][1])\r\n hh = hh + 1\r\n plot(datax, datay)\r\n draw()\r\n if ((self.pause != 0) and (prop.myStep % self.pause == 0)):\r\n \t print \"PRESS <RETURN> TO CONTINUE\"\r\n \t raw_input()", "def drawPointCloud(points, ax, color=None):\n if len(points.shape) != 2 or points.shape[0] != 3:\n raise ValueError(\"'points' must be 3xN\")\n if color == None:\n color = __color_cycle.next()\n elif color in (0, 1, 2):\n color = points[color, :]\n ax.scatter(points[0,:].T, points[1,:].T, points[2,:].T, c=color)", "def plot_vor(self,x,ax,tri=False):\n\n L = 
self.L\n grid_x, grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j in np.array([grid_x.ravel(), grid_y.ravel()]).T])\n\n c_types_print = np.tile(self.c_types,9)\n bleed = 0.1\n c_types_print = c_types_print[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n y = y[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(y))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n cols = self.cols\n if self.plot_scatter is True:\n for j,i in enumerate(np.unique(self.c_types)):\n ax.scatter(x[self.c_types==i, 0], x[self.c_types==i, 1],color=cols[i],zorder=1000)\n patches = []\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cols[c_types_print[i]],ec=(1,1,1,1)))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)\n if tri is not False:\n for TRI in tri:\n for j in range(3):\n a, b = TRI[j], TRI[np.mod(j + 1, 3)]\n if (a >= 0) and (b >= 0):\n X = np.stack((x[a], x[b])).T\n ax.plot(X[0], X[1], color=\"black\")", "def plotPoints(vec, centers = None, numcolors = 2):\n\n try:\n import matplotlib.pyplot\n from mpl_toolkits.mplot3d import Axes3D\n except:\n raise ImportError, \"matplotlib package not found.\"\n\n markers = ['o', '^', 'x']\n labels = [\"Cluster 1\", \"Cluster 2\", \"Morning Data\", \"Evening Data\"]\n handles = []\n count = 0\n \n fig = matplotlib.pyplot.figure()\n ax1 = fig.add_subplot(111)\n\n for i in range(len(vec)):\n for v in vec[i]:\n col = getColor(i, numcolors, decimal = True)\n col = (0, max(col[1] - 0.2, 0), col[2] - 0.1)\n ax1.scatter(v[1], v[2], color = col, s = 100, marker = markers[v[0]])\n\n matplotlib.pyplot.show()", "def draw_point(turt, pos, count):\r\n turt.goto(pos)\r\n turt.color(\"lawngreen\")\r\n turt.dot(8)\r\n turt.pu()\r\n turt.forward(5)\r\n turt.color(\"HotPink1\")\r\n turt.write(count, True, align=\"left\")\r\n turt.hideturtle()", "def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n 
line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "def plot_quadrant_scatter(self, skip=None, prefix=''):\n\n return quadrant_scatter( self.map_properties(), skip=skip, prefix=prefix )", "def plot_swim_vel_scatter(self, ax1, ax2, masked=False, transpose_coords=False,\r\n age_scale='log10',\r\n add_cbar=True, **kwargs):\r\n rec_swim = self.rec_swim\r\n nsegments = len(rec_swim)\r\n\r\n swim_u = rec_swim.swim_u\r\n swim_v = rec_swim.swim_v\r\n u_rel,v_rel = self.vel_from_hydro(vel=[swim_u,swim_v], \r\n rec_swim=rec_swim)\r\n #swim_spd = self.rec_swim['swim_spd']\r\n\r\n if nsegments==0:\r\n # happens when a the model output hasn't been extracted for this tag.\r\n print(\"Tag %s has no segments. Skipping plot_swim_vel_scatter\"%self.ID)\r\n return\r\n time_from_entry = rec_swim.tm - rec_swim.tm[0] + 1\r\n if age_scale=='log10':\r\n age_scaled = np.log10(time_from_entry)\r\n elif age_scale=='linear':\r\n age_scaled = time_from_entry\r\n else:\r\n raise Exception(\"Bad age_scale: %s\"%age_scale)\r\n\r\n u,v=u_rel,v_rel\r\n x_label=\"Up $\\leftrightarrow$ Down\"\r\n y_label=\"Left $\\leftrightarrow$ Right\"\r\n if transpose_coords:\r\n u,v=-v,u # this gives left=left\r\n x_label,y_label=y_label,x_label\r\n \r\n scat = ax1.scatter(u, v, marker='o', s=2.0, c=age_scaled)\r\n\r\n ax1.set_xlabel(x_label)\r\n ax1.set_ylabel(y_label)\r\n \r\n fsize = 6\r\n ax1.text(1.01, 0.90,\r\n \"\\n\".join( [ \"n = %d\"%nsegments, \r\n \"avg|u| = %4.2f\"%np.average(np.abs(u_rel)),\r\n \"avg|v| = %4.2f\"%np.average(np.abs(v_rel)) ]),\r\n va='top',transform=ax1.transAxes,\r\n fontsize=fsize)\r\n ax1.set_xlim([-0.5,0.5])\r\n ax1.set_ylim([-0.5,0.5])\r\n ax1.axhline(0, color='k',zorder=0)\r\n ax1.axvline(0, color='k',zorder=0)\r\n ax1.set_aspect('equal')\r\n clims = [ticks[0],ticks[-1]]\r\n scat.set_clim(clims)\r\n if add_cbar:\r\n cax = plt.gcf().add_axes([0.41,0.12,0.16,0.025]) # hardwired\r\n label = 'Time (seconds)'\r\n c1 = plt.gcf().colorbar(scat, cax=cax, orientation='horizontal')\r\n #c1 = plt.gcf().colorbar(scat)\r\n c1.set_label(label)\r\n c1.set_ticks(ticks)\r\n c1.set_ticklabels(tick_labels)\r\n # smoothed speed over ground\r\n us = rec_swim.us\r\n vs = rec_swim.vs\r\n uh,vh = self.vel_from_hydro(vel=[us,vs], rec_swim=rec_swim)\r\n u,v=uh,vh\r\n \r\n if transpose_coords:\r\n u,v=-v,u\r\n \r\n scat2 = ax2.scatter(u, v, marker='o', s=2.0, c=age_scaled)\r\n ax2.set_xlabel(x_label) \r\n ax2.set_ylabel(y_label)\r\n ax2.text(1.01, 0.90,\r\n \"\\n\".join( [\"n = %d\"%nsegments,\r\n \"avg|u| = %4.2f\"%np.average(np.abs(uh)),\r\n \"avg|v| = %4.2f\"%np.average(np.abs(vh))]),\r\n transform=ax2.transAxes,\r\n va='top',\r\n fontsize=fsize)\r\n ax2.axhline(0, color='k',zorder=0)\r\n ax2.axvline(0, color='k',zorder=0)\r\n ax2.set_xlim([-1,1])\r\n ax2.set_ylim([-1,1])\r\n ax2.set_aspect('equal')\r\n scat2.set_clim(clims)\r\n\r\n return", "def draw(self):\n pt = self.getPoint() # Centre of prism\n\n # Form top,left,right corners\n top = Vector2d(pt.z, pt.y + self.height/2)\n d = self.height*math.tan(self.angle/2)\n left = Vector2d(pt.z - d , pt.y - self.height/2)\n right = Vector2d(pt.z + d, pt.y - self.height/2)\n\n\n top.rotate(self.tilt)\n left.rotate(self.tilt)\n right.rotate(self.tilt)\n\n # Plot them out with plt.plot\n 
plot([top[0],left[0],right[0],top[0]],[top[1],left[1],right[1],top[1]],\"k\",lw=2.0)", "def plot(self, values=None):\r\n plt.cla()\r\n plt.xlim([0, self.pond_size[0]])\r\n plt.ylim([0, self.pond_size[1]])\r\n plt.xticks(np.arange(self.pond_size[0]), [])\r\n for i in range(self.pond_size[0]):\r\n plt.text(i+0.4, -0.5, str(i))\r\n plt.yticks(np.arange(self.pond_size[1]), [])\r\n for i in range(self.pond_size[1]):\r\n plt.text(-0.5, i+0.4, str(i))\r\n\r\n # Draw the trajectory\r\n t_x = np.array([t[0] for t in self.trajectory])\r\n t_y = np.array([t[1] for t in self.trajectory])\r\n plt.plot(t_x+0.5, t_y+0.5, 'r-o')\r\n\r\n # Draw currents and values\r\n for x in range(self.pond_size[0]):\r\n for y in range(self.pond_size[1]):\r\n if values is not None:\r\n plt.text(x, y, '%.1f'%values[y, x])\r\n c = self.currents[y][x]\r\n assert len(c)==4\r\n for i in range(4):\r\n if c[i] != '0':\r\n head_size = 0.15 if c[i] == '1' else 0.3\r\n d = self.current_directions[i]\r\n plt.arrow(x+0.5-0.4*d[0], y+0.5-0.4*d[1], (0.8-head_size)*d[0], (0.8-head_size)*d[1],\r\n head_width=head_size, head_length=head_size, overhang=1.0)\r\n\r\n # Draw start and end states\r\n plt.gcf().gca().add_artist(plt.Circle((self.start_state[0]+0.5, self.start_state[1]+0.5), 0.4, color='r', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.end_state[0]+0.5, self.end_state[1]+0.5), 0.4, color='g', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.current_state[0]+0.5, self.current_state[1]+0.5), 0.25, color='b', alpha=0.5))\r\n plt.grid(True)\r\n plt.pause(0.2)", "def plotScatter(verts, data, coords=(1,2), comp=2):\n z = data[:,:,comp].flatten()\n x = verts[:,coords[0]]\n y = verts[:,coords[1]]\n\n # NOTE: either scatter or pcolor should work\n plt.figure()\n compDict = {0:'X',1:'Y',2:'Z'}\n #plt.gca().set_aspect('equal')\n plt.scatter(x, y, c=z, s=80, cmap=plt.cm.bwr)\n plt.title( compDict[comp] + ' Displacement' )\n plt.xlabel(compDict[coords[0]] + ' Distance [m]')\n plt.ylabel(compDict[coords[1]] + ' Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0\n , blAxes = True):\n if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):\n arg = list(arg)\n\n vs = sf.vs_()\n\n color = tuple(color) # color argment may be list/vector\n if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(\n arg,type(sf.sc.array([0,]))):\n from octnOp import ClOctonion\n if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):\n arg = [ (x.real, x.imag) for x in arg]\n elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):\n arg = [ x[1:4] for x in arg]\n\n if len(arg[0])==2:\n import visual.graph as vg\n global __obj2dDisplayGeneratedStt\n\n maxX = max([abs(elm[0]) for elm in arg])\n maxY = max([abs(elm[1]) for elm in arg])\n\n print \"maxX:\",maxX, \" maxY:\",maxY\n\n if (__obj2dDisplayGeneratedStt == None):\n if xyRate == True: # 11.01.16 to \n maxAt = max(maxX, maxY)\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600*maxX/maxAt,height=600*maxY/maxAt)\n else:\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600,height=600)\n #__bl2dDisplayGeneratedStt = True\n grphAt = vg.gcurve(color = color)\n for i in range(len(arg)):\n assert len(arg[i])==2, \"unexpeted length data:\"+str(arg[i])\n 
grphAt.plot(pos = arg[i])\n\n #return __obj2dDisplayGeneratedStt\n #import pdb; pdb.set_trace()\n #print \"debug:\",grphAt.gcurve.pos\n\n # plot start mark\n grphSqAt = vg.gcurve(color = color)\n pos0At = grphAt.gcurve.pos[0,:][:2]\n rateAt = 50\n for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]\n , [-maxY/rateAt, maxY/rateAt]):\n grphSqAt.plot(pos = pos0At+[x,y])\n \n grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])\n\n return grphAt # 09.02.04 to animate graph\n elif len(arg[0])==3:\n vs.scene.forward=(-1,+1,-1)\n vs.scene.up=(0,0,1)\n\n c = vs.curve( color = color )\n\n maxX, maxY, maxZ = 0,0,0\n for i in range(len(arg)):\n if maxX < abs(arg[i][0]):\n maxX = abs(arg[i][0])\n if maxY < abs(arg[i][1]):\n maxY = abs(arg[i][1])\n if maxZ < abs(arg[i][2]):\n maxZ = abs(arg[i][2])\n c.append( arg[i] )\n #print c.pos\n print \"maxX:\",maxX, \" maxY:\",maxY, \" maxZ:\",maxZ\n maxAt = max(maxX,maxY,maxZ)\n c.radius = maxAt/radiusRate\n\n vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)\n\n if blAxes == True:\n # draw axise\n vs.curve( pos=[(0,0,0), (maxAt,0,0)]\n , color=(1,0,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,maxAt,0)]\n , color=(0,1,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,0,maxAt)]\n , color=(0,1,1)\n , radius = maxAt/100 )\n #return vs.scene\n return c # 09.02.04 to animate graph\n else:\n assert False,\"unexpeted data:\"+str(arg)", "def PlotAnt():\n ant = vtkInterface.PolyData(antfile)\n ant.Plot(color='r', style='wireframe')", "def linecut_points( **kwargs ):\n npoints = kwargs.get('npoints', 320)\n extents = kwargs.get('extents',None)\n lims = kwargs.get('lims', (-80.,80.))\n direc = kwargs.get('direc', (np.pi/2, 0.))\n origin = kwargs.get('origin', vec3(0.,0.,0.))\n\n if extents is not None:\n lims = (-extents, extents)\n\n # Prepare set of points for plot \n t = np.linspace( lims[0], lims[1], npoints )\n unit = vec3()\n th = direc[0]\n ph = direc[1] \n unit.set_spherical(1, th, ph) \n # Convert vec3s to ndarray\n unit = np.array(unit)\n origin = np.array(origin) \n #\n XYZ = origin + np.outer(t, unit)\n X = XYZ[:,0]\n Y = XYZ[:,1]\n Z = XYZ[:,2]\n \n return t, X, Y, Z, lims", "def DrawDottedLine(self, dc, point, length, vertical):\r\n\r\n for i in xrange(0, length, 2):\r\n dc.DrawPoint(point.x, point.y)\r\n if vertical:\r\n point.y += 2\r\n else:\r\n point.x += 2", "def drawVector3D(x0,y0,z0,x1,y1,z1, vtype='normal'):\n dislin.vectr3(x0,y0,z0,x1,y1,z1, vectordict[vtype])", "def _plot(self, **kwargs):\n XY = self.positions\n plt.plot(XY[0,:], XY[1,:], 'o')\n plt.gca().set_aspect('equal')\n SS = np.abs(self.S)\n SS /= SS.max()\n\n for i in range(self.N):\n for j in range(self.N):\n if i == j or SS[i,j] < 1e-2:\n continue\n clr = 'r' if self.S[i,j]<0 else 'b'\n x, y = XY[:,i]\n r = XY[:,j] - XY[:,i]\n dx, dy = r\n rhat = r / np.sqrt((r**2).sum())\n ofsx, ofsy = 0.03 * rhat\n perpx, perpy = 0.005 * np.array([-rhat[1], rhat[0]])\n plt.arrow(x + ofsx + perpx, y + ofsy + perpy,\n r[0] - 2*ofsx, r[1] - 2*ofsy, color=clr,\n shape='right', width=0.01*SS[i,j],\n length_includes_head=True, head_width=0.02,\n linewidth=0, **kwargs)", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def plot_desired_velocity(self,id=1,dpi=150):\n fig = plt.figure(id)\n ax1 = fig.add_subplot(111)\n ax1.imshow(self.image,interpolation='nearest',\n 
extent=[self.xmin,self.xmax,self.ymin,self.ymax], origin='lower')\n ax1.imshow(self.door_distance,interpolation='nearest',\n extent=[self.xmin,self.xmax,self.ymin,self.ymax],alpha=0.7,\n origin='lower')\n step = 10\n ax1.quiver(self.X[::step, ::step],self.Y[::step, ::step],\n self.desired_velocity_X[::step, ::step],\n self.desired_velocity_Y[::step, ::step])\n #plt.savefig('.png',dpi=dpi)\n plt.draw()", "def plot_positives(positives):\n plt.scatter(positives[:,0], positives[:,1], label='Goal examples', marker='*', color='g', s=200)", "def plot_scatter_points(self):\n self.plot(1)", "def plot_scalp(v, channel):\n\n channelpos = [tts.channels[c] for c in channel]\n points = [calculate_stereographic_projection(i) for i in channelpos]\n x = [i[0] for i in points]\n y = [i[1] for i in points]\n z = v\n X, Y, Z = interpolate_2d(x, y, z)\n plt.contour(X, Y, Z, 20)\n plt.contourf(X, Y, Z, 20)\n #plt.clabel(im)\n plt.colorbar()\n plt.gca().add_artist(plt.Circle((0, 0), radius=1, linewidth=3, fill=False))\n plt.plot(x, y, 'bo')\n for i in zip(channel, zip(x,y)):\n plt.annotate(i[0], i[1])", "def vector_plot(tvects,is_vect=True,orig=[0,0,0]):\n\n if is_vect:\n if not hasattr(orig[0],\"__iter__\"):\n coords = [[orig,np.sum([orig,v],axis=0)] for v in tvects]\n else:\n coords = [[o,np.sum([o,v],axis=0)] for o,v in zip(orig,tvects)]\n else:\n coords = tvects\n\n data = []\n for i,c in enumerate(coords):\n X1, Y1, Z1 = zip(c[0])\n X2, Y2, Z2 = zip(c[1])\n vector = go.Scatter3d(x = [X1[0],X2[0]],\n y = [Y1[0],Y2[0]],\n z = [Z1[0],Z2[0]],\n marker = dict(size = [0,5],\n color = ['blue'],\n line=dict(width=5,\n color='DarkSlateGrey')),\n name = 'Vector'+str(i+1))\n data.append(vector)\n\n layout = go.Layout(\n margin = dict(l = 4,\n r = 4,\n b = 4,\n t = 4)\n )\n fig = go.Figure(data=data,layout=layout)\n #pio.write_html(fig,file='index.html',auto_open=False)\n #py.plot(fig, filename = 'gdp_per_cap4', auto_open=True)\n fig.show()", "def rotatePoint(self, point, dir=+1):\n pnew = np.zeros([len(point), point.shape[1], 2])\n pnew[:, :, 0] = point[:, :, 0]*self.ctheta + point[:, :, 1]*self.stheta*dir\n pnew[:, :, 1] = -point[:, :, 0] * \\\n self.stheta*dir + point[:, :, 1]*self.ctheta\n return pnew", "def plot_dist_ony(z, dz, om, dom, dist, dh, name, mathname, filename=None):\n\n\n dist = dist/dh\n z = z * numpy.ones(dist.shape)\n om = om * numpy.ones(dist.shape)\n\n pylab.figure(figsize=(5.5,4.5)) \n\n\n pylab.contour(z, dist, om, 50)\n cb = pylab.colorbar()\n cb.ax.set_ylabel(r'$\\Omega_M = 1 - \\Omega_\\lambda$')\n \n pylab.xlim(z.min(), z.max())\n pylab.ylim(dist.min(), dist.max()) \n pylab.xlabel(\"redshift z\")\n pylab.ylabel(name + r': $'+mathname+'/D_H$')\n pylab.title(name)\n if filename is not None:\n prefix, extension = filename.split('.')\n pylab.savefig(prefix + '_' + mathname + '_ony.' 
+ extension,\n bbox_inches=\"tight\")", "def plot_iter(V, Pi, params):\n n_rows = params['n_rows']\n n_cols = params['n_cols'] \n occ_grid = params['occ_grid']\n R = params['R']\n\n goal = params['goal']\n sink = params['sink']\n\n actions = ['left','right','up','down']\n\n fig1 = plt.figure(1, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if occ_grid[row, col] == 1:\n plt.text(col, n_rows - 1 - row, '0.0', color='k', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n else:\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(V[row, col]), \n color='b', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n\n fig2 = plt.figure(2, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if not Pi[row, col] == -1:\n plt.text(col, n_rows - 1 - row, actions[Pi[row, col]], \n color='k', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n fig2.canvas.draw()\n fig2.canvas.flush_events()", "def plot_3d(trj: TrajaDataFrame, **kwargs) -> matplotlib.collections.PathCollection:\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_xlabel(\"x\", fontsize=15)\n ax.set_zlabel(\"time\", fontsize=15)\n ax.set_ylabel(\"y\", fontsize=15)\n title = kwargs.pop(\"title\", \"Trajectory\")\n ax.set_title(f\"{title}\", fontsize=20)\n ax.plot(trj.x, trj.y, trj.index)\n cmap = kwargs.pop(\"cmap\", \"winter\")\n cm = plt.get_cmap(cmap)\n NPOINTS = len(trj)\n ax.set_prop_cycle(color=[cm(1.0 * i / (NPOINTS - 1)) for i in range(NPOINTS - 1)])\n for i in range(NPOINTS - 1):\n ax.plot(trj.x[i : i + 2], trj.y[i : i + 2], trj.index[i : i + 2])\n\n dist = kwargs.pop(\"dist\", None)\n if dist:\n ax.dist = dist\n labelpad = kwargs.pop(\"labelpad\", None)\n if labelpad:\n from matplotlib import rcParams\n\n rcParams[\"axes.labelpad\"] = labelpad\n\n return ax", "def plot_phase(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,np.angle(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def scatter(self, points):\n assert points.shape[0] == 3\n self.points = points\n self.remove_scatter_plot()\n x, y, z = zip(*points.T)\n self.base_fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z,\n mode='markers',\n marker=dict(\n size=2,\n color=np.zeros(points.shape[1]),\n colorscale=\"RdBu\",\n colorbar=dict(\n title=\"Samples\",\n x=-0.2\n ),\n )\n ))", "def plot_pointing(self, *args, coord=\"tl\", **kwargs):\n # TODO: Generalize that function\n warnings.warn(\"Deprecated function needs update.\", DeprecationWarning)\n self.__check_attributes([\"F_{}_az\".format(coord), \" F_{}_el\".format(coord)])\n return kids_plots.checkPointing(self, *args, **kwargs)", "def plot_points(x, y):\n x_one = x[y == -1, :]\n x_two = x[y == 1, :]\n\n plt.scatter(x_one[:, 1], x_one[:, 2], marker='x', 
color='red')\n plt.scatter(x_two[:, 1], x_two[:, 2], marker='o', color='blue')", "def plot_initial_pos(pos, p_init):\n \n\tplt.figure(figsize=(8,8))\n\tfor posi in pos:\n\t\tplt.scatter(posi[0], posi[1])\n\t\n\tplt.xlabel('a', fontsize=16)\n\tplt.ylabel('b', fontsize=16)\n\tplt.xlim(p_init[0]-0.0005,p_init[0]+0.0005)\n\tplt.ylim(p_init[1]-0.0005,p_init[1]+0.0005)\n\tplt.show()", "def visualizeTrajectory(y, g):\n visualizeObs()\n x = np.linspace(-1.5, 1.5, 13)[1:-1]\n plt.plot(np.concatenate(([-1.5],x,[1.5])), np.concatenate(([0],y,[0])), color='black', marker='+')\n if g is not None:\n for i in range(y.size):\n plt.arrow(x[i], y[i], 0, -0.5*g[i], color='blue', head_width=0.05)", "def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))", "def scatter3d(self, x, y, z, filename=None, spot_cols=None, label=False, stem=False, \n label_font_size=6, rotation=134, elevation=48, interactive=False, squish_scales=False, \n spot_size=40, **kargs):\n assert filename, \"scatter(): Must provide a filename\" \n \n xdata = self.__v[x-1]\n ydata = self.__v[y-1]\n zdata = self.__v[z-1]\n \n fig = self.__draw.getfigure(**kargs)\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elevation, azim=rotation)\n \n cols = self.cols\n if spot_cols:\n cols = spot_cols \n \n ax.scatter(xdata, ydata, zdata, edgecolors=\"none\", c=cols, s=spot_size)\n if label:\n for i, lab in enumerate(self.labels):\n ax.text(xdata[i], ydata[i], zdata[i], lab, size=label_font_size, ha=\"center\", va=\"bottom\")\n \n if stem: # stem must go after scatter for sorting. Actually, not true right? 
matplotlib uses zorder for that...\n z_min = min(zdata)\n for x_, y_, z_ in zip(xdata, ydata, zdata): \n line = art3d.Line3D(*list(zip((x_, y_, z_min), (x_, y_, z_))), marker=None, c=\"grey\", alpha=0.1)\n ax.add_line(line)\n \n ax.set_xlabel(\"PC%s\" % (x,)) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s\" % (y,))\n ax.set_zlabel(\"PC%s\" % (z,))\n \n if \"logx\" in kargs and kargs[\"logx\"]:\n ax.set_xscale(\"log\", basex=kargs[\"logx\"])\n if \"logy\" in kargs and kargs[\"logy\"]:\n ax.set_yscale(\"log\", basey=kargs[\"logy\"])\n \n if squish_scales: \n # Don't worry about kargs, do_common_args will overwrite.\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n ax.set_zlim([min(zdata), max(zdata)])\n \n self.__draw.do_common_args(ax, **kargs)\n if \"zlims\" in kargs:\n ax.set_zlim([kargs[\"zlim\"][0], kargs[\"zlim\"][1]])\n \n if interactive:\n fig.show() # hope you are not on a cluster!\n \n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"scatter3d(): Saved 'PC%s' vs 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, z, real_filename))", "def phase_space_l(data, save=False):\n x, y, z, px, py, pz, t = data\n\n fig, ax = plt.subplots(1, 1, figsize=(5, 4))\n\n dz = z - np.mean(z)\n dp = pz / np.mean(pz) - 1\n colors = _cal_color(np.array([dz, dp]), True)\n\n ax.set(xlabel='dz (mm)', ylabel='dpz/pz')\n ax.scatter(dz, dp, c=colors, marker='+', label='phase space z',\n vmin=0, vmax=1, cmap='Reds')\n\n ax.axhline(0, color='k', ls=':', lw=0.5)\n ax.axvline(0, color='k', ls=':', lw=0.5)\n\n leg = ax.legend(loc=0)\n leg.legendHandles[0].set_color('r')\n x0, x1 = ax.get_xlim()\n y0, y1 = ax.get_ylim()\n ax.set_aspect((x1 - x0) / (y1 - y0))\n\n fig.tight_layout()\n\n if save:\n fig.savefig('phase_space_l.pdf', bbox_inches='tight')\n plt.show()", "def plot_1D_edp(self, start=(-10,25), end=(30,-20), N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X = rho[:,0]\n Y = rho[:,1]\n plt.figure()\n plt.plot(X, Y)", "def plot_points_simple(ax, points, paths=[], path_labels=[]):\n cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n \n ax.scatter(*points, alpha=0.1, c='k')\n# add functionality to ignore labels\n for path, label, col in zip(paths, path_labels, cols):\n path_points = np.array([points[:, u] for u in path]).transpose()\n ax.plot(*path_points, alpha=.8,label=label, c=col)\n ax.scatter(*path_points, c=col, alpha=0.6)\n \n ax.set(xlim=[0, 1], ylim=[0, 1])\n ax.set_axis_off()\n \n if path_labels:\n ax.legend()\n\n return ax", "def plot_distances(self, params=None, **kw):\n from ...ipyutils import plot_data\n if params is None:\n params = self.collocation_points()\n if 'distances' in kw:\n distances = kw.pop('distances')\n else:\n distances = self.point_distances(params=params)\n return plot_data(\n params[:-1], distances,\n **insert_missing(\n kw,\n figsize=(4, 2.4), dpi=110,\n ylog=True,\n xlabel=r\"curve parameter $\\lambda$\",\n ylabel=r\"coordinate distance\",\n )\n )", "def _get_visual_position(self, point: int) -> float:\n return point / self._param[\"n_v\"] + np.random.uniform() / \\\n self._param[\"n_v\"]", "def draw_pointcloud(ax, example):\n points = 
example['points'].cpu().detach().numpy()\n points_num = len(points)\n xs = np.empty([points_num])\n ys = np.empty([points_num])\n zs = np.empty([points_num])\n intensity = np.empty([len(points)])\n for j, point in enumerate(points):\n xs[j] = point[1]\n ys[j] = point[2]\n zs[j] = point[3]\n intensity[j] = point[4]\n\n intensity = intensity\n ax.scatter3D(xs, ys, zs, c=intensity, marker='.', s=0.3, cmap=plt.get_cmap('jet'))", "def printXY(self):\n print zip(self.x, self.y)", "def plot_activity(opts, points, activity, labels, plot_state=False):\n sort_ix = sort_weights(opts)\n activity[:,opts.state_size:] = activity[:,opts.state_size+sort_ix]\n\n x = np.arange(0, opts.state_size)\n # x = np.linspace(np.amin(points[:, 0]), np.amax(points[:, 0]))\n scale = 2 * np.pi / opts.state_size\n x_rad = x * scale\n cos, sin = np.cos(x_rad), np.sin(x_rad)\n if opts.velocity:\n y = np.linspace(np.amin(points[:, 1]), np.amax(points[:, 1]))\n else:\n y = np.zeros(1)\n\n x_mesh, y_mesh = np.meshgrid(x, y)\n cos, _ = np.meshgrid(cos, y)\n sin, _ = np.meshgrid(sin, y)\n if plot_state:\n nc, nr = 5, 4\n neurons = np.arange(opts.state_size) # state neurons\n else:\n nc, nr = 5, 8\n neurons = np.arange(opts.state_size, opts.rnn_size) # extra neurons\n\n\n f_linear, ax_linear = plt.subplots(ncols=nc, nrows=nr)\n # plt.suptitle('Linear Interpolated Data')\n\n c, r = 0, 0\n for i, n in enumerate(neurons):\n z_lin = griddata(points[:, :2], activity[:, n], (x_mesh, y_mesh), method='linear')\n plt.sca(ax_linear[r, c])\n # plt.title('Neuron {}'.format(n))\n plt.contourf(x, y, z_lin, cmap='RdBu_r')\n plt.axis('off')\n\n # find the global centroid\n if np.nanmax(z_lin) <= 0:\n z_lin -= np.nanmean(z_lin) # center activations at the median\n\n z_lin[np.isnan(z_lin)] = 0\n z_lin[z_lin < 0] = 0\n norm = np.sum(z_lin)\n\n cos_mean = np.sum(cos * z_lin) / norm\n sin_mean = np.sum(sin * z_lin) / norm\n com_rad = np.arctan2(sin_mean, cos_mean)\n com_x = (com_rad / scale) % 20\n com_y = np.sum(y_mesh * z_lin) / norm\n # plt.scatter(com_x, com_y, c='k')\n\n c += 1\n if c == nc:\n c = 0\n r += 1\n if r == nr:\n break\n # plt.tight_layout()\n plt.show()" ]
[ "0.6262929", "0.6006915", "0.57216173", "0.5702731", "0.56610227", "0.5660082", "0.5647964", "0.5641175", "0.56411433", "0.5629691", "0.56232494", "0.5620824", "0.5591335", "0.5566767", "0.5546428", "0.5539211", "0.553336", "0.553327", "0.552634", "0.5522603", "0.55138886", "0.5501403", "0.5495351", "0.54736555", "0.5454024", "0.5442609", "0.543557", "0.5407632", "0.54051584", "0.540337", "0.53776664", "0.53497636", "0.5348198", "0.53467304", "0.53459555", "0.53447896", "0.53434616", "0.5337344", "0.5331579", "0.53253883", "0.5307427", "0.5302427", "0.52976036", "0.5284044", "0.52747655", "0.52670056", "0.5262701", "0.52556795", "0.5252648", "0.52473724", "0.52464116", "0.52434146", "0.5227192", "0.52190244", "0.52167547", "0.521179", "0.52097803", "0.5209678", "0.5199627", "0.51938874", "0.5192662", "0.5187322", "0.5184433", "0.5181122", "0.51802444", "0.5177626", "0.5174483", "0.5171218", "0.5166514", "0.5162914", "0.51623327", "0.515842", "0.51538", "0.5152469", "0.5145516", "0.5140161", "0.5131913", "0.5131049", "0.51302564", "0.5127205", "0.5124044", "0.5117432", "0.51160645", "0.51104224", "0.51074386", "0.5103264", "0.5103101", "0.51016724", "0.50976574", "0.50925153", "0.5086083", "0.5085028", "0.50830424", "0.5077436", "0.50735277", "0.5064551", "0.50634", "0.5059962", "0.5056582", "0.5055851" ]
0.605601
1
Plot values along direction dir={0,1,2}, through point pt=[x,y,z]
def plot_patch_values_along(pp_in,pt=[0.5,0.5,0.5],hold=False,**kw):
    kv = {'dir':0, 'verbose':0, 'all':False, 'iv':0, 'i4':0, 'var':None}
    kw,kv = _kw_extract(kw,kv)
    pp = ds.patches_along(pp_in,pt,dir=kv['dir'],verbose=kv['verbose'])
    xmin,xmax = ds.minmax_patches(pp,dir=kv['dir'])
    if not hold:
        pl.xlim(xmin,xmax)
    for p in pp:
        plot(ds.values_in(p,pt,**kv),**kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_point(axis, pnt3d, color):\n xy = bc2xy(pnt3d)\n axis.scatter(xy[0], xy[1], c=color, marker='o')", "def plot_values_along(pp,pt=[0.5,0.5,0.5],**kw):\n kv = {'dir':0, 'verbose':0, 'all':False, 'iv':0, 'i4':0, 'var':None}\n kw,kv = _kw_extract(kw,kv)\n plot(ds.values_along(pp,pt,iv=kv['iv'],dir=kv['dir'],all=kv['all']),**kw)", "def plot_direction(ax, phi_deg, psi_deg, \n color='black', marker='d', markersize=3,\n label=None, label_position='right', weight='normal'):\n phi_rad = phi_deg*np.pi/180 + 0.001\n psi_stereo = 2*np.tan(psi_deg*np.pi/180 /2) # Stereographic projection\n psi_stereo_annotate = psi_stereo if psi_stereo > 0.1 else 0.1 # Bug... ?\n\n if label:\n va = 'center' if label_position == 'right' else 'baseline'\n ha = 'center' if label_position == 'center' else 'left'\n \n ax.annotate(label, (phi_rad, psi_stereo_annotate),\n textcoords='offset points', xytext=(0, 5),\n rotation=0, alpha=0.9, color=color, family='sans-serif',\n horizontalalignment=ha, va=va, weight=weight)\n\n ax.plot(phi_rad, psi_stereo, marker, color=color, markersize=markersize)", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot(X, colors=None, l=1, origin=None, ax=None, linestyle='-'):\n if origin is not None: pts = origin\n else: pts = np.array([[0, 0, 1], [l, 0, 1], [0, l, 1]]) # 3 x 3\n if ax is None: ax = plt.gca()\n # x = inv(X).dot(pts.T).T\n x = (X.dot(pts.T)).T\n\n # x = X.dot(pts.T).T\n\n if colors is None:\n white = np.array([1, 1, 1])\n alpha = 0.3\n red = alpha * np.array([1, 0, 0]) + (1-alpha) * white\n blue = alpha * np.array([0, 0, 1]) + (1-alpha) * white\n red = np.maximum([0, 0, 0], np.minimum(red, [1,1,1]))\n blue = np.maximum([0, 0, 0], np.minimum(blue, [1,1,1]))\n colors = np.stack((red,blue))\n\n ax.arrow(x[0,0], x[0,1], x[1,0]-x[0,0], x[1,1]-x[0,1], color=colors[0], linestyle=linestyle)\n ax.arrow(x[0,0], x[0,1], x[2,0]-x[0,0], x[2,1]-x[0,1], color=colors[1], linestyle=linestyle)\n plt.scatter(x[:,0], x[:,1], s=0)\n\n ax.set_aspect('equal', 'box')", "def plot_results_traj_3d(p_x, p_y, p_z, xmin, xmax, ymin, ymax, zmin, zmax):\n fig, ax = plt.subplots(2 , 2, figsize = (10, 10))\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n for t in np.arange(0, p_x.shape[1], step = 1): \n ax[0,0].plot(t, p_x[p, t], 'rx') \n ax[0,1].plot(t, p_y[p, t], 'gx') \n ax[1,0].plot(t, p_z[p, t], 'bx') \n ax[1,1].plot(t, p_x[p, t], 'rx') \n ax[1,1].plot(t, p_y[p, t], 'gx') \n ax[1,1].plot(t, p_z[p, t], 'bx') \n for a in ax.flat: \n a.set(xlabel = 'Time steps', ylabel = 'Position')\n ax[0,0].set_title('X (pix)') \n ax[0,0].set_ylim([xmin, xmax]) \n ax[0,1].set_title('Y (pix)') \n ax[0,1].set_ylim([ymin, ymax]) \n ax[1,0].set_title('Z (pix)') \n ax[1,0].set_ylim([zmin, zmax])\n ax[1,1].set_title('Positions combined') \n ax[1,1].set_ylim([np.array([xmin, ymin, zmin]).min(), np.array([xmax, ymax, zmax]).max()])", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n 
wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def plot_points(points, ax=None, style={'marker': 'o', 'color': 'b'}, label=False):\n if ax == None:\n ax = plt.gca()\n for ind, p in enumerate(points):\n ax.plot(p.real, p.imag, **style)\n if label:\n ax.text(p.real, p.imag, s=ind, horizontalalignment='center', verticalalignment='center')\n ax.set_xlim(-1.1, 1.1)\n ax.set_ylim(-1.1, 1.1)", "def plotPoints(x,y):\n display = PacmanPlot(x,y)\n display.takeControl()", "def userToPlot(x=0,y=0,z=0):\n return dislin.nxposn(x), dislin.nyposn(y), dislin.nzposn(z)", "def plot_meas_points(self, ang, run, pos=None):\n dats = self.get_dats(ang, run, pos=pos)\n if pos is not None:\n self.plot_points(dats[1])\n else:\n for _pos, _pts in dats:\n self.plot_points(_pts)", "def plot_sources(azimuth, elevation, distance=1.6):\n ax = Axes3D(plt.figure())\n azimuth = np.deg2rad(azimuth)\n elevation = np.deg2rad(elevation-90)\n x = distance * np.sin(elevation) * np.cos(azimuth)\n y = distance * np.sin(elevation) * np.sin(azimuth)\n z = distance * np.cos(elevation)\n ax.scatter(x, y, z, c='b', marker='.')\n ax.scatter(0, 0, 0, c='r', marker='o')", "def plot_points(self, _pts, color='b', marker='o'):\n xs, ys, zs = _pts[:,0], _pts[:,1], _pts[:,2]\n self.fig_ax.scatter(xs, ys, zs, color=color, marker=marker)\n plt.draw()", "def plotProperty(self, x, y, z = [], idx = None, col = 1, row = 1, N = 1, ax = None,\\\n save = False, dpi = 100, format = \"pdf\", verbose = 1, handle = False,\\\n translation = None, title = None, other = None, ab = [],\\\n m = \"o\", ms = 2, leg = True, ylim = None, xlim = None, xscale = \"linear\",\\\n yscale = \"linear\", **kwargs):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n \n if type(x) == str: x = [x]\n if type(y) == str: y = [y]\n if type(z) == str: z = [z]\n if len(x) != len(y):\n string = \"Length x (%i) and y (%i) must be the same\" % (len(x), len(y))\n ut.infoPrint(string)\n return\n\n if len(z) > 0 and len(x) != len(z):\n string = \"Length x (%i) and y (%i) and z (%i) must be the same\"\\\n % (len(x), len(y), len(z))\n ut.infoPrint(string)\n return\n\n m = kwargs.pop(\"marker\", m)\n ls = kwargs.pop(\"linestyle\", \"none\")\n ms = kwargs.pop(\"markersize\", ms)\n\n if len(m) == 1: m = m * len(x)\n if isinstance(ab, (int, np.integer)): ab = [ab]\n\n x_data, x_lbl, x_leg = self.getData(idx = idx, var = x, ab = ab, translation = translation,\\\n compact 
= True, verbose = verbose, other = other)\n y_data, y_lbl, y_leg = self.getData(idx = idx, var = y, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n if len(x_data) != len(y_data): return\n\n if len(z) > 0:\n z_data, z_lbl, z_leg = self.getData(idx = idx, var = z, ab = ab, translation = translation,\\\n compact = True, verbose = verbose, other = other)\n\n if len(x_data) != len(y_data) != len(z_data) or z_data == []: return\n else:\n z_data = None\n\n hP = []\n if not handle:\n hFig = plt.figure()\n hAx = plt.subplot(row, col, N)\n else:\n hAx = ax\n\n if z_data is None:\n\n kwargs.pop(\"vmin\", None)\n kwargs.pop(\"vmax\", None)\n kwargs.pop(\"colormap\", None)\n\n for i in range(len(x_data)):\n\n tP = hAx.plot(x_data[i].T, y_data[i].T, linestyle = ls, marker = m[i],\\\n markersize = ms, **kwargs)\n\n [hP.append(lines) for lines in tP]\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 5: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 5: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n\n else:\n zmin = np.min([np.min(i) for i in z_data])\n zmax = np.max([np.max(i) for i in z_data])\n\n cm = kwargs.pop(\"colormap\", \"plasma\")\n cmap = plt.cm.get_cmap(cm)\n vmin = kwargs.pop(\"vmin\", zmin)\n vmax = kwargs.pop(\"vmax\", zmax)\n c = kwargs.pop(\"color\", 'b')\n lw = kwargs.pop(\"linewidth\", 1.2)\n\n\n for i in range(len(x_data)):\n\n if np.ndim(x_data[i]) == 1: x_data[i] = x_data[i][None, :]\n if np.ndim(y_data[i]) == 1: y_data[i] = y_data[i][None, :]\n if np.ndim(z_data[i]) == 1: z_data[i] = z_data[i][None, :]\n\n if (np.shape(z_data[i]) != np.shape(x_data[i])) and\\\n (np.shape(z_data[i]) != np.shape(y_data[i])) and\\\n (z_data[i].shape[0] != 1):\n string = \"Ambiguous z data %s with x %s and y %s\"\\\n % (np.shape(z_data[i]), np.shape(x_data[i]), np.shape(y_data[i]))\n ut.infoPrint(string)\n return\n \n j,k,l = (0, 0, 0)\n for ii, t in enumerate(translation):\n\n tP = hAx.scatter(x_data[i][j, :], y_data[i][k, :], c = z_data[i][l, :],\\\n vmin = vmin, vmax = vmax, cmap = cmap, marker = m[i],\\\n label = \"\", s = ms, linewidth = lw, **kwargs)\n\n hP.append(tP)\n\n if np.shape(x_data[i])[0] > 1: j += 1\n if np.shape(y_data[i])[0] > 1: k += 1\n if np.shape(z_data[i])[0] > 1: l += 1\n\n if leg:\n ncol = 1\n if len(x_leg) > len(y_leg):\n if len(x_leg) > 4: ncol = 2\n hAx.legend(x_leg, ncol = ncol)\n else:\n if len(y_leg) > 4: ncol = 2\n hAx.legend(y_leg, ncol = ncol)\n \n if not handle: plt.colorbar(hP[0], label = z_lbl[0])\n\n if ylim is not None:\n hAx.set_ylim(bottom = ylim[0], top = ylim[1])\n if xlim is not None:\n hAx.set_xlim(left = xlim[0], right = xlim[1])\n\n hAx.set_yscale(yscale)\n hAx.set_xscale(xscale)\n hAx.set_xlabel(x_lbl[0])\n hAx.set_ylabel(y_lbl[0])\n if title is None:\n hAx.set_title(self.filename)\n else:\n hAx.set_title(title)\n\n if handle: \n return\n\n \"\"\"Annotating plot marker\"\"\"\n hP[0].set_pickradius(2)\n anP = hAx.plot([], [], marker = 'o', ms = 6, color = 'k', mew = 2, mfc = 'None',\\\n linestyle = 'None')\n\n plt.tight_layout()\n\n \"\"\"Function to allow clickable points to display information\"\"\"\n def click(event):\n if event.inaxes == hAx:\n\n for line in hP:\n cont, ind = line.contains(event)\n if cont:\n break\n\n if cont:\n if z_data is not None:\n x = line.get_offsets()[:, 0]\n y = line.get_offsets()[:, 1]\n else:\n x, y = line.get_data()\n\n xSel = x[ind[\"ind\"]]\n ySel = y[ind[\"ind\"]]\n\n pPos = hAx.transData.transform((xSel, ySel))\n pDist = np.linalg.norm(pPos 
- [[event.x, event.y]], axis = 1)\n index = ind[\"ind\"][np.argmin(pDist)]\n anP[0].set_data(x[ind[\"ind\"]], y[ind[\"ind\"]])\n for n, i in enumerate(ind[\"ind\"]):\n string = \"Idx: %i (%.4f, %.4f) | Nr Points: %i\"\\\n % (idx[i], x[i], y[i], len(ind[\"ind\"]))\n\n if n == 0: \n print(\"=\" * len(string))\n print(string)\n if n == len(ind[\"ind\"]) - 1: \n print(\"=\" * len(string))\n\n hFig.canvas.draw_idle()\n else:\n anP[0].set_data([], [])\n hFig.canvas.draw_idle()\n\n if save:\n if save is True:\n ut.save_fig(filename = \"PropertyPlot.%s\" % format, format = format,\\\n dpi = dpi, verbose = verbose)\n else:\n ut.save_fig(filename = save, format = format, dpi = dpi,\\\n verbose = verbose)\n plt.close()\n else:\n hFig.canvas.mpl_connect(\"button_release_event\", click)\n plt.show()", "def draw_point(self, pos: Vec3, properties: Properties) -> None:\n raise NotImplementedError", "def plot_xyzt(grbdir,ax, x, y, z, t):\n global runconf\n\t\n colors = ['blue', 'gray', 'red', 'black']\n names = ['X', 'Y', 'Z', grbdir]\n zdirs = ['x', 'y', 'z', None]\n\n mkffile = runconf['mkffile']\n trigtime = runconf['trigtime']\n ra_tran = runconf['ra']\n dec_tran = runconf['dec']\n mkfdata = fits.getdata(mkffile, 1)\n window = 10\n sel = abs(mkfdata['time'] - trigtime) < window\t\n \n earthx = -np.median(mkfdata['posx'][sel])\n earthy = -np.median(mkfdata['posy'][sel]) \n earthz = -np.median(mkfdata['posz'][sel]) \n \n earth_vec_mag = np.sqrt(earthx**2 + earthy**2 + earthz**2)\n \n earth = coo.SkyCoord(earthx, earthy, earthz, frame='icrs', representation='cartesian')\n \t\t\t\n ax.set_xlim(-1.2,1.2)\n ax.set_ylim(-1.2,1.2)\n ax.set_zlim(-1.2,1.2)\n\n for count, dirn in enumerate([x, y, z, t]):\n xx, yy, zz = dirn.cartesian.x.value, dirn.cartesian.y.value, dirn.cartesian.z.value\n ax.quiver(0, 0, 0, xx, yy, zz, color=colors[count])\n ax.text(xx, yy, zz, names[count], zdirs[count])\n\t\n ax.quiver(0,0,0,earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,color='green') \n ax.text(earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,'Earth')\n \n #ax.set_xlabel(\"RA = 0\")\n #ax.set_zlabel(\"Pole\")\n return", "def draw_waypoints(world, waypoints, z=0.5):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.3, life_time=1.0)", "def draw_waypoints(world, waypoints, z=0.01):\n for wpt in waypoints:\n wpt_t = wpt.transform\n begin = wpt_t.location + carla.Location(z=z)\n angle = math.radians(wpt_t.rotation.yaw)\n end = begin + carla.Location(x=math.cos(angle), y=math.sin(angle))\n world.debug.draw_arrow(begin, end, arrow_size=0.1, life_time=1.0)", "def show_traces(x_data, z_data, fig=100, direction='h', title=None):\n plt.figure(fig)\n plt.clf()\n if direction == 'v' or direction == 'vertical':\n for ii, l in enumerate(z_data.T):\n c = []\n c = plt.cm.jet(float(ii) / z_data.shape[1])\n plt.plot(x_data, l, '', color=c)\n if title is None:\n title = 'Blue: left vertical lines, red: right lines'\n plt.title(title)\n else:\n for ii, l in enumerate(z_data):\n c = []\n c = plt.cm.jet(float(ii) / z_data.shape[0])\n plt.plot(x_data, l, '', color=c)\n if title is None:\n title = 'Blue: top lines, red: bottom lines'\n plt.title(title)\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Signal (a.u.)')", "def point (p, direction: str):\n def wrap (magnitude: int):\n change = changes [direction]\n return 
(\n p [0] + (change [0] * magnitude),\n p [1] + (change [1] * magnitude),\n )\n return wrap", "def plot_vector(c, color='k', start=0, linestyle='-'):\n return plt.arrow(np.real(start), np.imag(start), np.real(c), np.imag(c),\n linestyle=linestyle, head_width=0.05,\n fc=color, ec=color, overhang=0.3, length_includes_head=True)", "def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def visualize_3d(grbdir,x, y, z, t, thetax, thetay, name):\n # Set ax.azim and ax.elev to ra, dec\n global runconf\n\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n plt.suptitle(r\"Visualisation of {name} in 3d:$\\theta_x$={tx:0.1f},$\\theta_y$={ty:0.1f}\".format(name=name, tx=thetax, ty=thetay))\n # Z\n ax = plt.subplot(2, 2, 1, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = z.ra.deg\n ax.elev = z.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI pointing (z)\")\n\n # Transient\n ax = plt.subplot(2, 2, 2, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = t.ra.deg\n ax.elev = t.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from nominal \\n transient 
direction\")\n\n # X\n ax = plt.subplot(2, 2, 3, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = x.ra.deg\n ax.elev = x.dec.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI X axis\")\n\n # Z\n ax = plt.subplot(2, 2, 4, projection='3d')\n plot_xyzt(grbdir,ax, x, y, z, t)\n ax.azim = y.ra.deg\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n add_satellite(ax, x, y, z)\n ax.set_title(\"View from CZTI Y axis\")\n\n return", "def mri_point_plot(self, vcol=1):\n img = self.voxels\n points = self.point_position \n ax = []\n fig = plt.figure(figsize=(9, 8))\n # TODO make this setable in the function call\n columns = 3\n rows = 2\n\n for i in range(points.shape[0]):\n im_slice = int(np.round(points[i, vcol]))\n if vcol == 0:\n im = img[im_slice, :, :]\n elif vcol == 1:\n im = img[:, im_slice, :]\n else:\n im = img[:, :, im_slice]\n ax.append( fig.add_subplot(rows, columns, i+1))\n ax[-1].set_title(\"Image depth: \"+str(im_slice)) # set title\n plt.imshow(im)\n plot_cols = np.array([0, 1, 2])\n plot_cols = plot_cols[plot_cols != vcol]\n plt.plot(points[i, min(plot_cols)], points[i, max(plot_cols)], 'ro')\n\n plt.show()", "def SpeedDirPlot(t,u,v,convention='current',units='m s^{-1}',color1='b',color2='r'):\r\n import airsea\r\n \r\n Dir, Spd = airsea.convertUV2SpeedDirn(u,v,convention=convention)\r\n \r\n \r\n ax = range(2)\r\n h = range(2)\r\n fig = plt.gcf()\r\n ax[0] = fig.gca()\r\n \r\n \r\n # Left axes\r\n h[0] = ax[0].fill_between(t, Spd, color=color1,alpha=0.7)\r\n # Make the y-axis label and tick labels match the line color.\r\n ax[0].set_ylabel('Speed [$%s$]'%units, color=color1)\r\n for tl in ax[0].get_yticklabels():\r\n tl.set_color(color1)\r\n\r\n #Right axes\r\n ax[1] = ax[0].twinx() # This sets up the second axes\r\n ax[1].plot(t, Dir, '.',color=color2)\r\n ax[1].set_ylabel(\"Dir'n [$\\circ$]\", color=color2)\r\n ax[1].set_ylim([0,360])\r\n ax[1].set_yticks([0,90,180,270])\r\n ax[1].set_yticklabels(['N','E','S','W'])\r\n for tl in ax[1].get_yticklabels():\r\n tl.set_color(color2)\r\n \r\n plt.setp( ax[0].xaxis.get_majorticklabels(), rotation=17 )\r\n \r\n return ax, h", "def vect_fig():\r\n ax = move_spines()\r\n \r\n ax.set_xlim(-5, 5)\r\n ax.set_ylim(-5, 5)\r\n ax.grid()\r\n vecs = [[2, 4], [-3, 3], [-4, -3.5]] # lista de vectores\r\n for v in vecs:\r\n ax.annotate(\" \", xy=v, xytext=[0, 0],\r\n arrowprops=dict(facecolor=\"blue\",\r\n shrink=0,\r\n alpha=0.7,\r\n width=0.5))\r\n ax.text(1.1 * v[0], 1.1 * v[1], v)", "def xyplane(draw, r, x, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [x,-extent,0],\n [x,extent,0],\n [x,extent,extent*2],\n [x,-extent,extent*2]\n ]\n )\n pln = np.dot(pln,np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))", "def plot(self,\n name: str,\n G_list: list = None,\n V_goal_list: list = None,\n opt_path: list = None):\n colorscales = ['Reds', 'Greens', 'Blues', 'Magentas']\n color = ['red', 'green', 'blue', 'magenta']\n pd = []\n\n if self.d == 3:\n X = []\n Y = []\n Z = []\n if opt_path:\n for i, path in enumerate(opt_path):\n X.clear(), Y.clear(), Z.clear()\n for state in path:\n X += [state[0]]\n Y += [state[1]]\n Z += [state[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, marker=dict(color=color[i], size=5), name='Path_M' + str(i)))\n\n if G_list:\n X.clear(), 
Y.clear(), Z.clear()\n for G in G_list:\n for e in G.E.values():\n X += [G.V[e.node_a].value[0], G.V[e.node_b].value[0], None]\n Y += [G.V[e.node_a].value[1], G.V[e.node_b].value[1], None]\n Z += [G.V[e.node_a].value[2], G.V[e.node_b].value[2], None]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='lines', showlegend=True,\n line=dict(color='rgb(125,125,125)', width=0.5),\n hoverinfo='none', name='Tree'))\n pd.append(go.Scatter3d(x=[self.start[0]], y=[self.start[1]], z=[self.start[2]],\n mode='markers', marker=dict(color='red', size=5), name='Start'))\n\n if V_goal_list:\n X.clear(), Y.clear(), Z.clear()\n for i, V in enumerate(V_goal_list):\n for j in V:\n X += [G_list[i].V[j].value[0]]\n Y += [G_list[i].V[j].value[1]]\n Z += [G_list[i].V[j].value[2]]\n pd.append(go.Scatter3d(x=X, y=Y, z=Z, mode='markers',\n marker=dict(color='magenta', size=5),\n name='Intersection nodes'))\n\n if self.name in ['3d_point_wo_obstacles', '3d_point_w_obstacles']:\n for i, m in enumerate(self.manifolds):\n limits = [self.lim_lo[0], self.lim_up[0], self.lim_lo[1], self.lim_up[1]]\n X_m, Y_m, Z_m = m.draw(limits=limits)\n\n if m.draw_type == \"Scatter\":\n pd.append(go.Scatter3d(x=X_m, y=Y_m, z=Z_m, showlegend=False, mode='markers',\n marker=dict(color=color[i], size=5)))\n elif m.draw_type == \"Surface\":\n pd.append(go.Surface(x=X_m, y=Y_m, z=Z_m, opacity=0.8, showscale=False,\n colorscale=colorscales[i]))\n\n for obs in self.obstacles:\n plot_box(pd=pd, pos=np.array([0., 0., obs[0]]), quat=np.array([0., 0., 0., 1.]), size=np.array(obs[1:]))\n\n fig = go.Figure(data=pd, layout=go.Layout(yaxis=dict(scaleanchor=\"x\", scaleratio=1)))\n plot(fig, filename='plots/task_' + self.name + '_' + name + '.html', auto_open=True)", "def plot_points(points,**kw):\n try:\n fmt = kw.pop('fmt')\n except:\n fmt='k'\n try:\n label = kw.pop('label')\n except:\n label = None\n npts = len(points)\n if npts == 0: return\n xy = num.zeros((npts,2))\n for j in range(npts):\n v = points[j]\n xy[j,0] = v[0]\n xy[j,1] = v[1]\n idx = num.argsort(xy[:,0])\n xy = xy[idx]\n for j in range(len(xy)):\n if j < npts - 1:\n pyplot.plot([0.,xy[j,0]],[0,xy[j,1]],fmt,**kw)\n else:\n pyplot.plot([0.,xy[j,0]],[0,xy[j,1]],fmt,label=label,**kw)", "def plotTimeDepthInteract(d2,d3,v1,v2,v3):\n d = np.array((0.,d2,d3), dtype=float)\n v = np.array((v1,v2,v3), dtype=float)\n plotTimeDepth(d,v)", "def plot_3d_vector(pa):\n\n # Changeable variables\n al = 0.01 # arrow length\n rgba = (0.3, 0.3, 0.3, 0.8) # rgba for panels\n lw = 1.5 # changes thickness of arrow\n\n X, Y, Z, U, V, W = zip(*pa)\n A = np.sqrt(np.power(X, 2) + np.power(Y, 2))\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n q = ax.quiver(X[::peo3], Y[::peo3], Z[::peo3], U[::peo3], V[::peo3], W[::peo3], A,\n length=al, lw=lw)\n q.set_array(np.random.rand(10))\n plt.colorbar(q)\n ax.w_xaxis.set_pane_color(rgba)\n ax.w_yaxis.set_pane_color(rgba)\n ax.w_zaxis.set_pane_color(rgba)\n ax.set_zlabel(\"Height\")\n ax.set_title(r\"$\\mu$-PIV vector plot, %s, %s\" % (shark_species, sample_area))\n\n plt.show()\n return", "def path(T = 100, period = 1):\n #source\n x, y, z=source \n\n answer=[[0, 0, 0]]\n X=[x]\n Y=[y]\n Z=[z]\n \n # plotting\n fig=plt.figure()\n ax=Axes3D(fig)\n \n # plotting (axes)\n ax.set_xlabel('XX')\n ax.set_ylabel('YY')\n ax.set_zlabel('ZZ')\n \n \n d=0\n x1, y1, z1=x0, y0, z0\n \n #TODO: do while runs < nps\n #TODO: do while E > e_cutoff\n for t in xrange(T):\n \n # direction cossines\n costheta_x=random.uniform(-1, 1)\n costheta_y=random.uniform(-1, 1)\n 
costheta_z=random.uniform(-1, 1) \n \n dx=(1/mu)*costheta_x\n dy=(1/mu)*costheta_y\n dz=(1/mu)*costheta_z\n# print dx, mu\n# print dy, mu\n# print dz, mu \n \n \n x+=dx\n y+=dy\n z+=dz\n \n d+=distance(x1, y1, z1, x, y, z)\n \n x1, y1, z1=x, y, z\n \n answer.append([t+1, x, y, z])\n X.append(x)\n Y.append(y)\n Z.append(z) \n\n \n ax.scatter3D(X[1:2], Y[1:2], Z[1:2], c = 'r', marker = 's', linewidths = 1)\n ax.scatter3D(X, Y, Z, c = 'b', marker = 'o', linewidths = 1)\n \n d=distance(x, y, z, x0, y0, z0) \n \n print \"Initial position: \", \"(%.2f,%.2f,%.2f)\"%(X[0], Y[0], Z[0])\n print \"Final position: \", \"(%.2f,%.2f,%.2f)\"%(X[-1], Y[-1], Z[-1])\n print \"Nº steps: \", t+1\n print \"Total path length: \", d\n \n \n fig=plt.figure()\n ax=fig.add_subplot(111)\n ax.plot(X[0], Y[0], 's', color = 'g', ms = 9)\n ax.plot(X, Y, '-o', color = 'b', ms = 4)\n ax.plot(X[-1], Y[-1], 's', color = 'r', ms = 9)\n plt.show()\n \n #summary() \n \n \n return answer", "def plot_traj(ax,traj,style,color,label,mode, timestep = None):\n x = []\n y = []\n i = 0.0\n # traj = traj +zs [traj[0]]\n for co in traj:\n rotX, rotY, rotZ = quaternion_to_euler(co[4], co[5], co[6], co[7])\n if mode == 'xy':\n x.append(co[0+1])\n y.append(co[1+1])\n if mode == 'xz':\n x.append(co[0+1])\n y.append(co[2+1])\n if mode == 'yz':\n x.append(co[1+1])\n y.append(co[2+1])\n\n if mode == 'rotx':\n x.append(i)\n y.append(rotX)\n if mode == 'roty':\n x.append(i)\n y.append(rotY)\n if mode == 'rotz':\n x.append(i)\n y.append(rotZ)\n\n if mode == 'ax':\n x.append(i)\n y.append(co[1])\n if mode == 'ay':\n x.append(i)\n y.append(co[2])\n if mode == 'az':\n x.append(i)\n y.append(co[3])\n i += timestep\n\n ax.plot(x,y,style,color=color,label=label)", "def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()", "def PointDir(point, dir):\n\n point = arg.getvector(point, 3)\n dir = arg.getvector(dir, 3)\n # self.P = B;\n # self.Q = A+B;\n \n return Plucker(np.r_[np.cross(dir, point), dir])", "def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,", "def PlotAntsPlane():\n\n # load and shrink airplane\n airplane = vtkInterface.PolyData(planefile)\n airplane.points /= 10\n # pts = airplane.GetNumpyPoints() # gets pointer to array\n # pts /= 10 # shrink\n\n # rotate and translate ant so it is on the plane\n ant = vtkInterface.PolyData(antfile)\n ant.RotateX(90)\n ant.Translate([90, 60, 15])\n\n # Make a copy and add another ant\n ant_copy = ant.Copy()\n ant_copy.Translate([30, 0, -10])\n\n # Create plotting object\n plobj = vtkInterface.PlotClass()\n plobj.AddMesh(ant, 'r')\n plobj.AddMesh(ant_copy, 'b')\n\n # Add airplane mesh and make the color equal to the Y position\n plane_scalars = airplane.points[:, 1]\n plobj.AddMesh(airplane, scalars=plane_scalars, stitle='Plane Y\\nLocation')\n plobj.AddText('Ants and Plane Example')\n plobj.Plot()", "def draw_point(env, point, size=10, color=(0, 1, 0)):\n iktype = orpy.IkParameterizationType.Translation3D\n ikparam = orpy.IkParameterization(point, iktype)\n h = orpy.misc.DrawIkparam2(env, ikparam, linewidth=size, coloradd=color)\n return h", "def trajectoire(self):\n trajx = []\n trajy = []\n for i in range(0, len(self.pos)):\n trajx.append(self.pos[i].x)\n trajy.append(self.pos[i].y)\n plt.plot(trajx, trajy) # color=self.color)\n plt.show()", "def 
draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):\n\n # set up data\n dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)\n varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)\n\n colors = utils.get_nrelcolors()\n \n fig = plt.figure(figsize=(8,2.5))\n ax = fig.add_subplot(111)\n\n ax.scatter(metdat[dircol], metdat[varcol], marker='o',facecolor='w',color='k',lw=0.5,alpha=0.7)\n ax.set_xlim([0,360])\n\n for ii in range(len(exclude_angles)):\n ax.axvspan(exclude_angles[ii][0], exclude_angles[ii][1], alpha=0.1, color=colors[basecolor][0])\n ax.set_title(r'$z={}$ m'.format(vertloc))\n ax.set_xlabel(r'Wind Direction [$^\\circ$]')\n ax.set_ylabel(catinfo['labels'][category])\n \n return fig, ax#, leg", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def move_point(p, direction, d=1):\n direction_guard(direction)\n x, y = p\n dx, dy = directions[direction]\n return (x + dx * d, y + dy * d)", "def translate_point(pt, length, direction):\n if isinstance(direction,float):\n # direction is a float (in radians)\n return (pt[0]+length*np.cos(direction), pt[1]+length*np.sin(direction))\n elif str(direction)==\"NORTH\":\n return (pt[0], pt[1]+length)\n elif str(direction)==\"SOUTH\":\n return (pt[0], pt[1]-length)\n elif str(direction)==\"WEST\":\n return (pt[0]-length, pt[1])\n elif str(direction)==\"EAST\":\n return (pt[0]+length, pt[1])", "def plot_points(L, color):\r\n \r\n X = list()\r\n Y = list()\r\n for p in L:\r\n X.append(p[0])\r\n Y.append(p[1])\r\n plt.scatter(X, Y, c=color)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def vis_points(data,f1,f2):\n if np.isnan(data).any():\n return\n \n plt.scatter(data[:,f1], data[:,f2], alpha=0.2, c='b')\n plt.xlim(lims)\n plt.ylim(lims)", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = self.points_radius\n p.conversion = False\n p.show(self.context)", "def plot2DSet(desc, lab):\n positif = desc[lab == 1]\n negatif = desc[lab == -1]\n\n plt.scatter(positif[:,0], positif[:,1], marker='o', c='#0000FF')\n plt.scatter(negatif[:,0], 
negatif[:,1], marker='x', c='#FF0000')\n plt.grid(True)", "def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")", "def plot_line(unit_vect, point, array):\n x_vals = []\n y_vals = []\n z_vals = []\n for i in array:\n x_vals.append(unit_vect[0] * i + point[0])\n y_vals.append(unit_vect[1] * i + point[1])\n z_vals.append(unit_vect[2] * i + point[2])\n\n return [x_vals, y_vals, z_vals]", "def showPoints(surface, points):\n for point in points:\n point.show(surface)", "def plotPoints(img, points, radius = 3, color= (0, 0, 255)):\n\tfor pt in points:\n\t\tdrawCircle(img, pt, radius = radius, color = color)", "def drawPointCloud(points, ax, color=None):\n if len(points.shape) != 2 or points.shape[0] != 3:\n raise ValueError(\"'points' must be 3xN\")\n if color == None:\n color = __color_cycle.next()\n elif color in (0, 1, 2):\n color = points[color, :]\n ax.scatter(points[0,:].T, points[1,:].T, points[2,:].T, c=color)", "def plot_directory_numex(path, vals, param='density', outname=None, show=True,\n xscale=1e-3,yscale=1e2):\n #vals = arange(2300.0, 2800.0, 50.0)\n outdirs = np.sort(os.listdir(path))\n plt.figure()\n\n # Plot surface profiles for each parameter\n for val,outdir in zip(vals,outdirs):\n pointsFile = os.path.join(path, outdir, 'points.h5')\n print(pointsFile)\n x_fem, ur_fem, uz_fem = pu.extract_points(pointsFile, output=True, adjustRadial=True)\n x_fem = x_fem / xscale\n ur_fem = ur_fem / yscale\n uz_fem = uz_fem / yscale\n l, = plt.plot(x_fem,uz_fem,'.-',label=str(val))\n plt.plot(x_fem,ur_fem,'.-',color=l.get_color())\n\n # Annotate\n plt.axhline(color='k') #zero displacement line\n plt.title(param)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n plt.legend()\n\n if outname: plt.savefig(outname)\n if show: plt.show()", "def plotVector(self, prop, graph, vec, rangex=[], rangey=[]):\r\n if (not self.doMPL):\r\n graph('set data style linespoints')\r\n miny = 0; maxy = 0; minx = 0; maxx = 0;\r\n for i in range(0, vec.__len__()):\r\n if (vec[i][0] < minx):\r\n minx = vec[i][0]\r\n elif (vec[i][0] > maxx):\r\n maxx = vec[i][0]\r\n if (vec[i][1] < miny):\r\n miny = vec[i][1]\r\n elif (vec[i][1] > maxy):\r\n maxy = vec[i][1]\r\n if (vec.__len__() == 1):\r\n if (rangex.__len__() == 0):\r\n graph.set_range('xrange', (minx-0.5, maxx+0.5))\r\n else:\r\n graph.set_range('xrange', (rangex[0], rangex[1]))\r\n if (rangey.__len__() == 0):\r\n graph.set_range('yrange', (miny-0.5, maxy+0.5))\r\n else:\r\n graph.set_range('yrange', (rangey[0], rangey[1]))\r\n else:\r\n if (rangex.__len__() == 0):\r\n graph.set_range('xrange', (minx-0.1, maxx+0.1))\r\n else:\r\n graph.set_range('xrange', (rangex[0], rangex[1]))\r\n if (rangey.__len__() == 0):\r\n graph.set_range('yrange', (miny-0.1, maxy+0.1))\r\n else:\r\n graph.set_range('yrange', (rangey[0], rangey[1]))\r\n graph.plot(vec)\r\n else:\r\n figure(graph, (6,4))\r\n xlabel(self.graphLabelsX[graph])\r\n ylabel(self.graphLabelsY[graph])\r\n hh = 0\r\n datax = []\r\n datay = []\r\n while (hh < vec.__len__()):\r\n datax.append(vec[hh][0])\r\n datay.append(vec[hh][1])\r\n hh = hh + 1\r\n plot(datax, datay)\r\n draw()\r\n if ((self.pause != 0) and (prop.myStep % self.pause == 0)):\r\n \t print \"PRESS <RETURN> TO CONTINUE\"\r\n \t raw_input()", "def plot_vor(self,x,ax,tri=False):\n\n L = self.L\n grid_x, 
grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j in np.array([grid_x.ravel(), grid_y.ravel()]).T])\n\n c_types_print = np.tile(self.c_types,9)\n bleed = 0.1\n c_types_print = c_types_print[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n y = y[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(y))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n cols = self.cols\n if self.plot_scatter is True:\n for j,i in enumerate(np.unique(self.c_types)):\n ax.scatter(x[self.c_types==i, 0], x[self.c_types==i, 1],color=cols[i],zorder=1000)\n patches = []\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cols[c_types_print[i]],ec=(1,1,1,1)))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)\n if tri is not False:\n for TRI in tri:\n for j in range(3):\n a, b = TRI[j], TRI[np.mod(j + 1, 3)]\n if (a >= 0) and (b >= 0):\n X = np.stack((x[a], x[b])).T\n ax.plot(X[0], X[1], color=\"black\")", "def plotPoints(vec, centers = None, numcolors = 2):\n\n try:\n import matplotlib.pyplot\n from mpl_toolkits.mplot3d import Axes3D\n except:\n raise ImportError, \"matplotlib package not found.\"\n\n markers = ['o', '^', 'x']\n labels = [\"Cluster 1\", \"Cluster 2\", \"Morning Data\", \"Evening Data\"]\n handles = []\n count = 0\n \n fig = matplotlib.pyplot.figure()\n ax1 = fig.add_subplot(111)\n\n for i in range(len(vec)):\n for v in vec[i]:\n col = getColor(i, numcolors, decimal = True)\n col = (0, max(col[1] - 0.2, 0), col[2] - 0.1)\n ax1.scatter(v[1], v[2], color = col, s = 100, marker = markers[v[0]])\n\n matplotlib.pyplot.show()", "def draw_point(turt, pos, count):\r\n turt.goto(pos)\r\n turt.color(\"lawngreen\")\r\n turt.dot(8)\r\n turt.pu()\r\n turt.forward(5)\r\n turt.color(\"HotPink1\")\r\n turt.write(count, True, align=\"left\")\r\n turt.hideturtle()", "def plot_control_points(self, fig, ax, linewidth=1.25, linestyle='-.', color='red', markersize=5, markerstyle='o'):\n\n # One dimension (law of evolution)\n if self.ndim == 1:\n Px = np.real(self.P)\n u = np.linspace(0, 1, Px.size)\n line, = ax.plot(u, Px[0,:])\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n\n # Two dimensions (plane curve)\n elif self.ndim == 2:\n Px, Py = np.real(self.P)\n line, = ax.plot(Px, Py)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n # Three dimensions (space curve)\n elif self.ndim == 3:\n Px, Py, Pz = np.real(self.P)\n line, = ax.plot(Px, Py, Pz)\n line.set_linewidth(linewidth)\n line.set_linestyle(linestyle)\n line.set_color(color)\n line.set_marker(markerstyle)\n 
line.set_markersize(markersize)\n line.set_markeredgewidth(linewidth)\n line.set_markeredgecolor(color)\n line.set_markerfacecolor('w')\n line.set_zorder(4)\n # line.set_label(' ')\n\n else: raise Exception('The number of dimensions must be 2 or 3')\n\n return fig, ax", "def plot_quadrant_scatter(self, skip=None, prefix=''):\n\n return quadrant_scatter( self.map_properties(), skip=skip, prefix=prefix )", "def draw(self):\n pt = self.getPoint() # Centre of prism\n\n # Form top,left,right corners\n top = Vector2d(pt.z, pt.y + self.height/2)\n d = self.height*math.tan(self.angle/2)\n left = Vector2d(pt.z - d , pt.y - self.height/2)\n right = Vector2d(pt.z + d, pt.y - self.height/2)\n\n\n top.rotate(self.tilt)\n left.rotate(self.tilt)\n right.rotate(self.tilt)\n\n # Plot them out with plt.plot\n plot([top[0],left[0],right[0],top[0]],[top[1],left[1],right[1],top[1]],\"k\",lw=2.0)", "def plot_swim_vel_scatter(self, ax1, ax2, masked=False, transpose_coords=False,\r\n age_scale='log10',\r\n add_cbar=True, **kwargs):\r\n rec_swim = self.rec_swim\r\n nsegments = len(rec_swim)\r\n\r\n swim_u = rec_swim.swim_u\r\n swim_v = rec_swim.swim_v\r\n u_rel,v_rel = self.vel_from_hydro(vel=[swim_u,swim_v], \r\n rec_swim=rec_swim)\r\n #swim_spd = self.rec_swim['swim_spd']\r\n\r\n if nsegments==0:\r\n # happens when a the model output hasn't been extracted for this tag.\r\n print(\"Tag %s has no segments. Skipping plot_swim_vel_scatter\"%self.ID)\r\n return\r\n time_from_entry = rec_swim.tm - rec_swim.tm[0] + 1\r\n if age_scale=='log10':\r\n age_scaled = np.log10(time_from_entry)\r\n elif age_scale=='linear':\r\n age_scaled = time_from_entry\r\n else:\r\n raise Exception(\"Bad age_scale: %s\"%age_scale)\r\n\r\n u,v=u_rel,v_rel\r\n x_label=\"Up $\\leftrightarrow$ Down\"\r\n y_label=\"Left $\\leftrightarrow$ Right\"\r\n if transpose_coords:\r\n u,v=-v,u # this gives left=left\r\n x_label,y_label=y_label,x_label\r\n \r\n scat = ax1.scatter(u, v, marker='o', s=2.0, c=age_scaled)\r\n\r\n ax1.set_xlabel(x_label)\r\n ax1.set_ylabel(y_label)\r\n \r\n fsize = 6\r\n ax1.text(1.01, 0.90,\r\n \"\\n\".join( [ \"n = %d\"%nsegments, \r\n \"avg|u| = %4.2f\"%np.average(np.abs(u_rel)),\r\n \"avg|v| = %4.2f\"%np.average(np.abs(v_rel)) ]),\r\n va='top',transform=ax1.transAxes,\r\n fontsize=fsize)\r\n ax1.set_xlim([-0.5,0.5])\r\n ax1.set_ylim([-0.5,0.5])\r\n ax1.axhline(0, color='k',zorder=0)\r\n ax1.axvline(0, color='k',zorder=0)\r\n ax1.set_aspect('equal')\r\n clims = [ticks[0],ticks[-1]]\r\n scat.set_clim(clims)\r\n if add_cbar:\r\n cax = plt.gcf().add_axes([0.41,0.12,0.16,0.025]) # hardwired\r\n label = 'Time (seconds)'\r\n c1 = plt.gcf().colorbar(scat, cax=cax, orientation='horizontal')\r\n #c1 = plt.gcf().colorbar(scat)\r\n c1.set_label(label)\r\n c1.set_ticks(ticks)\r\n c1.set_ticklabels(tick_labels)\r\n # smoothed speed over ground\r\n us = rec_swim.us\r\n vs = rec_swim.vs\r\n uh,vh = self.vel_from_hydro(vel=[us,vs], rec_swim=rec_swim)\r\n u,v=uh,vh\r\n \r\n if transpose_coords:\r\n u,v=-v,u\r\n \r\n scat2 = ax2.scatter(u, v, marker='o', s=2.0, c=age_scaled)\r\n ax2.set_xlabel(x_label) \r\n ax2.set_ylabel(y_label)\r\n ax2.text(1.01, 0.90,\r\n \"\\n\".join( [\"n = %d\"%nsegments,\r\n \"avg|u| = %4.2f\"%np.average(np.abs(uh)),\r\n \"avg|v| = %4.2f\"%np.average(np.abs(vh))]),\r\n transform=ax2.transAxes,\r\n va='top',\r\n fontsize=fsize)\r\n ax2.axhline(0, color='k',zorder=0)\r\n ax2.axvline(0, color='k',zorder=0)\r\n ax2.set_xlim([-1,1])\r\n ax2.set_ylim([-1,1])\r\n ax2.set_aspect('equal')\r\n scat2.set_clim(clims)\r\n\r\n 
return", "def plot(self, values=None):\r\n plt.cla()\r\n plt.xlim([0, self.pond_size[0]])\r\n plt.ylim([0, self.pond_size[1]])\r\n plt.xticks(np.arange(self.pond_size[0]), [])\r\n for i in range(self.pond_size[0]):\r\n plt.text(i+0.4, -0.5, str(i))\r\n plt.yticks(np.arange(self.pond_size[1]), [])\r\n for i in range(self.pond_size[1]):\r\n plt.text(-0.5, i+0.4, str(i))\r\n\r\n # Draw the trajectory\r\n t_x = np.array([t[0] for t in self.trajectory])\r\n t_y = np.array([t[1] for t in self.trajectory])\r\n plt.plot(t_x+0.5, t_y+0.5, 'r-o')\r\n\r\n # Draw currents and values\r\n for x in range(self.pond_size[0]):\r\n for y in range(self.pond_size[1]):\r\n if values is not None:\r\n plt.text(x, y, '%.1f'%values[y, x])\r\n c = self.currents[y][x]\r\n assert len(c)==4\r\n for i in range(4):\r\n if c[i] != '0':\r\n head_size = 0.15 if c[i] == '1' else 0.3\r\n d = self.current_directions[i]\r\n plt.arrow(x+0.5-0.4*d[0], y+0.5-0.4*d[1], (0.8-head_size)*d[0], (0.8-head_size)*d[1],\r\n head_width=head_size, head_length=head_size, overhang=1.0)\r\n\r\n # Draw start and end states\r\n plt.gcf().gca().add_artist(plt.Circle((self.start_state[0]+0.5, self.start_state[1]+0.5), 0.4, color='r', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.end_state[0]+0.5, self.end_state[1]+0.5), 0.4, color='g', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.current_state[0]+0.5, self.current_state[1]+0.5), 0.25, color='b', alpha=0.5))\r\n plt.grid(True)\r\n plt.pause(0.2)", "def plotScatter(verts, data, coords=(1,2), comp=2):\n z = data[:,:,comp].flatten()\n x = verts[:,coords[0]]\n y = verts[:,coords[1]]\n\n # NOTE: either scatter or pcolor should work\n plt.figure()\n compDict = {0:'X',1:'Y',2:'Z'}\n #plt.gca().set_aspect('equal')\n plt.scatter(x, y, c=z, s=80, cmap=plt.cm.bwr)\n plt.title( compDict[comp] + ' Displacement' )\n plt.xlabel(compDict[coords[0]] + ' Distance [m]')\n plt.ylabel(compDict[coords[1]] + ' Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0\n , blAxes = True):\n if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):\n arg = list(arg)\n\n vs = sf.vs_()\n\n color = tuple(color) # color argment may be list/vector\n if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(\n arg,type(sf.sc.array([0,]))):\n from octnOp import ClOctonion\n if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):\n arg = [ (x.real, x.imag) for x in arg]\n elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):\n arg = [ x[1:4] for x in arg]\n\n if len(arg[0])==2:\n import visual.graph as vg\n global __obj2dDisplayGeneratedStt\n\n maxX = max([abs(elm[0]) for elm in arg])\n maxY = max([abs(elm[1]) for elm in arg])\n\n print \"maxX:\",maxX, \" maxY:\",maxY\n\n if (__obj2dDisplayGeneratedStt == None):\n if xyRate == True: # 11.01.16 to \n maxAt = max(maxX, maxY)\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600*maxX/maxAt,height=600*maxY/maxAt)\n else:\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600,height=600)\n #__bl2dDisplayGeneratedStt = True\n grphAt = vg.gcurve(color = color)\n for i in range(len(arg)):\n assert len(arg[i])==2, \"unexpeted length data:\"+str(arg[i])\n grphAt.plot(pos = arg[i])\n\n #return __obj2dDisplayGeneratedStt\n #import pdb; 
pdb.set_trace()\n #print \"debug:\",grphAt.gcurve.pos\n\n # plot start mark\n grphSqAt = vg.gcurve(color = color)\n pos0At = grphAt.gcurve.pos[0,:][:2]\n rateAt = 50\n for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]\n , [-maxY/rateAt, maxY/rateAt]):\n grphSqAt.plot(pos = pos0At+[x,y])\n \n grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])\n\n return grphAt # 09.02.04 to animate graph\n elif len(arg[0])==3:\n vs.scene.forward=(-1,+1,-1)\n vs.scene.up=(0,0,1)\n\n c = vs.curve( color = color )\n\n maxX, maxY, maxZ = 0,0,0\n for i in range(len(arg)):\n if maxX < abs(arg[i][0]):\n maxX = abs(arg[i][0])\n if maxY < abs(arg[i][1]):\n maxY = abs(arg[i][1])\n if maxZ < abs(arg[i][2]):\n maxZ = abs(arg[i][2])\n c.append( arg[i] )\n #print c.pos\n print \"maxX:\",maxX, \" maxY:\",maxY, \" maxZ:\",maxZ\n maxAt = max(maxX,maxY,maxZ)\n c.radius = maxAt/radiusRate\n\n vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)\n\n if blAxes == True:\n # draw axise\n vs.curve( pos=[(0,0,0), (maxAt,0,0)]\n , color=(1,0,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,maxAt,0)]\n , color=(0,1,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,0,maxAt)]\n , color=(0,1,1)\n , radius = maxAt/100 )\n #return vs.scene\n return c # 09.02.04 to animate graph\n else:\n assert False,\"unexpeted data:\"+str(arg)", "def linecut_points( **kwargs ):\n npoints = kwargs.get('npoints', 320)\n extents = kwargs.get('extents',None)\n lims = kwargs.get('lims', (-80.,80.))\n direc = kwargs.get('direc', (np.pi/2, 0.))\n origin = kwargs.get('origin', vec3(0.,0.,0.))\n\n if extents is not None:\n lims = (-extents, extents)\n\n # Prepare set of points for plot \n t = np.linspace( lims[0], lims[1], npoints )\n unit = vec3()\n th = direc[0]\n ph = direc[1] \n unit.set_spherical(1, th, ph) \n # Convert vec3s to ndarray\n unit = np.array(unit)\n origin = np.array(origin) \n #\n XYZ = origin + np.outer(t, unit)\n X = XYZ[:,0]\n Y = XYZ[:,1]\n Z = XYZ[:,2]\n \n return t, X, Y, Z, lims", "def PlotAnt():\n ant = vtkInterface.PolyData(antfile)\n ant.Plot(color='r', style='wireframe')", "def DrawDottedLine(self, dc, point, length, vertical):\r\n\r\n for i in xrange(0, length, 2):\r\n dc.DrawPoint(point.x, point.y)\r\n if vertical:\r\n point.y += 2\r\n else:\r\n point.x += 2", "def drawVector3D(x0,y0,z0,x1,y1,z1, vtype='normal'):\n dislin.vectr3(x0,y0,z0,x1,y1,z1, vectordict[vtype])", "def _plot(self, **kwargs):\n XY = self.positions\n plt.plot(XY[0,:], XY[1,:], 'o')\n plt.gca().set_aspect('equal')\n SS = np.abs(self.S)\n SS /= SS.max()\n\n for i in range(self.N):\n for j in range(self.N):\n if i == j or SS[i,j] < 1e-2:\n continue\n clr = 'r' if self.S[i,j]<0 else 'b'\n x, y = XY[:,i]\n r = XY[:,j] - XY[:,i]\n dx, dy = r\n rhat = r / np.sqrt((r**2).sum())\n ofsx, ofsy = 0.03 * rhat\n perpx, perpy = 0.005 * np.array([-rhat[1], rhat[0]])\n plt.arrow(x + ofsx + perpx, y + ofsy + perpy,\n r[0] - 2*ofsx, r[1] - 2*ofsy, color=clr,\n shape='right', width=0.01*SS[i,j],\n length_includes_head=True, head_width=0.02,\n linewidth=0, **kwargs)", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def plot_desired_velocity(self,id=1,dpi=150):\n fig = plt.figure(id)\n ax1 = fig.add_subplot(111)\n ax1.imshow(self.image,interpolation='nearest',\n extent=[self.xmin,self.xmax,self.ymin,self.ymax], origin='lower')\n 
ax1.imshow(self.door_distance,interpolation='nearest',\n extent=[self.xmin,self.xmax,self.ymin,self.ymax],alpha=0.7,\n origin='lower')\n step = 10\n ax1.quiver(self.X[::step, ::step],self.Y[::step, ::step],\n self.desired_velocity_X[::step, ::step],\n self.desired_velocity_Y[::step, ::step])\n #plt.savefig('.png',dpi=dpi)\n plt.draw()", "def plot_positives(positives):\n plt.scatter(positives[:,0], positives[:,1], label='Goal examples', marker='*', color='g', s=200)", "def plot_scatter_points(self):\n self.plot(1)", "def plot_scalp(v, channel):\n\n channelpos = [tts.channels[c] for c in channel]\n points = [calculate_stereographic_projection(i) for i in channelpos]\n x = [i[0] for i in points]\n y = [i[1] for i in points]\n z = v\n X, Y, Z = interpolate_2d(x, y, z)\n plt.contour(X, Y, Z, 20)\n plt.contourf(X, Y, Z, 20)\n #plt.clabel(im)\n plt.colorbar()\n plt.gca().add_artist(plt.Circle((0, 0), radius=1, linewidth=3, fill=False))\n plt.plot(x, y, 'bo')\n for i in zip(channel, zip(x,y)):\n plt.annotate(i[0], i[1])", "def vector_plot(tvects,is_vect=True,orig=[0,0,0]):\n\n if is_vect:\n if not hasattr(orig[0],\"__iter__\"):\n coords = [[orig,np.sum([orig,v],axis=0)] for v in tvects]\n else:\n coords = [[o,np.sum([o,v],axis=0)] for o,v in zip(orig,tvects)]\n else:\n coords = tvects\n\n data = []\n for i,c in enumerate(coords):\n X1, Y1, Z1 = zip(c[0])\n X2, Y2, Z2 = zip(c[1])\n vector = go.Scatter3d(x = [X1[0],X2[0]],\n y = [Y1[0],Y2[0]],\n z = [Z1[0],Z2[0]],\n marker = dict(size = [0,5],\n color = ['blue'],\n line=dict(width=5,\n color='DarkSlateGrey')),\n name = 'Vector'+str(i+1))\n data.append(vector)\n\n layout = go.Layout(\n margin = dict(l = 4,\n r = 4,\n b = 4,\n t = 4)\n )\n fig = go.Figure(data=data,layout=layout)\n #pio.write_html(fig,file='index.html',auto_open=False)\n #py.plot(fig, filename = 'gdp_per_cap4', auto_open=True)\n fig.show()", "def rotatePoint(self, point, dir=+1):\n pnew = np.zeros([len(point), point.shape[1], 2])\n pnew[:, :, 0] = point[:, :, 0]*self.ctheta + point[:, :, 1]*self.stheta*dir\n pnew[:, :, 1] = -point[:, :, 0] * \\\n self.stheta*dir + point[:, :, 1]*self.ctheta\n return pnew", "def plot_iter(V, Pi, params):\n n_rows = params['n_rows']\n n_cols = params['n_cols'] \n occ_grid = params['occ_grid']\n R = params['R']\n\n goal = params['goal']\n sink = params['sink']\n\n actions = ['left','right','up','down']\n\n fig1 = plt.figure(1, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if occ_grid[row, col] == 1:\n plt.text(col, n_rows - 1 - row, '0.0', color='k', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n else:\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(V[row, col]), \n color='b', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n\n fig2 = plt.figure(2, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if not Pi[row, col] == -1:\n plt.text(col, n_rows - 1 - row, actions[Pi[row, col]], \n color='k', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n 
color='r', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n fig2.canvas.draw()\n fig2.canvas.flush_events()", "def plot_dist_ony(z, dz, om, dom, dist, dh, name, mathname, filename=None):\n\n\n dist = dist/dh\n z = z * numpy.ones(dist.shape)\n om = om * numpy.ones(dist.shape)\n\n pylab.figure(figsize=(5.5,4.5)) \n\n\n pylab.contour(z, dist, om, 50)\n cb = pylab.colorbar()\n cb.ax.set_ylabel(r'$\\Omega_M = 1 - \\Omega_\\lambda$')\n \n pylab.xlim(z.min(), z.max())\n pylab.ylim(dist.min(), dist.max()) \n pylab.xlabel(\"redshift z\")\n pylab.ylabel(name + r': $'+mathname+'/D_H$')\n pylab.title(name)\n if filename is not None:\n prefix, extension = filename.split('.')\n pylab.savefig(prefix + '_' + mathname + '_ony.' + extension,\n bbox_inches=\"tight\")", "def plot_3d(trj: TrajaDataFrame, **kwargs) -> matplotlib.collections.PathCollection:\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_xlabel(\"x\", fontsize=15)\n ax.set_zlabel(\"time\", fontsize=15)\n ax.set_ylabel(\"y\", fontsize=15)\n title = kwargs.pop(\"title\", \"Trajectory\")\n ax.set_title(f\"{title}\", fontsize=20)\n ax.plot(trj.x, trj.y, trj.index)\n cmap = kwargs.pop(\"cmap\", \"winter\")\n cm = plt.get_cmap(cmap)\n NPOINTS = len(trj)\n ax.set_prop_cycle(color=[cm(1.0 * i / (NPOINTS - 1)) for i in range(NPOINTS - 1)])\n for i in range(NPOINTS - 1):\n ax.plot(trj.x[i : i + 2], trj.y[i : i + 2], trj.index[i : i + 2])\n\n dist = kwargs.pop(\"dist\", None)\n if dist:\n ax.dist = dist\n labelpad = kwargs.pop(\"labelpad\", None)\n if labelpad:\n from matplotlib import rcParams\n\n rcParams[\"axes.labelpad\"] = labelpad\n\n return ax", "def plot_phase(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,np.angle(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def scatter(self, points):\n assert points.shape[0] == 3\n self.points = points\n self.remove_scatter_plot()\n x, y, z = zip(*points.T)\n self.base_fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z,\n mode='markers',\n marker=dict(\n size=2,\n color=np.zeros(points.shape[1]),\n colorscale=\"RdBu\",\n colorbar=dict(\n title=\"Samples\",\n x=-0.2\n ),\n )\n ))", "def plot_pointing(self, *args, coord=\"tl\", **kwargs):\n # TODO: Generalize that function\n warnings.warn(\"Deprecated function needs update.\", DeprecationWarning)\n self.__check_attributes([\"F_{}_az\".format(coord), \" F_{}_el\".format(coord)])\n return kids_plots.checkPointing(self, *args, **kwargs)", "def plot_points(x, y):\n x_one = x[y == -1, :]\n x_two = x[y == 1, :]\n\n plt.scatter(x_one[:, 1], x_one[:, 2], marker='x', color='red')\n plt.scatter(x_two[:, 1], x_two[:, 2], marker='o', color='blue')", "def plot_initial_pos(pos, p_init):\n \n\tplt.figure(figsize=(8,8))\n\tfor posi in pos:\n\t\tplt.scatter(posi[0], posi[1])\n\t\n\tplt.xlabel('a', fontsize=16)\n\tplt.ylabel('b', fontsize=16)\n\tplt.xlim(p_init[0]-0.0005,p_init[0]+0.0005)\n\tplt.ylim(p_init[1]-0.0005,p_init[1]+0.0005)\n\tplt.show()", "def visualizeTrajectory(y, g):\n visualizeObs()\n x = np.linspace(-1.5, 1.5, 13)[1:-1]\n plt.plot(np.concatenate(([-1.5],x,[1.5])), np.concatenate(([0],y,[0])), color='black', marker='+')\n if g is not None:\n for i in range(y.size):\n plt.arrow(x[i], y[i], 0, -0.5*g[i], color='blue', head_width=0.05)", "def plot3D(self, diaphragmpoints=None, lungpoints=None, fig=None, ax=None, diaphragmcolor='r', 
lungcolor='g', size=2, howplot=0, dots=0):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n if diaphragmpoints is not None and lungpoints is not None:\n points = diaphragmpoints + lungpoints\n elif diaphragmpoints is not None:\n points = diaphragmpoints\n elif lungpoints is not None:\n points = lungpoints\n\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(points)):\n xpts.append(points[i][0])\n ypts.append(points[i][1])\n zpts.append(points[i][2])\n\n X = np.asarray(xpts)\n Y = np.asarray(ypts)\n Z = np.asarray(zpts)\n\n if howplot == 'wireframe':\n xpts, ypts, zpts = list(), list(), list()\n for i in range(len(pts)):\n xpts.append(pts[i][0])\n ypts.append(pts[i][1])\n zpts.append(pts[i][2])\n\n X = np.asarray([xpts])\n Y = np.asarray([ypts])\n Z = np.asarray([zpts])\n\n if dots == 1:\n ax.scatter(X, Y, Z, s=size, c='r', marker='o')\n\n ax.plot_wireframe(X, Y, Z)\n elif howplot == 1:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n else:\n ax.scatter(X, Y, Z, s=size, c=diaphragmcolor, marker='o')\n ax.plot_trisurf(X, Y, Z, linewidth=0.2, antialiased=True)\n\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()\n Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())\n Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())\n Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())\n\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\n plt.show()\n # fig.savefig('{}/diaphragm/{}.png'.format(DIR_RESULT))", "def scatter3d(self, x, y, z, filename=None, spot_cols=None, label=False, stem=False, \n label_font_size=6, rotation=134, elevation=48, interactive=False, squish_scales=False, \n spot_size=40, **kargs):\n assert filename, \"scatter(): Must provide a filename\" \n \n xdata = self.__v[x-1]\n ydata = self.__v[y-1]\n zdata = self.__v[z-1]\n \n fig = self.__draw.getfigure(**kargs)\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elevation, azim=rotation)\n \n cols = self.cols\n if spot_cols:\n cols = spot_cols \n \n ax.scatter(xdata, ydata, zdata, edgecolors=\"none\", c=cols, s=spot_size)\n if label:\n for i, lab in enumerate(self.labels):\n ax.text(xdata[i], ydata[i], zdata[i], lab, size=label_font_size, ha=\"center\", va=\"bottom\")\n \n if stem: # stem must go after scatter for sorting. Actually, not true right? 
matplotlib uses zorder for that...\n z_min = min(zdata)\n for x_, y_, z_ in zip(xdata, ydata, zdata): \n line = art3d.Line3D(*list(zip((x_, y_, z_min), (x_, y_, z_))), marker=None, c=\"grey\", alpha=0.1)\n ax.add_line(line)\n \n ax.set_xlabel(\"PC%s\" % (x,)) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s\" % (y,))\n ax.set_zlabel(\"PC%s\" % (z,))\n \n if \"logx\" in kargs and kargs[\"logx\"]:\n ax.set_xscale(\"log\", basex=kargs[\"logx\"])\n if \"logy\" in kargs and kargs[\"logy\"]:\n ax.set_yscale(\"log\", basey=kargs[\"logy\"])\n \n if squish_scales: \n # Don't worry about kargs, do_common_args will overwrite.\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n ax.set_zlim([min(zdata), max(zdata)])\n \n self.__draw.do_common_args(ax, **kargs)\n if \"zlims\" in kargs:\n ax.set_zlim([kargs[\"zlim\"][0], kargs[\"zlim\"][1]])\n \n if interactive:\n fig.show() # hope you are not on a cluster!\n \n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"scatter3d(): Saved 'PC%s' vs 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, z, real_filename))", "def phase_space_l(data, save=False):\n x, y, z, px, py, pz, t = data\n\n fig, ax = plt.subplots(1, 1, figsize=(5, 4))\n\n dz = z - np.mean(z)\n dp = pz / np.mean(pz) - 1\n colors = _cal_color(np.array([dz, dp]), True)\n\n ax.set(xlabel='dz (mm)', ylabel='dpz/pz')\n ax.scatter(dz, dp, c=colors, marker='+', label='phase space z',\n vmin=0, vmax=1, cmap='Reds')\n\n ax.axhline(0, color='k', ls=':', lw=0.5)\n ax.axvline(0, color='k', ls=':', lw=0.5)\n\n leg = ax.legend(loc=0)\n leg.legendHandles[0].set_color('r')\n x0, x1 = ax.get_xlim()\n y0, y1 = ax.get_ylim()\n ax.set_aspect((x1 - x0) / (y1 - y0))\n\n fig.tight_layout()\n\n if save:\n fig.savefig('phase_space_l.pdf', bbox_inches='tight')\n plt.show()", "def plot_1D_edp(self, start=(-10,25), end=(30,-20), N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X = rho[:,0]\n Y = rho[:,1]\n plt.figure()\n plt.plot(X, Y)", "def plot_points_simple(ax, points, paths=[], path_labels=[]):\n cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n \n ax.scatter(*points, alpha=0.1, c='k')\n# add functionality to ignore labels\n for path, label, col in zip(paths, path_labels, cols):\n path_points = np.array([points[:, u] for u in path]).transpose()\n ax.plot(*path_points, alpha=.8,label=label, c=col)\n ax.scatter(*path_points, c=col, alpha=0.6)\n \n ax.set(xlim=[0, 1], ylim=[0, 1])\n ax.set_axis_off()\n \n if path_labels:\n ax.legend()\n\n return ax", "def plot_distances(self, params=None, **kw):\n from ...ipyutils import plot_data\n if params is None:\n params = self.collocation_points()\n if 'distances' in kw:\n distances = kw.pop('distances')\n else:\n distances = self.point_distances(params=params)\n return plot_data(\n params[:-1], distances,\n **insert_missing(\n kw,\n figsize=(4, 2.4), dpi=110,\n ylog=True,\n xlabel=r\"curve parameter $\\lambda$\",\n ylabel=r\"coordinate distance\",\n )\n )", "def _get_visual_position(self, point: int) -> float:\n return point / self._param[\"n_v\"] + np.random.uniform() / \\\n self._param[\"n_v\"]", "def draw_pointcloud(ax, example):\n points = 
example['points'].cpu().detach().numpy()\n points_num = len(points)\n xs = np.empty([points_num])\n ys = np.empty([points_num])\n zs = np.empty([points_num])\n intensity = np.empty([len(points)])\n for j, point in enumerate(points):\n xs[j] = point[1]\n ys[j] = point[2]\n zs[j] = point[3]\n intensity[j] = point[4]\n\n intensity = intensity\n ax.scatter3D(xs, ys, zs, c=intensity, marker='.', s=0.3, cmap=plt.get_cmap('jet'))", "def printXY(self):\n print zip(self.x, self.y)", "def plot_activity(opts, points, activity, labels, plot_state=False):\n sort_ix = sort_weights(opts)\n activity[:,opts.state_size:] = activity[:,opts.state_size+sort_ix]\n\n x = np.arange(0, opts.state_size)\n # x = np.linspace(np.amin(points[:, 0]), np.amax(points[:, 0]))\n scale = 2 * np.pi / opts.state_size\n x_rad = x * scale\n cos, sin = np.cos(x_rad), np.sin(x_rad)\n if opts.velocity:\n y = np.linspace(np.amin(points[:, 1]), np.amax(points[:, 1]))\n else:\n y = np.zeros(1)\n\n x_mesh, y_mesh = np.meshgrid(x, y)\n cos, _ = np.meshgrid(cos, y)\n sin, _ = np.meshgrid(sin, y)\n if plot_state:\n nc, nr = 5, 4\n neurons = np.arange(opts.state_size) # state neurons\n else:\n nc, nr = 5, 8\n neurons = np.arange(opts.state_size, opts.rnn_size) # extra neurons\n\n\n f_linear, ax_linear = plt.subplots(ncols=nc, nrows=nr)\n # plt.suptitle('Linear Interpolated Data')\n\n c, r = 0, 0\n for i, n in enumerate(neurons):\n z_lin = griddata(points[:, :2], activity[:, n], (x_mesh, y_mesh), method='linear')\n plt.sca(ax_linear[r, c])\n # plt.title('Neuron {}'.format(n))\n plt.contourf(x, y, z_lin, cmap='RdBu_r')\n plt.axis('off')\n\n # find the global centroid\n if np.nanmax(z_lin) <= 0:\n z_lin -= np.nanmean(z_lin) # center activations at the median\n\n z_lin[np.isnan(z_lin)] = 0\n z_lin[z_lin < 0] = 0\n norm = np.sum(z_lin)\n\n cos_mean = np.sum(cos * z_lin) / norm\n sin_mean = np.sum(sin * z_lin) / norm\n com_rad = np.arctan2(sin_mean, cos_mean)\n com_x = (com_rad / scale) % 20\n com_y = np.sum(y_mesh * z_lin) / norm\n # plt.scatter(com_x, com_y, c='k')\n\n c += 1\n if c == nc:\n c = 0\n r += 1\n if r == nr:\n break\n # plt.tight_layout()\n plt.show()" ]
[ "0.62647605", "0.60552585", "0.6005312", "0.5723366", "0.57031566", "0.566268", "0.5660064", "0.56475013", "0.56423193", "0.5641629", "0.563067", "0.5624117", "0.56221503", "0.55907345", "0.556801", "0.55449003", "0.5540896", "0.5535063", "0.5534622", "0.5526678", "0.55239534", "0.5513872", "0.550216", "0.5495878", "0.54743713", "0.54531676", "0.54429543", "0.54367363", "0.54084945", "0.5406538", "0.54046977", "0.5378999", "0.53498703", "0.5349138", "0.5348523", "0.53473383", "0.53457475", "0.5343047", "0.53404", "0.5331444", "0.532555", "0.5307259", "0.5301934", "0.5297126", "0.52860904", "0.5276575", "0.5269205", "0.5264272", "0.52558064", "0.52542233", "0.52480763", "0.5247196", "0.52433354", "0.5228734", "0.5221531", "0.52186453", "0.521166", "0.5210928", "0.52097476", "0.5199962", "0.51961464", "0.51931566", "0.51877964", "0.5183999", "0.5181226", "0.51780957", "0.5176695", "0.5175619", "0.5169661", "0.51665646", "0.5164604", "0.51620805", "0.51597965", "0.51558787", "0.51522726", "0.51459354", "0.5140369", "0.5131526", "0.5130937", "0.5130845", "0.5128097", "0.51235414", "0.5116579", "0.5116484", "0.51120836", "0.5107562", "0.5104655", "0.5103442", "0.5100367", "0.5098148", "0.5092146", "0.5087752", "0.5086641", "0.5083785", "0.5078161", "0.50736564", "0.5065751", "0.50648713", "0.50610965", "0.5056216", "0.5055959" ]
0.0
-1
plot power spectrum of 2D array
def power2d(f,*kw):
    ft2=np.abs(np.fft.fft2(f))**2
    m=f.shape
    k=np.meshgrid(range(m[0]),range(m[1]))
    k=np.sqrt(k[0]**2+k[1]**2)
    a=2
    k0=1.0/a**0.5
    k1=1.0*a**0.5
    power=[]
    kk=[]
    while(k1 <= m[0]//2):
        kk.append((k0*k1)**0.5)
        w=np.where((k>k0) & (k <= k1))
        power.append(ft2[w].sum())
        k0=k1
        k1=k1*a
    pl.loglog(kk,power,*kw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_spectrum( data ):\n d = data[1]\n # rfft gives positive frequecies. Square to get power spectrum.\n fp = numpy.absolute( numpy.fft.rfft( d ) )**2\n freq = numpy.fft.fftfreq( d.shape[-1] )\n n = len(fp)\n\n # reshape stuff a bit. keep only positive freqs.\n fp = fp[1:-1]\n freq = freq[1:n-1]\n lrslope = linregress( numpy.log(freq[30:]), numpy.log(fp[30:]) )[0]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.loglog( freq, fp, label=\"Lin. reg.=\"+str(round( lrslope,1 )) )\n ax.legend( loc='lower left' )\n return fig", "def plot_spectrum(self, energy_array: np.ndarray) -> None:\n spectrum, bins = np.histogram(\n energy_array,\n range=(0, 400),\n bins=3000)\n plt.plot(bins[:-1], spectrum)\n plt.xlabel(\"Energy (keV)\")\n plt.ylabel(\"Counts\")", "def show_spectrum(image: np.ndarray):\n assert len(image.shape) == 2, 'image must be 2D'\n spectral = np.fft.fft2(image)\n spectral[0, 0] = 0 # Kill DC component\n spectrum = np.fft.fftshift(spectral) # Shift DC to center\n magnitude = np.log(np.abs(spectrum))\n plt.imshow(magnitude, cmap='gray')\n plt.show()", "def plot(self):\n plot_spectrum(self.data, self.fig, self.ax_e, self.ax_s, title = \"Solar spectrum\")", "def powerSpectrum(input, nfft):\n result = fft(input, nfft)\n result = np.power(np.absolute(result), 2)\n\n # myplot(result, 'Power Spectogram')\n\n return result", "def show_spectrum(h, title=\"\"):\n H = fft2(h)\n\n # Remember to plot the abs of the fft2(h)\n plt.imshow(np.abs(H))\n plt.gray()\n plt.title(title)\n plt.show()", "def plot_spectrum(data, name, fig):\n\n det = data.detectors[name]\n val = det.tallies\n\n E = [line[0] for line in det.grids['E']]\n Emax = det.grids['E'][-1][1]\n\n dE = np.roll(E, -1) - E\n dE[-1] = Emax - E[-1]\n inte = sum(val*dE)\n val = val/inte\n\n plt.figure()\n plt.loglog(E, val)\n plt.xlabel('E [MeV]')\n plt.ylabel('Normalized flux')\n plt.grid(True)\n plt.savefig(fig + '-' + name, dpi=300, bbox_inches=\"tight\")", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def plot(self):\n\t\tself.plotOfSpect()", "def plotPower(array):\r\n ny, nx = array.shape\r\n nyc = ny/2 - 1\r\n nxc = nx/2 - 1\r\n array2, r, theta = reproject_image_into_polar(array, [nyc,nxc])\r\n # Let's average over theta\r\n array3 = np.zeros(ny)\r\n for i in range(ny):\r\n array3[i] = np.sum(array2[i,:])/np.float(nx)\r\n \r\n return array3", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def plot_spectrum(self, ax, data, stat=None, label_axes=True):\n if len(data.shape) == 1:\n data = np.reshape(data, (1, len(data)))\n\n x_pow = np.abs(data)\n if stat == None:\n if self.scale_select.currentIndex() == 0:\n ax.plot(x_pow, label='linear')\n elif self.scale_select.currentIndex() == 1:\n ax.plot(10*np.log10(x_pow), label='decibels')\n elif self.scale_select.currentIndex() == 2:\n ax.plot(np.var(x_pow, axis=0), label='variance')\n elif self.scale_select.currentIndex() == 3:\n ax.plot(skew(x_pow, axis=0), label='skew')\n elif self.scale_select.currentIndex() == 4:\n ax.plot(kurtosis(x_pow, axis=0), label='kurtosis')\n else:\n \n if self.scale_select.currentIndex() == 1:\n x_pow = 10*np.log10(x_pow)\n if stat == 'median' or stat == 'med':\n ax.plot(np.median(x_pow, axis=0), label='median')\n if stat == 'min':\n ax.plot(np.min(x_pow, axis=0), label='min')\n if stat == 'max':\n ax.plot(np.max(x_pow, axis=0), label='max')\n \n plt.minorticks_on()\n if label_axes:\n self.updateFreqAxis(ax, n_ticks=10)\n plt.xlabel(\"Frequency\")\n plt.ylabel(\"Amplitude\")\n plt.legend()", "def plot_spectrum(wavetable: 
np.ndarray) -> None:\n ps = np.abs(np.fft.fft(wavetable)) ** 2\n\n time_step = 1 / 44100\n freqs = np.fft.fftfreq(wavetable.size, time_step)\n idx = np.argsort(freqs)\n\n plt.plot(freqs[idx], ps[idx])\n plt.show()", "def make_spectrum_figure(f):\n nb_freqs = int(f.readline().split()[0])\n freqs = read_list(f, nb_freqs)\n fluxes = read_list(f, nb_freqs)\n plot_spectrum(freqs, fluxes)\n plt.show()", "def plot_spectrum(freqs, fluxes, min_lambda=3700, max_lambda=8000):\n plt.plot(freqs, fluxes)\n plt.xlim((min_lambda, max_lambda))\n plt.xlabel(r'$\\lambda\\, (\\AA)$', size=16)\n plt.ylabel(r'$Flux$', size=16)\n #plt.axes().minorticks_on()", "def plot_spectrum(self,plot_id):\n self.rt_plot.plot_sum(plot_id)", "def plot_clip(self, spectrum, amplitude, peaks):\n freq, amp, noise = zip(*sorted(peaks, reverse=True, key=lambda x: x[0]))\n probability_distribution = self.get_full_distribution(spectrum, peaks)\n \n plot.plot(\n spectrum, amplitude, 'r',\n freq, noise, 'b',\n freq, amp, 'g^',\n spectrum, probability_distribution, 'g'\n )\n\n plot.savefig('./frequency_spectrum.pdf')\n plot.show()", "def fft_viz(image: np.ndarray, shift: bool = True) -> None:\n plt.imshow(img_fft(image, shift=shift), cmap='gray')", "def plot_spectrum(self, sp1d, title=None, color='k'):\n if title is not None:\n self.set_title(title)\n self.set_axis_label(''.join(['Wavelength [', sp1d.wunit.name, ']']),\n 'x')\n self.set_axis_label('Counts', 'y')\n self.axplot.plot(sp1d.wlen, sp1d.counts, color)\n self.axplot.axis('tight')\n self.fig.canvas.draw()\n return", "def specPlot(image, qplot=True):\n # Load image, get dimensions\n im = readImage(image)\n L, W = im.shape\n xmin, xmax = (-W//2, W//2)\n ymin, ymax = (-L//2, L//2)\n xrng = range(xmin, xmax)\n yrng = range(ymin, ymax)\n\n # Calculate power spectrum\n spec = np.abs(fftshift(fft2(im)))**2\n\n # Calculate rotational average of spectrum\n [fx, fy] = np.meshgrid(xrng, yrng)\n sf = (np.sqrt(fx**2 + fy**2)).round().astype(int).flatten()\n rot_spec = np.bincount(sf, weights=spec.flatten()) / np.bincount(sf)\n rot_spec = rot_spec[1:min(L,W)//2]\n\n # Make a plot if requested\n if qplot:\n fig1, ax1 = plt.subplots()\n h = ax1.imshow(np.log10(spec), extent=[xmin, xmax, ymin, ymax])\n ax1.axis('off')\n cb = fig1.colorbar(h)\n cb.set_label(r'$\\log_{10}$(Energy)')\n\n fig2, ax2 = plt.subplots()\n ax2.loglog(np.arange(1, len(rot_spec)+1), rot_spec)\n ax2.set_xlabel('Spatial frequency (cycles/image)')\n ax2.set_ylabel('Energy')\n\n # Return\n if qplot:\n return spec, rot_spec, (fig1, ax1), (fig2, ax2)\n else:\n return spec, rot_spec", "def plot_spectrum(P, neigs=30, **kwargs):\n\t\n\tPP = sage_sparse_to_scipy_sparse(P)\n\tif neigs == 0:\n\t\tL, V = np.linalg.eig(PP.todense())\n\telse:\n\t\tL, V = eigs(PP, neigs)\n\n\treturn scatter_plot([(real(x), imag(x)) for x in L], **kwargs)", "def plotSpectrum(y,Fs):\n n = len(y) # length of the signal\n k = arange(n)\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(n/2)] # one side frequency range\n\n Y = fft(y)/n # fft computing and normalization\n Y = Y[range(n/2)]\n \n plt.plot(frq,abs(Y),'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plotPowerSpectrum(self,l_edges,show_angle=True,angle_unit=u.arcmin,fig=None,ax=None,**kwargs):\n\n\t\tif not matplotlib:\n\t\t\traise ImportError(\"matplotlib is not installed, cannot plot the power spectrum!\")\n\n\t\t#Instantiate figure\n\t\tif (fig is None) or (ax is None):\n\t\t\t\n\t\t\tself.fig,self.ax 
= plt.subplots()\n\n\t\telse:\n\n\t\t\tself.fig = fig\n\t\t\tself.ax = ax\n\n\t\t#Calculate the power spectrum\n\t\tl,Pl = self.powerSpectrum(l_edges)\n\n\t\t#Plot\n\t\tself.ax.plot(l,l*(l+1.)*Pl/(2*np.pi),**kwargs)\n\t\tself.ax.set_xscale(\"log\")\n\t\tself.ax.set_yscale(\"log\")\n\n\t\t#Upper scale shows the angle\n\t\tif show_angle:\n\t\t\tax_top = self.ax.twiny()\n\t\t\tax_top.set_xticks(self.ax.get_xticks())\n\t\t\tax_top.set_xlim(self.ax.get_xlim())\n\t\t\tax_top.set_xticklabels([\"{0:.2f}\".format(((2*np.pi/ell)*u.rad).to(angle_unit).value) for ell in self.ax.get_xticks()])\n\t\t\tax_top.set_xlabel(r\"$\\theta$({0})\".format(angle_unit.to_string()),fontsize=22)\n\n\t\t#Labels\n\t\tself.ax.set_xlabel(r\"$\\ell$\",fontsize=22)\n\t\tself.ax.set_ylabel(r\"$\\ell(\\ell+1)P_\\ell^{\\kappa\\kappa}/2\\pi$\",fontsize=22)", "def spectrum(ind, power, epower=None, ind_fit=None, power_fit=None,\n fit_label=\"Fit\", show=True, savepath=None):\n plt.errorbar(ind, power, yerr=epower, fmt='k.', capsize=2, alpha=0.2,\n zorder=0, label=\"WMAP data\")\n if power_fit is not None and ind_fit is not None:\n plt.plot(ind_fit, power_fit, 'r-', label=fit_label, zorder=1)\n plt.legend()\n if savepath is not None:\n plt.savefig(savepath)\n if show:\n plt.show()\n else:\n plt.close()", "def plot_spectrum(spectrum):\n plt.figure()\n \n x = spectrum.wavelengths\n y = spectrum.fluxes\n y_err = spectrum.flux_errors\n \n plt.errorbar(x,y,yerr=y_err)\n \n plt.savefig(\"spec_figure.png\")\n plt.close()", "def plot_channels(self, data_array):\n\n plt.figure()\n for p in range(1, 7):\n plt.subplot(6, 1, p)\n plt.plot(data_array[p-1, :])\n\n plt.draw()\n plt.show()\n return", "def spectrum(syst, x, y=None, params=None, mask=None, file=None,\n show=True, dpi=None, fig_size=None, ax=None):\n\n if not mpl_available:\n raise RuntimeError(\"matplotlib was not found, but is required \"\n \"for plot_spectrum()\")\n if y is not None and not has3d:\n raise RuntimeError(\"Installed matplotlib does not support 3d plotting\")\n\n if isinstance(syst, system.FiniteSystem):\n def ham(**kwargs):\n return syst.hamiltonian_submatrix(params=kwargs, sparse=False)\n elif callable(syst):\n ham = syst\n else:\n raise TypeError(\"Expected 'syst' to be a finite Kwant system \"\n \"or a function.\")\n\n params = params or dict()\n keys = (x[0],) if y is None else (x[0], y[0])\n array_values = (x[1],) if y is None else (x[1], y[1])\n\n # calculate spectrum on the grid of points\n spectrum = []\n bound_ham = functools.partial(ham, **params)\n for point in itertools.product(*array_values):\n p = dict(zip(keys, point))\n if mask and mask(**p):\n spectrum.append(None)\n else:\n h_p = np.atleast_2d(bound_ham(**p))\n spectrum.append(np.linalg.eigvalsh(h_p))\n # massage masked grid points into a list of NaNs of the appropriate length\n n_eigvals = len(next(filter(lambda s: s is not None, spectrum)))\n nan_list = [np.nan] * n_eigvals\n spectrum = [nan_list if s is None else s for s in spectrum]\n # make into a numpy array and reshape\n new_shape = [len(v) for v in array_values] + [-1]\n spectrum = np.array(spectrum).reshape(new_shape)\n\n # set up axes\n if ax is None:\n fig = _make_figure(dpi, fig_size, use_pyplot=(file is None))\n if y is None:\n ax = fig.add_subplot(1, 1, 1)\n else:\n warnings.filterwarnings('ignore',\n message=r'.*mouse rotation disabled.*')\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n warnings.resetwarnings()\n ax.set_xlabel(keys[0])\n if y is None:\n ax.set_ylabel('Energy')\n else:\n ax.set_ylabel(keys[1])\n 
ax.set_zlabel('Energy')\n ax.set_title(', '.join('{} = {}'.format(*kv) for kv in params.items()))\n else:\n fig = None\n\n # actually do the plot\n if y is None:\n ax.plot(array_values[0], spectrum)\n else:\n if not hasattr(ax, 'plot_surface'):\n msg = (\"When providing an axis for plotting over a 2D domain the \"\n \"axis should be created with 'projection=\\\"3d\\\"\")\n raise TypeError(msg)\n # plot_surface cannot directly handle rank-3 values, so we\n # explicitly loop over the last axis\n grid = np.meshgrid(*array_values)\n for i in range(spectrum.shape[-1]):\n spec = spectrum[:, :, i].transpose() # row-major to x-y ordering\n ax.plot_surface(*(grid + [spec]), cstride=1, rstride=1)\n\n _maybe_output_fig(fig, file=file, show=show)\n\n return fig", "def plot_1d_spectrum(data_vec, freq, title, **kwargs):\n if len(data_vec) != len(freq):\n raise ValueError('Incompatible data sizes! spectrum: '\n + str(len(data_vec)) + ', frequency: ' + str(freq.shape))\n freq *= 1E-3 # to kHz\n\n title = title + ': mean UDVS, mean spatial response'\n fig, axes = plot_complex_spectra(np.expand_dims(data_vec, axis=0), freq, title=title,\n subtitle_prefix='', num_comps=1, x_label='Frequency (kHz)',\n figsize=(5, 3), amp_units='V', **kwargs)\n return fig, axes", "def new_spectrum(self):\n self.expose()\n self.read_data()\n self.plot()", "def plotSpectrum(self,wavelengths,intensities = 1.0):\n\n fieldAngle,spectralOutput = self.getIntensitySpectum(wavelengths,intensities)\n\n # Do the actual plot\n plot(np.degrees(fieldAngle),spectralOutput)\n grid()\n title(\"Spectral plot\")\n xlabel(\"Angle in degrees\")\n ylabel(\"Intensty\")", "def spectrum(da, config, index=0, channel=20):\n # Look for the data in the matrix\n data = da[channel, :, index]\n\n plt.plot(config['wave'], data)\n plt.title('Spectrum measured by MUDIS', fontsize=14)\n plt.xlabel('Wavelength[nm]', fontsize=13)\n plt.ylabel('Radiance[counts]', fontsize=13)\n plt.xticks(size=12)\n plt.yticks(size=12)\n plt.show()", "def plot_wavefunctions(self, num_levels=10):\n for ind, psi in enumerate(self.psis(num_levels)):\n plot(self.x, psi * sign(psi[1] - psi[0]), label=\"$\\psi_%d$\" % ind)", "def plot_intensity_prop(stack, wavelengths_arr, colors_arr):\n\n # for the electric fields profile\n for i, wl in enumerate(wavelengths_arr):\n electric_tot_te, electric_tot_tm, reflectivity_te, reflectivity_tm, transmission_te, transmission_tm, index_tot, L_tot, theta_tot = transfer_matrix_method(\n stack, 1, 0, wl, 0)\n intensity = np.abs(electric_tot_te[::-1]) ** 2\n plt.plot(L_tot * 1e6, intensity / max(intensity) * 2, color=colors_arr[i])\n # for the indexes profile\n ax.plot(L_tot * 1e6, index_tot[::-1], color='black')\n ax.fill_between(L_tot * 1e6, index_tot[::-1], color='azure')", "def band_plot(N=400,a=1.0):\n foot_step=2*np.pi/N\n x=np.arange(0.0,2*np.pi/a,foot_step)\n y=band_energy(x)\n plt.plot(x,y)", "def plotSpect(spec, sr):\r\n fig, ax = plt.subplots()\r\n img = librosa.display.specshow(spec, x_axis='time', y_axis='mel', sr=sr, fmax=8000, ax=ax) \r\n fig.colorbar(img, ax=ax, format='%+2.0f dB') \r\n ax.set(title='Mel-frequency spectrogram')", "def plotSpectrum(inp,xrng=[],yrng=[],xlabel='',ylabel='',xlog=False,ylog=False,grid=False,\n legend=[],legend_location='upper right',fontscale=1,legend_fontscale=1,title='',\n color='k',colormap=None,linestyle='-',linewidth=1.5,alpha=1.,\n show_noise=True,color_noise='k',linestyle_noise='-',linewidth_noise=1.5,alpha_noise=0.5,\n 
comparison=None,color_comparison='grey',linestyle_comparison='-',linewidth_comparison=1.5,alpha_comparison=1,\n residual=False,color_residual='m',linestyle_residual='-',linewidth_residual=1.5,alpha_residual=0.5,\n telluric=False,color_telluric='grey',linestyle_telluric='-',linewidth_telluric=1.5,alpha_telluric=0.2,\n features=[],mdwarf=False,ldwarf=False,tdwarf=False,young=False,binary=False,nsamples=100,\n band=[],band_color='k',band_alpha=0.2,band_label='',band_label_position='bottom',band_width=0.1,\n show_zero=True,stack=0.,zeropoint=0.,color_zero='k',linestyle_zero=':',linewidth_zero=1.5,alpha_zero=0.3,\n inset=False,inset_xrange=[],inset_yrange=[],inset_position=[0.65,0.60,0.20,0.20],inset_features=False,\n output='',multiplot=False,multipage=False,layout=[1,1],figsize=[],tight=True,\n interactive=False,**kwargs):\n\n# keyword parameters (for backward compatability)\n for k in ['showZero','showzero']: show_zero=kwargs.get(k,show_zero)\n for k in ['showNoise','noise','uncertainty','shownoise','showuncertainty','show_uncertainty']: show_noise=kwargs.get(k,show_noise)\n\n for k in ['line_style','lineStyle','ls','linestyles','line_styles']: linestyle=kwargs.get(k,linestyle)\n for k in ['line_width','lineWidth','width','lw','linewidths','line_widths']: linewidth=kwargs.get(k,linewidth)\n for k in ['colors','colour','colours']: color=kwargs.get(k,color)\n for k in ['colorScheme','color_scheme','colorscheme','colorMap','color_map']: colormap=kwargs.get(k,colormap)\n\n for k in ['colornoise','colorNoise','colorUnc','coloruncertainty','color_uncertainty','colorUncertainty']: color_noise=kwargs.get(k,color_noise)\n for k in ['linestylenoise','line_style_noise','linestyleNoise']: linestyle_noise=kwargs.get(k,linestyle_noise)\n for k in ['linewidthnoise','linewidthNoise','line_width_noise']: linewidth_noise=kwargs.get(k,linewidth_noise)\n for k in ['alphanoise','alphaNoise']: alpha_noise=kwargs.get(k,alpha_noise)\n\n for k in ['colorzero','colorZero']: color_zero=kwargs.get(k,color_zero)\n for k in ['linestylezero','line_style_zero','linestyleZero']: linestyle_zero=kwargs.get(k,linestyle_zero)\n for k in ['linewidthzero','linewidthZero','line_width_zero']: linewidth_zero=kwargs.get(k,linewidth_zero)\n for k in ['alphazero','alphaZero']: alpha_zero=kwargs.get(k,alpha_zero)\n\n for k in ['colorcomparison','colorComparison']: color_comparison=kwargs.get(k,color_comparison)\n for k in ['linestyleComparison','line_style_comparison','linestylecomparison']: linestyle_comparison=kwargs.get(k,linestyle_comparison)\n for k in ['linewidthcomparison','linewidthComparison','line_width_comparison']: linewidth_comparison=kwargs.get(k,linewidth_comparison)\n for k in ['alphacomparison','alphaComparison']: alpha_comparison=kwargs.get(k,alpha_comparison)\n\n for k in ['colorresidual','colorResidual']: color_residual=kwargs.get(k,color_residual)\n for k in ['linestyleresidual','line_style_residual','linestyleResidual']: linestyle_residual=kwargs.get(k,linestyle_residual)\n for k in ['linewidthresidual','linewidthResidual','line_width_residual']: linewidth_residual=kwargs.get(k,linewidth_residual)\n for k in ['alpharesidual','alphaResidual']: alpha_residual=kwargs.get(k,alpha_residual)\n\n for k in ['bands']: band=kwargs.get(k,band)\n if len(band) == 2 and isinstance(band[0],list) == False: band = [band]\n for k in ['bandcolors','bandcolor','band_colors']: band_color=kwargs.get(k,band_color)\n for k in ['bandalphas','band_alphas','bandalpha']: band_alpha=kwargs.get(k,band_alpha)\n for k in 
['band_labels','bandlabel','bandlabels']: band_label=kwargs.get(k,band_label)\n for k in ['band_label_positions','bandlabelposition','bandlabelpositions']: band_label_position=kwargs.get(k,band_label_position)\n for k in ['bandwidth','bandwidths','band_widths']: band_width=kwargs.get(k,band_width)\n for par in [band_color,band_alpha,band_label,band_label_position,band_width]:\n if not isinstance(par,list): par = [par]*len(band)\n if len(par) < len(band): par.extend([par[-1] for x in range(len(band)-len(par))])\n\n for k in ['legends','label','labels']: legend=kwargs.get(k,legend)\n if not isinstance(legend,list): legend = [legend]\n for k in ['legendfontscale','legendFontscale']: legend_fontscale=kwargs.get(k,legend_fontscale)\n legend_fontscale=legend_fontscale*fontscale\n for k in ['legendLocation','legendlocation','labelLocation','labellocation','label_location']: legend_location=kwargs.get(k,legend_location)\n\n for k in ['xrange','x_range','wave_range','wrange','wrng']: xrng=kwargs.get(k,xrng)\n if not isinstance(xrng,list): xrng = [xrng]\n for k in ['yrange','y_range','flux_range','frange','frng']: yrng=kwargs.get(k,yrng)\n if not isinstance(yrng,list): yrng = [yrng]\n\n for k in ['multilayout','multiLayout','multi_layout']: layout=kwargs.get(k,layout)\n for k in ['file','filename']: output=kwargs.get(k,output)\n if not isinstance(output,str): output=''\n filetype = '.pdf'\n if output!='': filetype=output.split('.')[-1]\n\n if comparison != None and isinstance(comparison,splat.Spectrum) == False and isinstance(comparison,list) == False: \n print('plotSpectrum() Warning: comparison spectrum should be a splat Spectrum object, you passed {}'.format(comparison))\n comparison = None\n\n# some plotting constants\n xlabel_default = 'Wavelength'\n ylabel_deafult = 'Flux'\n\n# telluric bands in micron\n telluric_bands = [[1.1,1.2]*u.micron,[1.3,1.5]*u.micron,[1.75,2.0]*u.micron]\n\n# assign features by group\n if not isinstance(features,list): features = [features]\n if ldwarf==True or mdwarf==True: features.extend(['k','na','feh','tio','co','h2o','h2'])\n if tdwarf==True: features.extend(['k','ch4','h2o','h2'])\n if young==True: features.extend(['vo'])\n if binary==True: features.extend(['sb'])\n\n# clean repeats in features while maintaining order - set does not do this\n if len(features)>0:\n fea = []\n for i in features:\n if i not in fea: fea.append(i)\n features = fea\n\n\n# if a list is passed, use this list\n splist = copy.deepcopy(inp)\n if isinstance(splist,list) == False: splist = [splist]\n \n# set up for multiplot\n if len(splist) == 1: multiplot = False\n \n# array of lists => force multiplot\n elif len(splist) > 1 and isinstance(splist[0],list) == True: multiplot = True\n else: pass\n\n# reformat array of spectra of multiplot is used (i.e., user forgot to set)\n if multiplot == True and isinstance(splist[0],splat.Spectrum):\n splist = [[s] for s in splist]\n\n elif multiplot == False and isinstance(splist[0],splat.Spectrum):\n splist = [splist]\n \n# flatten array if multiplot is not set\n elif multiplot == False and isinstance(splist[0],list) and len(splist) > 1:\n splist = [[item for sublist in splist for item in sublist]] # flatten\n else: pass\n\n# total number of spectra - use to assign default legends\n allsps = [item for sublist in splist for item in sublist] # Total number of spectra\n if len(legend) == 0: legend=[sp.name for sp in allsps]\n if len(legend) < len(allsps):\n legend.extend([allsps[i].name for i in range(len(legend),len(allsps)-len(legend))])\n \n\n# now 
run a loop through the input subarrays\n plt.close('all')\n\n# set up here for multiple file output\n nplot = 1\n if multipage == True or multiplot == True:\n nplot = layout[0]*layout[1]\n numpages = int(len(splist) / nplot) + 1\n if (len(splist) % nplot == 0):\n numpages -= 1\n fig = []\n \n if multipage == True and filetype.lower() == 'pdf':\n pdf_pages = PdfPages(output)\n \n if multipage == False:\n if len(splist) > 1:\n filebase = output.replace('.{}'.format(filetype),'')\n files = [filebase+'{}.'.format(i+1)+filetype for i in numpy.arange(len(splist))]\n else:\n files = [output]\n\n pg_n = 0 # page counter\n plt_n = 0 # plot per page counter\n lg_n = 0 # legend per plot counter\n\n for plts,sp in enumerate(splist):\n# set specific plot parameters\n if not isinstance(sp[0],splat.Spectrum):\n raise ValueError('\\nInput to plotSpectrum has wrong format:\\n\\n{}\\n\\n'.format(sp[0]))\n\n# set up plotting defaults for the list of spectra - REPLACE THIS\n if not isinstance(zeropoint,list): zeropoint = [zeropoint]*len(sp)\n if len(zeropoint) < len(sp): zeropoint.extend([zeropoint[-1] for x in range(len(sp)-len(zeropoint))])\n if not isinstance(color,list): color = [color]*len(sp)\n if len(color) < len(sp): color.extend([color[-1] for x in range(len(sp)-len(color))])\n if not isinstance(linestyle,list): linestyle = [linestyle]*len(sp)\n if len(linestyle) < len(sp): linestyle.extend([linestyle[-1] for x in range(len(sp)-len(linestyle))])\n if not isinstance(linewidth,list): linewidth = [linewidth]*len(sp)\n if len(linewidth) < len(sp): linewidth.extend([linewidth[-1] for x in range(len(sp)-len(linewidth))])\n if not isinstance(alpha,list): alpha = [alpha]*len(sp)\n if len(alpha) < len(sp): alpha.extend([alpha[-1] for x in range(len(sp)-len(alpha))])\n if not isinstance(color_noise,list): color_noise = [color_noise]*len(sp)\n if len(color_noise) < len(sp): color_noise.extend([color_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not isinstance(linestyle_noise,list): linestyle_noise = [linestyle_noise]*len(sp)\n if len(linestyle_noise) < len(sp): linestyle_noise.extend([linestyle_noise[-1] for x in range(len(sp)-len(linestyle_noise))])\n if not isinstance(linewidth_noise,list): linewidth_noise = [linewidth_noise]*len(sp)\n if len(linewidth_noise) < len(sp): linewidth_noise.extend([linewidth_noise[-1] for x in range(len(sp)-len(linewidth_noise))])\n if not isinstance(alpha_noise,list): alpha_noise = [alpha_noise]*len(sp)\n if len(alpha_noise) < len(sp): alpha_noise.extend([alpha_noise[-1] for x in range(len(sp)-len(color_noise))])\n if not isinstance(color_comparison,list): color_comparison = [color_comparison]*len(sp)\n if len(color_comparison) < len(sp): color_comparison.extend([color_comparison[-1] for x in range(len(sp)-len(color_comparison))])\n if not isinstance(linestyle_comparison,list): linestyle_comparison = [linestyle_comparison]*len(sp)\n if len(linestyle_comparison) < len(sp): linestyle_comparison.extend([linestyle_comparison[-1] for x in range(len(sp)-len(linestyle_comparison))])\n if not isinstance(linewidth_comparison,list): linewidth_comparison = [linewidth_comparison]*len(sp)\n if len(linewidth_comparison) < len(sp): linewidth_comparison.extend([linewidth_comparison[-1] for x in range(len(sp)-len(linewidth_comparison))])\n if not isinstance(alpha_comparison,list): alpha_comparison = [alpha_comparison]*len(sp)\n if len(alpha_comparison) < len(sp): alpha_comparison.extend([alpha_comparison[-1] for x in range(len(sp)-len(alpha_comparison))])\n\n# settings that work if 
the spectrum was read in as legitmate Spectrum object\n try:\n xlabel = kwargs.get('xlabel','{} ({})'.format(sp[0].wave_label,sp[0].wave.unit))\n ylabel = kwargs.get('ylabel','{} ({})'.format(sp[0].flux_label,sp[0].flux.unit))\n except:\n xlabel = kwargs.get('xlabel',xlabel_default)\n ylabel = kwargs.get('ylabel',ylabel_default)\n# initial plot range\n bound = [numpy.nanmin(sp[0].wave.value),numpy.nanmax(sp[0].wave.value)]\n ymax = [numpy.nanquantile(s.flux.value,0.98) for s in sp]\n bound.extend(numpy.array([-0.02,1.3])*numpy.nanmax(ymax)+\\\n numpy.array([numpy.nanmin(zeropoint),numpy.nanmax(zeropoint)+stack*(len(sp)-1)]))\n\n# set colormap if provided\n if colormap != None:\n values = numpy.arange(len(sp))\n color_map = plt.get_cmap(colormap)\n norm = colmap.Normalize(vmin=0, vmax=1.0*values[-1])\n scalarMap = cm.ScalarMappable(norm=norm, cmap=color_map)\n for i in range(len(sp)): color[i] = scalarMap.to_rgba(values[i])\n\n# GENERATE PLOTS\n if multiplot == True or multipage == True:\n plt_n = plts % nplot\n if (plt_n == 0):\n fig.append(plt.figure())\n pg_n += 1\n ax = fig[pg_n-1].add_subplot(layout[0], layout[1], plt_n+1)\n \n# plotting a single plot with all spectra\n else:\n plt.close('all')\n plt_n = 0\n fig = []\n if len(figsize)>0: fig.append(plt.figure(figsize=figsize))\n else: fig.append(plt.figure())\n ax = fig[0].add_subplot(111)\n \n for ii, a in enumerate(sp):\n# zeropoint and stack\n flx = [i+zeropoint[ii] for i in a.flux.value]\n if stack > 0: flx = [f + (len(sp)-ii-1)*stack for f in flx]\n ax.plot(a.wave.value,flx,color=color[ii],linestyle=linestyle[ii], lw=linewidth[ii], alpha=alpha[ii], zorder = 10, label = legend[lg_n]) \n\n# add comparison\n if comparison != None:\n# zeropoint and stack\n cflx = [i+zeropoint[ii] for i in comparison.flux.value]\n if stack > 0: cflx = [f + (len(sp)-ii-1)*stack for f in cflx]\n ax.plot(comparison.wave.value,cflx,color=color_comparison[ii],linestyle=linestyle_comparison[ii], lw=linewidth_comparison[ii], alpha=alpha_comparison[ii], zorder = 10)\n \n# add residual\n if residual == True and len(sp) == 2:\n # Save flux values from first spectrum\n if ii == 0:\n flx0 = [f - (len(sp)-ii-1)*stack for f in flx]\n \n # Subtract fluxes and plot\n elif ii == 1:\n res = [flx0[f_n] - f for f_n, f in enumerate(flx)]\n ax.plot(a.wave.value, res, alpha = alpha_residual[ii], color = color_residual[ii], linsetyle=linestyle_residual[ii], lw=linewidth_residual[ii])\n \n # Fix bound[2] if residual goes below 0\n if numpy.nanmin(res) < bound[2]:\n b0 = numpy.argmin(a.wave.value[a.wave.value > bound[0]])\n b1 = numpy.argmax(a.wave.value[a.wave.value < bound[1]])\n bound[2] = numpy.nanmin(res[b0:b1])\n\n# noise\n if show_noise == True:\n ns = [i+zeropoint[ii] for i in a.noise.value]\n ax.plot(a.wave.value,ns,color=color_noise[ii],linestyle=linestyle_noise[ii],alpha=alpha_noise[ii], lw=linewidth_noise[ii], zorder = 10)\n\n# zeropoint\n if show_zero == True:\n ze = numpy.ones(len(a.flux))*zeropoint[ii]\n ax.plot(a.wave.value,ze,color=color[ii],linestyle=linestyle_zero,alpha=alpha_zero,lw=linewidth_zero, zorder = 10)\n\n# save maximum flux among all spectra for plotting\n# THIS IS VERY SLOW AND IT WOULD BE BETTER TO FIND AN ALTERNATE APPROACH\n if len(features)>0:\n f = interp1d(a.wave,flx,bounds_error=False,fill_value=0.)\n if ii == 0: \n wvmax = numpy.linspace(bound[0],bound[1],nsamples)\n flxmax = numpy.array(f(wvmax))\n else: flxmax = numpy.maximum(flxmax,numpy.array(f(wvmax)))\n\n# legend counter\n lg_n = lg_n + 1 # Increment legend\n\n\n# label features\n# 
THIS NEEDS TO BE FIXED WITH GRETEL'S STUFF\n if len(features) > 0:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n fontsize = int((10-numpy.nanmin([(layout[0]*layout[1]-1),6]))*fontscale)\n for ftr in features:\n ftr = ftr.lower()\n if ftr in FEATURE_LABELS:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n# features must be contained in plot range (may change this)\n if numpy.nanmin(wRng) > bound[0] and numpy.nanmax(wRng) < bound[1]:\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n\n if FEATURE_LABELS[ftrc]['type'] == 'band':\n ax.plot(wRng,[y+yoff]*2,color='k',linestyle='-')\n ax.plot([wRng[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n else:\n for w in wRng: ax.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax.text(numpy.mean(wRng),y+1.5*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=fontsize)\n bound[3] = numpy.nanmax([numpy.nanmax(flxmax)+2.*yoff,bound[3]])\n\n# add grid\n if grid == True: ax.grid() \n\n# axis labels \n fontsize = (numpy.round(numpy.max([13./((layout[0]*layout[1])**0.33),5]))) * fontscale\n legend_fontsize = (13-numpy.min([(layout[0]*layout[1]-1),8])) * legend_fontscale\n ax.set_xlabel(xlabel, fontsize = fontsize)\n ax.set_ylabel(ylabel, fontsize = fontsize)\n ax.tick_params(axis='x', labelsize=fontsize)\n ax.tick_params(axis='y', labelsize=fontsize)\n\n# add title\n if title!='': ax.set_title(title)\n\n# log scale?\n if kwargs.get('xlog',False): ax.set_xscale('log',nonposx='clip')\n if kwargs.get('ylog',False): ax.set_yscale('log',nonposy='clip')\n\n# place legend\n if len(legend) > 0:\n if legend_location == 'outside':\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.15, box.width * 0.7, box.height * 0.7])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':legend_fontsize})\n else:\n ax.legend(loc=legend_location, prop={'size':legend_fontsize})\n bound[3] = bound[3]+0.1*(bound[3]-bound[2]) # extend axis for in-plot legends\n\n# overplot telluric absorption\n if telluric == True:\n yoff = 0.02*(bound[3]-bound[2]) # label offset\n for waveRng in telluric_bands:\n wR = waveRng.to(sp[0].wave.unit).value\n rect = patches.Rectangle((wR[0],bound[2]),wR[1]-wR[0],bound[3]-bound[2],facecolor=color_telluric,alpha=alpha_telluric,color=color_telluric)\n ax.add_patch(rect)\n ax.text(numpy.mean(wR),bound[2]+3*yoff,r'$\\oplus$',horizontalalignment='center',fontsize=fontsize)\n\n# overplot color swaths for pre-specified bands\n if len(band) > 0:\n for i,b in enumerate(band):\n if not isinstance(b,list): \n try: b = [float(b)-0.5*band_width,float(b)+0.5*band_width]\n except:\n print('\\nWarning: plotSpectrum bands variables should be array of 2-element arrays; you passed {}'.format(band))\n b = [0.,0.]\n rect = patches.Rectangle((b[0],bound[2]),b[1]-b[0],bound[3]-bound[2],facecolor=band_color[i],color=band_color[i],alpha=band_alpha[i])\n ax.add_patch(rect)\n if band_label_position[i].lower() == 'top':\n ax.text(numpy.mean(b),bound[3]-3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n elif band_label_position[i].lower() 
== 'middle':\n ax.text(numpy.mean(b),0.5*(bound[2]+bound[3]),band_label[i],horizontalalignment='center',fontsize=fontsize)\n else:\n ax.text(numpy.mean(b),bound[2]+3*yoff,band_label[i],horizontalalignment='center',fontsize=fontsize)\n\n# place inset - RIGHT NOW ONLY SETTING LIMITS WITH FIRST SPECTRUM IN LIST\n if inset == True and len(inset_xrange) == 2:\n ax_inset = fig[pg_n-1].add_axes(inset_position) #, axisbg='white')\n bound2 = inset_xrange\n if len(inset_yrange) == 0:\n b0 = numpy.argmax(sp[0].wave.value > bound2[0])\n b1 = numpy.argmin(sp[0].wave.value < bound2[1])\n inset_yrange = [numpy.nanmin(sp[0].flux.value[b0:b1]),numpy.nanmax(sp[0].flux.value[b0:b1])]\n bound2.extend(inset_yrange)\n db = (bound2[3]-bound2[2])\n bound2[2] = bound2[2]-0.05*db\n bound2[3] = bound2[3]+0.05*db\n ax_inset.axis(bound2)\n inset_fontsize = fontsize*0.7\n\n for ii,a in enumerate(sp):\n flx = [i+zeropoint[ii] for i in a.flux.value]\n ax_inset.plot(a.wave.value,flx,color=colors[ii],linestyle=linestyle[ii],linewidth=linewidth[ii],alpha=alpha[ii]) \n ax_inset.set_xlabel('')\n ax_inset.set_ylabel('')\n ax_inset.tick_params(axis='x', labelsize=inset_fontsize)\n ax_inset.tick_params(axis='y', labelsize=inset_fontsize)\n# ax_inset.legend()\n\n# inset feature labels\n if len(inset_features) > 0:\n yoff = 0.05*(bound2[3]-bound2[2])\n for ftr in inset_features:\n ftrc = checkDict(ftr,FEATURE_LABELS)\n if ftrc != False:\n for ii,waveRng in enumerate(FEATURE_LABELS[ftrc]['wavelengths']):\n wRng = waveRng.to(sp[0].wave.unit).value\n if (numpy.min(wRng) > bound2[0] and numpy.max(wRng) < bound2[1]):\n wfeature = numpy.where(numpy.logical_and(wvmax >= numpy.nanmin(wRng),wvmax <= numpy.nanmax(wRng)))\n if len(wvmax[wfeature]) == 0: wfeature = numpy.argmax(numpy.absolute(wvmax-numpy.nanmedian(wRng)))\n y = numpy.nanmax(flxmax[wfeature])+yoff\n flxmax[wfeature] = flxmax[wfeature]+3.*yoff\n \n if FEATURE_LABELS[ftrc]['type'] == 'band':\n ax_inset.plot(wR,[y+yoff]*2,color='k',linestyle='-')\n ax_inset.plot([wR[0]]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n else:\n for w in waveRng:\n ax_inset.plot([w]*2,[y,y+yoff],color='k',linestyle='-')\n ax_inset.text(numpy.mean(wR),y+2*yoff,FEATURE_LABELS[ftrc]['label'],horizontalalignment='center',fontsize=inset_fontsize)\n waveRng = [wR[0]-0.02,wR[1]+0.02] # for overlap\n \n# update offset\n if len(inset_features) > 0: bound2[3] = numpy.nanmax([bound2[3],numpy.nanmax(flxmax)+5.*yoff])\n ax_inset.axis(bound2)\n\n# finalize bounding\n if len(xrng) > 0: bound[0:2] = xrng\n if len(yrng) > 0: bound[2:4] = yrng\n if isUnit(bound[0]): bound = [x.value for x in bound]\n ax.axis(bound)\n \n# save to file or display\n# ERROR HERE - CHECK WHAT FILES\n if multipage == False:\n if files[plts] != '' and (plts % nplot == 3 or plts == len(splist)-1):\n if kwargs.get('tight',True) == True: \n plt.savefig(files[plts], bbox_inches='tight')\n else:\n plt.savefig(files[plts])\n if output == '' and not kwargs.get('web',False):\n plt.show()\n if (kwargs.get('interactive',False) != False): plt.ion()\n else: plt.ioff()\n\n\n# save figures in multipage format and write off pdf file\n if multipage == True: \n for pg_n in numpy.arange(numpages):\n# fig[pg_n].text(0.5, 0.04, xlabel, ha = 'center', va = 'center')\n# fig[pg_n].text(0.06, 0.5, ylabel, ha = 'center', va = 'center', rotation = 'vertical')\n fig[pg_n].tight_layout\n fig[pg_n].suptitle(title, fontsize = int(14*fontsize), fontweight = 
'bold')\n pdf_pages.savefig(fig[pg_n])\n if filetype.lower() == 'pdf':\n pdf_pages.close()\n\n plt.clf()\n return fig", "def draw_spectrum(msm):\n # eigenvalues of T sorted by the size\n length = min(len(msm.eigenv), 10) \n a = msm.eigenv[0:length]\n #a = sorted(W, reverse=True, key=lambda x: abs(x))[0:length]\n time = msm.timescales[0:length]\n x = np.arange(1.0,11.0,1.0)[0:length]\n\n # Define limits of the graph\n xmin = 0.7\n xmax = 10.3\n ymin = -0.1\n ymax = 1.1\n\n # Plot the ten biggest eigenvalues:\n ax1 = plt.subplot(111)\n plt.plot(x,a, 'ro', alpha=0.7, ms=8)\n plt.vlines(x,0,a)\n plt.xlabel('Index i', fontsize=12)\n ax1.set_ylabel(r'Eigenvalue $\\lambda_i$', fontsize=12, color='r')\n for tl in ax1.get_yticklabels(): #set tick label color\n tl.set_color('r')\n ax1.xaxis.tick_bottom()\n ax1.yaxis.tick_left()\n plt.axis([xmin, xmax, ymin, ymax])\n\n # add horizontal lines for orientation\n plt.axhline(linewidth=1, color='k')\n plt.axhline(y=1, linewidth=1, color='y')\n\n # plot timescales on the right y-axis:\n ax2 = plt.twinx()\n ax2.plot(x, time, 'bs', alpha=0.6, ms=6)\n #ax2.set_ylim([ymin, ymax])\n #ax2.set_yticks(time)\n #ax2.set_yticklabels([\"{0:0.2}\".format(timescale) for timescale in time])\n ax2.set_ylabel(r'Implied timescale $t_i$', fontsize=12, color='b')\n for tl in ax2.get_yticklabels():\n tl.set_color('b')\n ax2.yaxis.tick_right()\n\n plt.title('Eigenvalues and Implied Timescales', fontsize=16)\n\n plt.axis([xmin, xmax, 0., 1.05*time[1]])\n plt.show()", "def plot_spectrum(spec, ax=None, **kwargs):\n\n if not spec.data.size or not spec.energy.size or np.all(spec.energy == 0):\n raise ValueError('Spectrum must be loaded and calibrated')\n\n ax = plot_xy(spec.energy, spec.data, ax=ax, **kwargs)\n\n return ax", "def mc_energyplot(energy_array):\n \n\n plt.plot(energy_array, \"r-\", label=\"energy\")\n\n plt.xlabel(\"No. 
of steps\")\n plt.ylabel(\"Total Energy (kJ/mol)\")\n \n plt.title(\"Total energy vs steps\")\n plt.legend(loc=1, fontsize= 'x-large')\n plt.show()", "def display_band_structure_1d(num_atoms, h_poly, cycles = 1, phase_offset = 0):\n x = []\n y = [[] for i in range(num_atoms)]\n n = 100*cycles\n for k in range(-n/2, n/2):\n # for k in range(0, n):\n alpha = 2*math.pi*k/n+phase_offset\n phase = numpy.exp(alpha*1j)\n #h_minus, h_zero, h_plus = compute_hamiltonian(num_atoms, atoms, bonds)\n #h = h_minus*phase.conjugate()+h_zero+h_plus*phase\n h = eval_hamiltonian(num_atoms, h_poly, (phase, 1))\n\n e, v = eigensystem(h)\n #print k,h,e\n\n x.append(alpha)\n for i in range(num_atoms):\n y[i].append(e[i])\n\n for i in range(num_atoms):\n # matplotlib.pyplot.plot(x, y[i])\n for cycle in range(0, cycles):\n matplotlib.pyplot.plot(x[0:100], y[i][100*cycle:100*(cycle+1)])\n # matplotlib.pyplot.show()", "def plot_spectrum(model_wave, model_flux, power_law, blackbody,\n isophotal_weight, calibration_results, outfile=None):\n from matplotlib.backends.backend_agg \\\n import FigureCanvasAgg as FigureCanvas\n from matplotlib.figure import Figure\n\n print('\\nPlotting isophotal wavelengths')\n fig = Figure(figsize=(10, 10))\n FigureCanvas(fig)\n ax = fig.add_subplot(1, 1, 1)\n\n fmin = np.min(model_flux)\n fmax = np.max(model_flux)\n f_mean = calibration_results['flux_mean']\n isophotal_weight = calibration_results['isophotal_wt']\n if power_law == 1 or blackbody == 1:\n ax.plot(model_wave, model_flux)\n else:\n ax.step(model_wave, model_flux, where='mid')\n ax.plot(isophotal_weight, f_mean, color='red', linestyle='--')\n print('Lambda_iso <F_lambda> F_lambda(lambda_iso)')\n for i in range(len(isophotal_weight)):\n ax.scatter(isophotal_weight.iloc[i], f_mean.iloc[i],\n marker='d', color='k')\n fiso = iso.interpol(model_flux, model_wave, isophotal_weight.iloc[i])\n print(f'{isophotal_weight[i]:.5e}\\t{f_mean[i]:.5e}\\t{fiso:.5e}')\n\n ax.set_ylabel([fmin, fmax])\n ax.set_yscale('log')\n ax.set_xlabel('Wavelength (micron)')\n ax.set_ylabel('Flux (W/m2/micron)')\n\n if outfile is None:\n plotname = 'spectrum.png'\n else:\n plotname = '.'.join(outfile.split('.')[:-1]) + '.png'\n fig.savefig(plotname, bbox_inches='tight', dpi=300)\n print(f'Plotting to {plotname}')", "def ensquared_one_pix(array, pix_scale, new_scale=40, plot=True):\n\n n = int(new_scale // pix_scale)\n minPix, maxPix = (pix + 1 - n) // 2, (pix + 1 + n) // 2\n ens = array[minPix:maxPix, minPix:maxPix]\n # print(ens.shape)\n energy = np.sum(ens)\n\n if plot:\n mapp = 'viridis'\n f, (ax1, ax2) = plt.subplots(1, 2)\n ax1 = plt.subplot(1, 2, 1)\n square = Rectangle((minPix-0.5, minPix-0.5), n, n, linestyle='--', fill=None, color='white')\n ax1.add_patch(square)\n img1 = ax1.imshow(array, cmap=mapp)\n ax1.set_title('%.1f mas pixels' % (pix_scale))\n img1.set_clim(0, 1)\n plt.colorbar(img1, ax=ax1, orientation='horizontal')\n\n ax2 = plt.subplot(1, 2, 2)\n img2 = ax2.imshow(ens, cmap=mapp)\n ax2.set_title('%d mas window' %new_scale)\n img1.set_clim(0, 1)\n plt.colorbar(img2, ax=ax2, orientation='horizontal')\n\n return energy", "def plot_spectrum(data_path, save_path,\n title='k'):\n\n data = np.loadtxt(data_path,\n delimiter=TAB_DELIMITER,\n dtype=COUNT_DTYPE,\n skiprows=SKIPROW, usecols=COLS,)\n channels = data[:, 0]\n counts = data[:, 1]\n \n plt.figure()\n plt.plot(channels, counts,\"r.\")\n if title is not None:\n plt.title(data_path)\n plt.ylabel(\"Counts\")\n plt.xlabel(\"Channel\")\n plt.savefig(save_path, dpi=DPI)\n \n return", "def 
plot_spectrum(sims, noise=False, maxtime=240):\n logging.log(15, \"starte plotting\")\n #ein Spektrum mit max 30 Chroms, gemeinsame Zeitenliste erstellen\n if len(sims) < 30:\n spectrum = [0,maxtime]\n #evtl Rauschen hinzufuegen\n if noise:\n for i in range(int(sims[0].number*len(sims)/10)):\n spectrum.append(random.uniform(0, maxtime))\n for sim in sims:\n for t in sim.times:\n if sim.pd[0] < 250:\n spectrum.append(t)\n hist, bins = np.histogram(spectrum, bins= maxtime, normed = True)\n offset = bins[1:]-bins[:-1]\n plt.plot(bins[:-1]+offset, hist, \"k\")\n #plt.ylim((0, 0.3))\n plt.xlim((0, maxtime))\n plt.xlabel(\"Retentionszeit/s\")\n plt.ylabel(\"Intensität\")\n title = \"Spektrum\"\n if noise:\n title += \" mit Rauschen\"\n plt.suptitle(title)\n plt.show()", "def Plot_Spectrum(Path,borne1 = 0,borne2 = 0) :\n x,y=[],[]\n fs = open(Path, 'r') \n#index_array = 0\n while 1: \n txt = fs.readline()\n if txt =='': \n break\n x.append(float(txt[0:9]))\n y.append(float(txt[10:17]))\n #x[index_array],y[index_array] = float(txt[0:9]),float(txt[10:17])\n #index_array = index_array+1\n \n fs.close()\n x = np.array(x)\n y = np.array(y)\n if ((borne1 == 0) & (borne2 == 0)) :\n pass \n else :\n index_ok = ((x<borne2) & (x>borne1))\n x = x[index_ok]\n y = y[index_ok]\n plt.figure(1)\n plt.plot(x,y)\n plt.xlabel(r\"Nombre d'onde $(cm^{-1})$\")", "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def plot_grating_coupler_sweep_spectrum(matlab_file_path, function=log):\n d = loadmat(matlab_file_path)\n print(d.keys())\n\n for i in range(len(d[\"M_sweep\"][0])):\n x = d[\"WL\"][0] * 1e9\n y = function(d[\"M_T\"][i])\n label = str(int(1e9 * d[\"M_sweep\"][0][i]))\n plt.plot(x, y, label=label)\n\n plt.legend()", "def plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def plotFFT(filename):\n fs_rate, signal = wavfile.read(filename)\n len_audio = len(signal.shape)\n print(signal.shape)\n print(signal[:][0])\n if len_audio == 2:\n signal = signal.sum(axis=1) / 2\n N = signal.shape[0]\n FFT = abs(scipy.fft(signal))\n FFT_side = FFT[range(N//2)]\n freqs = scipy.fftpack.fftfreq(signal.size, 1.0/fs_rate)\n fft_freqs = np.array(freqs)\n freqs_side = freqs[range(N//2)] # one side frequency range\n plt.plot(freqs_side, abs(FFT_side), \"b\") # plotting the complete fft spectrum\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Single-sided Amplitude')\n plt.show()", "def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], 
rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)", "def SimpleArrayPlotHelper(self,filename):\n #levels = np.linspace(-100.0, 9900.0, 100, endpoint=True)\n plt.figure()\n #plt.contourf(orography_field,levels)\n plt.colorbar()\n pts.invert_y_axis()", "def plot(self, ax):\n data = self.psd1d\n freqs = data[:, 0]\n psd1d = data[:, 1]\n psd1d_err = data[:, 2]\n\n xmin = freqs[1] / 1.2 # ignore the first 0\n xmax = freqs[-1] * 1.1\n ymin = np.min(psd1d) / 10.0\n ymax = np.max(psd1d[1:] + psd1d_err[1:]) * 1.5\n\n if self.meanstd:\n label = \"mean\"\n labelerr = \"standard deviation\"\n else:\n label = \"median\"\n labelerr = \"1.4826*MAD\"\n if self.bunit:\n ylabel = r\"Power [(%s/%s)$^2$]\" % (self.bunit, self.pixel[1])\n else:\n ylabel = \"Power\"\n\n ax.errorbar(freqs, psd1d, yerr=psd1d_err,\n fmt=\"none\", label=labelerr)\n ax.plot(freqs, psd1d, marker=\"o\", label=label)\n ax.set(xscale=\"log\", yscale=\"log\",\n xlim=(xmin, xmax), ylim=(ymin, ymax),\n title=\"Radial (Azimuthally Averaged) Power Spectral Density\",\n xlabel=r\"$k$ [%s$^{-1}$]\" % self.pixel[1],\n ylabel=ylabel)\n ax.legend()\n\n if self.pixel[1] != \"pixel\":\n # Add an additional X axis for pixel-based frequencies\n ax2 = ax.twiny()\n ax2.set_xscale(ax.get_xscale())\n pix_ticks = np.logspace(-4, 0, num=5) # [pixel^-1]\n ax2.set_xticks(pix_ticks)\n ax2.set_xticklabels([r\"10$^{%d}$\" % ep\n for ep in np.log10(pix_ticks)])\n x1_min, x1_max = ax.get_xlim()\n x2_min, x2_max = x1_min*self.pixel[0], x1_max*self.pixel[0]\n ax2.set_xlim(x2_min, x2_max)\n ax2.set_xlabel(r\"$k$ [pixel$^{-1}$] (1 pixel = %.2f %s)\" %\n self.pixel)\n ax2.grid(False)\n # Raise title position to avoid overlapping\n ax.title.set_position([0.5, 1.1])\n return (ax, ax2)\n else:\n return ax", "def plot_periodogram(trj, coord: str = \"y\", fs: int = 1, interactive: bool = True):\n from scipy import signal\n\n vals = trj[coord].values\n f, Pxx = signal.periodogram(vals, fs=fs, window=\"hanning\", scaling=\"spectrum\")\n plt.title(\"Power Spectrum\")\n plt.plot(f, Pxx)\n if interactive:\n plt.show()\n\n return plt.gcf()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def plot_power(subject, channel, cv, axes, vmin=None, vmax=None, bb=False, bb2=False, elabel=None):\n ax0, ax1 = axes\n axes = [ax for ax in axes if (ax is not None)]\n if bb2:\n power_data = np.load(os.path.join(os.environ['HOME'], 'plots/xfreq/data',\n '{}_{}_{}_power_bb2.npz'.format(subject, cv, channel)))['power_data']\n elif bb:\n power_data = np.load(os.path.join(os.environ['HOME'], 'plots/xfreq/data',\n '{}_{}_{}_power_bb.npz'.format(subject, cv, channel)))['power_data']\n else:\n power_data = np.load(os.path.join(os.environ['HOME'], 'plots/xfreq/data',\n '{}_{}_{}_power.npz'.format(subject, cv, channel)))['power_data']\n\n if ax0 is not None:\n print(power_data[::-1, s].min(), power_data[::-1, s].max())\n print(power_data.shape)\n im = ax0.imshow(power_data[::-1, s], interpolation='nearest', cmap='afmhot',\n aspect='auto', vmin=vmin, vmax=vmax)\n yticklabels = [5, 25, 75]\n yticks = [40-np.searchsorted(bands.chang_lab['cfs'], y, side='right') for y in\n yticklabels]\n yticklabels.append(200)\n yticks.append(0)\n ax0.set_yticks(yticks)\n ax0.set_yticklabels(yticklabels)\n if elabel is None:\n elabel = channel\n ax0.set_title('Electrode: {}'.format(elabel), **axes_label_fontstyle)\n ax0.set_ylabel('Freq. 
(Hz)', **axes_label_fontstyle)\n ax0.axvline(100, 0, 1, linestyle='--', c='white', lw=1.)\n #ax0.set_xlabel('Time (ms)', fontsize=axes_label_fontsize)\n\n if ax1 is not None:\n hg_bands = np.logical_and(bands.chang_lab['cfs'] >= bands.neuro['min_freqs'][-1],\n bands.chang_lab['cfs'] <= bands.neuro['max_freqs'][-1])\n b_bands = np.logical_and(bands.chang_lab['cfs'] >= bands.neuro['min_freqs'][2],\n bands.chang_lab['cfs'] <= bands.neuro['max_freqs'][2])\n\n hb_bands = np.logical_and(bands.chang_lab['cfs'] >= bands.neuro['min_freqs'][3],\n bands.chang_lab['cfs'] <= bands.neuro['max_freqs'][3])\n b_bands = np.logical_or(b_bands, hb_bands)\n b_bands = range(10, 21)\n hg = power_data[hg_bands].mean(axis=0)\n b = power_data[b_bands].mean(axis=0)\n\n b = b[s]\n hg = hg[s]\n\n hg -= hg.min()\n hg /= hg.max()\n hg = 2. * hg - 1\n b -= b.min()\n b /= b.max()\n b = 2. * b - 1\n\n ax1.plot(hg, c='r', lw=2)\n ax1.plot(b, c='k', lw=2)\n ax1.set_ylabel('Normalized\\nAmplitude', **axes_label_fontstyle)\n ax1.set_xlabel('Time (ms)', **axes_label_fontstyle)\n ax1.set_xlim([0, plot_idx[-1]])\n ax1.axvline(100, 0, 1, linestyle='--', lw=1., c='gray')\n for ax in axes:\n ax.set_xticks([0, 100, plot_idx[-1]])\n ax.set_xticklabels([-500, 0, int(1000 * plot_time[-1])-500])\n ax.tick_params(**tickparams_fontstyle)\n return im", "def app_complex(data_pupil,data_phase,oversize=4):\n#phase colors\n # cdict = {'red': ((0.0, 1.0, 1.0),(0.25, 0.0, 0.0),(0.5, 0.0, 0.0),(0.75, 1.0, 1.0),(1.00, 1.0, 1.0)),'green': ((0.0, 0.0, 0.0),(0.25, 1.0, 1.0),(0.5, 0.0, 0.0),(0.75, 1.0, 1.0),(1.0, 0.0, 0.0)),'blue': ((0.0, 0.0, 0.0),(0.25, 0.0, 0.0),(0.5, 1.0, 1.0),(0.75, 0.0, 0.0),(1.0, 0.0, 0.0))}\n #my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)\n\n size=data_pupil.shape[0]\n#make empty oversized array\n\n expand_phase=zeros([oversize*size,oversize*size])\n expand_amp=zeros([oversize*size,oversize*size])\n\n#copy fits into lower left corner\n\n expand_amp[0:size,0:size]=data_pupil[0:size,0:size]\n expand_phase[0:size,0:size]=data_phase[0:size,0:size]\n\n#move to corners\n\n expand_phase=roll(expand_phase,-size/2,0)\n expand_phase=roll(expand_phase,-size/2,1)\n\n expand_amp=roll(expand_amp,-size/2,0)\n expand_amp=roll(expand_amp,-size/2,1)\n\n# recalculate real and imaginary part\n\n #xr=expand_amp*cos(expand_phase)\n #yr=expand_amp*sin(expand_phase)\n\n# make complex array\n\n complexr=expand_amp*numpy.exp(1j*expand_phase)\n\n# apply 2d-fft\n\n complexr=numpy.fft.fftpack.fft2(complexr)\n return fftshift(complexr)", "def plot1dim(i_dim):\n freq = plt.hist(x=x[:,i_dim], bins=min(100,4*self.grid_shape[i_dim]))[0]\n plt.plot(self.xg[i_dim][:,0],np.zeros(self.grid_shape[i_dim]) + 0.5*np.max(freq),'ko',markersize=3)\n plt.xlabel(r'x_%d'%i_dim)\n plt.ylabel('Frequency')\n plt.title('Dim %d, m = %d' % (i_dim, self.grid_shape[i_dim]))", "def plot_spectra_from_scan(scan, name, direct_file, grism_file):\n\n tr_scan = np.transpose(scan)\n column_sums = [sum(scan_col) for scan_col in tr_scan[5:-5]]\n x = np.arange(len(column_sums))\n wv = convert_rows_to_wv(direct_file, grism_file, x)\n plt.plot(wv, column_sums)\n plt.savefig('{0}/spectra/{1}_spectrum.png'.format('/'.join(name.split('/')[:-1]), name.split('/')[-1]))\n plt.clf()\n return wv, column_sums", "def test_power(self):\r\n a = 6 # shape\r\n samples = 5000\r\n max = -0.06\r\n min = -3.3\r\n s = np.random.power(a, samples) * -1 * (min - max) + min\r\n plt.hist(s, bins=30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n 
plt.ylabel('Frequency')\r\n plt.show()", "def plotSpectrum(self, spectrum=None):\n if spectrum is None:\n spectrum = self.spectrometer.getSpectrum()\n\n if len(self.axes.lines) == 0:\n self.axes.plot(self.spectrometer.wavelength, spectrum, 'k')\n self.axes.set_xlabel(\"Wavelength [nm]\")\n self.axes.set_ylabel(\"Intensity [arb.u]\")\n else:\n self.axes.lines[0].set_data( self.spectrometer.wavelength, spectrum) # set plot data\n self.axes.relim()", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def plot_spectrumxichange(self):\n countgood = 0 ; countbad = 0\n for idata in self.datarg:\n if idata[-1, 0] == 1.: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'b') \n countgood += 1\n print countgood , 'good solution'\n else: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'r') \n print countbad, 'bad solution'\n countbad += 1\n print 'We found %g good solutions and %g tda startdistributions that broke down before xi = 1, we hope that\\'s what you expected' %(countgood,countbad)\n #Create custom artistsr[goodline,badline],['solution','breakdown']\n goodline = pl.Line2D((0,1),(0,0), color='b') \n badline = pl.Line2D((0,1),(0,0), color='r')\n self.layout(self.reader.depvar['depvar'] , r'energy spectrum (a.u.)' , tit = r'All tda start distributions $\\xi$' , legendhand = [goodline , badline] , legendlab = ['solution', 'breakdown'] )\n self.savefig('xispec')", "def show_trace_2d(f, results): #@save\n set_figsize()\n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')", "def plot_array(self):\n locations = np.array([getattr(pulsar, 'location') for pulsar in self.pulsars])\n fig = plt.figure()\n ax = plt.subplot(111, projection=\"hammer\")\n for location in locations:\n plt.plot(location.ra, location.dec, '.', color='b')\n return fig", "def plot_powerperband(self, bandwidth=2, show=True, fname=None):\n power = self.get_powerperband(bandwidth)\n ls = self.get_degrees()\n\n fig, ax = plt.subplots(1, 1)\n ax.set_xlabel('degree l')\n ax.set_ylabel('bandpower')\n ax.set_xscale('log', basex=bandwidth)\n ax.set_yscale('log', basey=bandwidth)\n ax.grid(True, which='both')\n ax.plot(ls[1:], power[1:], label='power per degree l')\n fig.tight_layout(pad=0.1)\n if show:\n plt.show()\n if fname is not None:\n fig.savefig(fname)", "def plot_power_spectrum_fits(self, figsize=(20, 10)):\n\n debug_figs = []\n debug_fig_names = []\n # individual power spectra\n for ii in range(self.nangles):\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 0], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 0], frq_sim=(0, 0), mask=self.pspec_masks[ii, 0],\n figsize=figsize, ttl_str=\"Unshifted component, angle %d\" % ii)\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_unshifted_component_angle=%d\" % ii)\n\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 1], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 1], frq_sim=self.frqs[ii], 
mask=self.pspec_masks[ii, 1],\n figsize=figsize, ttl_str=\"Shifted component, angle %d\" % ii)\n\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_shifted_component_angle=%d\" % ii)\n\n return debug_figs, debug_fig_names", "def FourierPlot(tas):\n detrend = signal.detrend(tas)\n L = len(tas)\n freqs = np.fft.fftfreq(L)\n tas_fft = np.fft.fft(detrend)\n R = tas_fft.real\n Im = tas_fft.imag\n mag = np.sqrt(R**2+Im**2)\n plt.plot(1/freqs,mag)", "def _generate_plot(ax, power_data, title, min_db, max_db):\n # only generate plots for the transducers that have data\n if power_data.size <= 0:\n return\n\n ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)\n return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)", "def plot_stft(arr, fs, nfft, noverlap):\n\n import matplotlib\n matplotlib.use('agg')\n from matplotlib.mlab import window_hanning, specgram\n\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n\n arr2D, freqs, bins = specgram(arr, Fs=fs, NFFT=nfft, noverlap=noverlap, window=window_hanning)\n\n # comment this line\n #plt.axis('off')\n axes = plt.gca()\n\n # change this line for y axis control\n axes.set_ylim([1000, 0])\n\n extent = (bins[0],bins[-1]*1024,freqs[-1],freqs[0])\n im = plt.imshow(arr2D,aspect='auto',extent = extent,interpolation=\"none\")\n plt.gca().invert_yaxis()\n\n # comment this line\n #plt.tight_layout(pad=0)\n # comment this line\n plt.colorbar()\n\n # flush draw commands\n fig.canvas.draw()\n\n # Now we can save it to a numpy array.\n data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n plt.close()\n\n # transpose to BGR\n return data[..., ::-1]", "def plot_powerlaw_output(timeDB, xunits='yr', yunits='MPa', skip=8, P0=33.0):", "def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def plot_chans(freq=True):\n f,ax = plt.subplots(4,3)\n for ant in range(12):\n snap.write_int('rst',1)\n snap.write_int('antenna',ant)\n snap.write_int('rst',0)\n\n time.sleep(ACC_LEN/(512*200e6)*1e3)\n arr = struct.unpack('>256Q',snap.read('spectrum',8*256))\n \n ax[ant%4][int(ant/4)].semilogy(FREQ,arr,'.-',lw=1)\n 
ax[ant%4][int(ant/4)].set_xlim(FREQ.max(), FREQ.min())\n ax[ant%4][int(ant/4)].set_title('Antenna %s'%ANT_LABELS[ant])\n plt.show()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def plot_hypnogram(eeg, stages, srate):\r\n \r\n fig,ax1 = plt.subplots() #Needed for the multiple y-axes\r\n \r\n #Use the specgram function to draw the spectrogram as usual\r\n y_lim = 40;\r\n plt.specgram(eeg/np.sum(eeg),NFFT=512,Fs=srate)\r\n\r\n #Label your x and y axes and set the y limits for the spectrogram\r\n ax1.set_ylim((0,y_lim))\r\n ax1.set_xlim((0,len(eeg)/srate))\r\n plt.title ('Hypnogram')\r\n ax1.set_xlabel('Time in Seconds')\r\n ax1.set_ylabel('Frequency in Hz')\r\n \r\n ax2 = ax1.twinx() #Necessary for multiple y-axes\r\n \r\n #Use ax2.plot to draw the hypnogram. Be sure your x values are in seconds\r\n #HINT: Use drawstyle='steps' to allow step functions in your plot\r\n ax2.plot(np.arange(0,len(stages))*30,stages,drawstyle='steps')\r\n\r\n #Label your right y-axis and change the text color to match your plot\r\n ax2.set_ylabel('NREM Stages',color='b')\r\n\r\n \r\n #Set the limits for the y-axis \r\n ax2.set_ylim(0.5,3.5)\r\n ax2.set_xlim((0,len(eeg)/srate))\r\n #Only display the possible values for the stages\r\n ax2.set_yticks(np.arange(1,4))\r\n \r\n #Change the left axis tick color to match your plot\r\n for t1 in ax2.get_yticklabels():\r\n t1.set_color('b')\r\n \r\n #Title your plot \r", "def plot_powerlaw(self, **kwargs):\n\n if self.gamma is None:\n self.exponent()\n p = powerlaw.plot(exponent=-self.gamma,\n xmax=self.max_deg, xmin=self.k_min,\n **kwargs\n )\n pylab.show()\n return p", "def example():\n ldata = 200\n degrees = np.arange(ldata+1, dtype=np.float64)\n degrees[0] = np.inf\n power = degrees**(-1)\n\n clm1 = pysh.SHCoeffs.from_random(power, exact_power=False)\n clm2 = pysh.SHCoeffs.from_random(power, exact_power=True)\n\n fig, ax = plt.subplots()\n ax.plot(clm1.spectrum(unit='per_l'), label='Normal distributed power')\n ax.plot(clm2.spectrum(unit='per_l'), label='Exact power')\n ax.set(xscale='log', yscale='log', xlabel='degree l',\n ylabel='power per degree l')\n ax.grid(which='both')\n ax.legend()\n\n plt.show()", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')", "def plot_fft(x, y, th=1e-4):\n n = x.size\n Lx = x[-1]-x[0]\n yf = np.fft.rfft(y)\n xf = np.fft.rfftfreq(n, d=Lx/n)\n fig = plt.figure(figsize=[9, 9])\n ax = fig.add_subplot(211)\n ax.plot(x, y)\n plt.title('1) first component of ODE solution')\n\n ax = fig.add_subplot(223)\n yf = yf / (n/2)\n ii = (np.abs(yf) > th)\n ii[0] = False\n plt.plot(xf[ii], np.abs(yf[ii]))\n T0 = 1.0/np.mean(xf*np.abs(yf))\n plt.title('2) power spectrum')\n plt.draw()\n plt.pause(2)\n return T0", "def plot_combined_spectrum(SSC, band):\n\n def get_spectrum(SSC, band):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.GHz)\n intensity = spectrum['spectrum'].to(u.K)\n # shift spectrum to rest frequency\n velshift = SSC['velshift']\n frequency = [(-vsys-velshift).to(u.GHz, equivalencies=u.doppler_optical(f)).value for f in frequency]*u.GHz\n # remove NaNs\n frequency, intensity = crossmatch(frequency.to(u.GHz).value, intensity.to(u.K).value)\n return frequency, intensity\n\n def get_model(SSC, band):\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n frequency = (m[:,0]*u.MHz).to(u.GHz)\n model = m[:,1]*u.K\n return frequency.value,model.value\n\n def set_up_figure(SSC, band):\n fig,ax = plt.subplots(nrows=1, ncols=1, squeeze=True, sharex='col', sharey='row', figsize=(10,8))\n ax.text(0.05, 0.9, 'SSC '+str(SSC['no'])+': '+band, color='k', transform=ax.transAxes, ha='left', va='top', weight='bold', fontsize=16, bbox=props)\n return fig,ax\n\n def plot_spectrum(ax, frequency, spectrum):\n ax.plot(frequency, spectrum, lw=1, ls='-', color='k', zorder=3)\n ax.fill_between(frequency, spectrum, [0. for f in frequency], color='grey', alpha=0.5, zorder=2)\n\n def plot_fitted_spectrum(ax, frequency, model):\n ax.plot(frequency, model, lw=1, ls='-', color='r', zorder=5)\n # ax.fill_between(frequency, model, [0. 
for f in frequency], color='r', alpha=0.5, zorder=4)\n\n def get_detected_lines(band=None):\n # get detected species\n all_species = []\n for SSC in SSCs:\n for specie in detected_species[str(SSC['no'])]:\n if not specie in all_species:\n all_species.append(specie)\n # get all lines of the detected species\n all_lines = []\n for specie in all_species:\n slines = [l for l in lines if l['XCLASS']==specie]\n for sl in slines:\n all_lines.append(sl)\n # keep only lines of given band\n if not band==None:\n bandlines = []\n for line in all_lines:\n if band=='LSB':\n if line['restfreq']<350*u.GHz:\n bandlines.append(line)\n elif band=='USB':\n if line['restfreq']>350*u.GHz:\n bandlines.append(line)\n return sorted(bandlines, key=lambda k: k['restfreq'])\n else:\n return sorted(all_lines, key=lambda k: k['restfreq'])\n\n def label_lines(ax, spectrum, band):\n detected_lines = get_detected_lines(band=band)\n for idx,line in enumerate(detected_lines):\n restfreq = line['restfreq'].to(u.GHz).value\n if (restfreq>frequency[0] and restfreq<frequency[-1]):\n if band=='LSB':\n xlim = [342.4, 346.2]\n elif band=='USB':\n xlim = [354.3, 358.1]\n xloc = xlim[0] +((idx+0.5)/len(detected_lines))*(xlim[1]-xlim[0])\n ax.axvline(x=restfreq, ymin=0, ymax=1, color='dimgrey', ls='--', lw=0.5, zorder=1)\n ax.plot([restfreq,xloc], [1.05*np.nanmax(spectrum), 1.05*1.05*np.nanmax(spectrum)], color='dimgrey', ls='--', lw=0.5, zorder=1, clip_on=False)\n ax.text(xloc, 1.06*1.05*np.nanmax(spectrum), line_tex(line), color='dimgrey', fontsize=10, rotation=90, ha='center', va='bottom')\n\n def format_figure(ax, frequency, spectrum, band):\n if band=='LSB':\n ax.set_xlim([342.4, 346.2])\n elif band=='USB':\n ax.set_xlim([354.3, 358.1])\n ax.set_ylim(-0.05*np.nanmax(spectrum), 1.05*np.nanmax(spectrum))\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.1))\n ax.yaxis.set_major_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(2))\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.set_axisbelow(True)\n ax.grid(axis='y', ls=':', c='grey')\n ax.set_xlabel(r'$\\nu_\\mathrm{rest}$ [GHz]', fontsize=12)\n ax.set_ylabel(r'T$_\\mathrm{b}$ [K]', fontsize=12)\n fig.set_tight_layout(True)\n\n def save_figure(fig, band):\n savepath = escape_fname(os.path.join(plotdir, '03.XCLASS_fit', 'combined_spectra', 'SSC_'+str(SSC['no'])+'.'+band+'.combined_spectrum.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')\n\n\n frequency, spectrum = get_spectrum(SSC, band)\n mfrequency, model = get_model(SSC, band)\n fig,ax = set_up_figure(SSC, band)\n plot_spectrum(ax, frequency, spectrum)\n plot_fitted_spectrum(ax, mfrequency, model)\n label_lines(ax, spectrum, band)\n format_figure(ax, frequency, spectrum, band)\n save_figure(fig, band)", "def _plot(self):\r\n fig = plt.figure()\r\n\r\n # Take out second component of intensity if needed\r\n # if self._vna.isTwoComponents():\r\n # intensitySimplified = []\r\n # for i in range(len(self._intensity)):\r\n # tempSet = []\r\n # for j in range(len(self._intensity[i])):\r\n # if (j%2) == 0:\r\n # tempSet.append(self._intensity[i][j])\r\n # intensitySimplified.append(tempSet)\r\n # for i in range(len(self._frequency)):\r\n # plt.plot(self._frequency[i],intensitySimplified[i],label=('%sv' % self._voltages[i][0]))\r\n # else:\r\n for i in range(len(self._frequency)):\r\n plt.plot(self._frequency[i],self._intensity[i],label=('%sv' % self._voltages[i][0]))\r\n 
plt.legend(loc='upper left')\r\n fig.suptitle('Intensity-Frequency with non-Constant Voltage', fontsize=18)\r\n plt.xlabel('Frequency (Hz)', fontsize=18)\r\n plt.ylabel('Intensity (dBm)', fontsize=16)\r\n\r\n # Save plot\r\n self._saveFig()", "def plot_hypnogram(eeg, stages, srate):\r\n \r\n fig,ax1 = plt.subplots() #Needed for the multiple y-axes\r\n \r\n #Use the specgram function to draw the spectrogram as usual\r\n ax1.specgram(eeg, NFFT=256, Fs=srate)\r\n\r\n #Label your x and y axes and set the y limits for the spectrogram\r\n ax1.set_xlabel('Time (seconds)')\r\n ax1.set_ylabel('Frequency (Hz)')\r\n ax1.set_ylim(ymax=30)\r\n \r\n ax2 = ax1.twinx() #Necessary for multiple y-axes\r\n \r\n #Use ax2.plot to draw the hypnogram. Be sure your x values are in seconds\r\n #HINT: Use drawstyle='steps' to allow step functions in your plot\r\n i = 0\r\n bin_size = 30*srate\r\n c = np.zeros(len(eeg)/bin_size)\r\n while i + bin_size < len(eeg):\r\n c[i/bin_size] = classify_epoch(eeg[range(i,i+bin_size)],srate)\r\n i = i + bin_size\r\n \r\n xx = range(0, c.size*30, 30)\r\n ax2.plot(xx,c, drawstyle='steps')\r\n ax2.set_xlim(xmax=3000) #max=3000 for Test, max=3600 for Practice\r\n\r\n #Label your right y-axis and change the text color to match your plot\r\n ax2.set_ylabel('NREM Stage',color='b')\r\n ax2.set_ylim(0.5,3.5)\r\n \r\n #Set the limits for the y-axis \r\n \r\n #Only display the possible values for the stages\r\n ax2.set_yticks(np.arange(1,4))\r\n \r\n #Change the left axis tick color to match your plot\r\n for t1 in ax2.get_yticklabels():\r\n t1.set_color('b')\r\n \r\n #Title your plot \r\n plt.title('Hypnogram and Spectogram for Test Data')\r\n \r\n plt.show()", "def plot_2d(self):\n fig = plt.figure(figsize=(10,8))\n \n d = int(len(self.a_scale.flat)**0.5)\n a_scale = self.a_scale.reshape(d,d)\n c_scale = self.c_scale.reshape(d,d)\n E_coh = self.E_coh.reshape(d,d)\n plt.pcolormesh(a_scale, c_scale, E_coh)\n plt.xlabel('xy linear deformation coefficient')\n plt.xlabel('z linear deformation coefficient')\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('cohesive energy (eV/atom)',\n fontsize='x-large')\n plt.show()\n \n return fig", "def plot_spectrum(inp='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z=9.505, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, show_cont=True, draws=100, figsize=(16, 8), ranges=[(3650, 4980)], Rline=1000, full_log=False, write=False, eazy_templates=None, use_full_dispersion=True, get_spl_templates=False, scale_uncertainty_kwargs=None, plot_unit=None, spline_single=True, sys_err=0.02, return_fit_results=False, use_aper_columns=False, label=None, **kwargs):\n global SCALE_UNCERTAINTY\n \n lw, lr = utils.get_line_wavelengths()\n \n if isinstance(inp, str):\n sampler = SpectrumSampler(inp, **kwargs)\n file = inp\n elif isinstance(inp, pyfits.HDUList):\n sampler = SpectrumSampler(inp, **kwargs)\n file = None\n else:\n file = None\n sampler = inp\n \n if (label is None) & (file is not None):\n label = os.path.basename(file)\n \n spec = sampler.spec\n \n if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):\n if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):\n ap_corr = spec['aper_corr']*1\n else:\n ap_corr = 1\n \n flam = spec['aper_flux']*spec['to_flam']*ap_corr\n eflam = spec['aper_full_err']*spec['to_flam']*ap_corr\n else:\n flam = spec['flux']*spec['to_flam']\n eflam = spec['full_err']*spec['to_flam']\n \n wrest = spec['wave']/(1+z)*1.e4\n wobs = spec['wave']\n mask = spec['valid']\n \n flam[~mask] = np.nan\n eflam[~mask] = np.nan\n \n 
bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)\n\n # bspl = utils.bspline_templates(wave=spec['wave']*1.e4,\n # degree=3,\n # df=nspline)\n \n w0 = utils.log_zgrid([spec['wave'].min()*1.e4,\n spec['wave'].max()*1.e4], 1./Rline)\n \n templates, tline, _A = make_templates(sampler, z,\n bspl=bspl,\n eazy_templates=eazy_templates,\n vel_width=vel_width,\n scale_disp=scale_disp,\n use_full_dispersion=use_full_dispersion,\n disp=spec.disp,\n grating=spec.grating,\n **kwargs,\n )\n \n if scale_uncertainty_kwargs is not None:\n _, escl, _ = calc_uncertainty_scale(file=None,\n data=(spec, _A),\n **scale_uncertainty_kwargs)\n eflam *= escl\n spec['escale'] *= escl\n \n okt = _A[:,mask].sum(axis=1) > 0\n \n _Ax = _A[okt,:]/eflam\n _yx = flam/eflam\n \n if eazy_templates is None:\n _x = np.linalg.lstsq(_Ax[:,mask].T, \n _yx[mask], rcond=None)\n else:\n _x = nnls(_Ax[:,mask].T, _yx[mask])\n \n coeffs = np.zeros(_A.shape[0])\n coeffs[okt] = _x[0]\n \n _model = _A.T.dot(coeffs)\n _mline = _A.T.dot(coeffs*tline)\n _mcont = _model - _mline\n \n full_chi2 = ((flam - _model)**2/eflam**2)[mask].sum()\n cont_chi2 = ((flam - _mcont)**2/eflam**2)[mask].sum()\n \n if return_fit_results:\n return templates, coeffs, flam, eflam, _model, mask, full_chi2\n \n try:\n oktemp = okt & (coeffs != 0)\n \n AxT = (_A[oktemp,:]/eflam)[:,mask].T\n \n covar_i = utils.safe_invert(np.dot(AxT.T, AxT))\n covar = utils.fill_masked_covar(covar_i, oktemp)\n covard = np.sqrt(covar.diagonal())\n \n has_covar = True\n except:\n has_covar = False\n covard = coeffs*0.\n N = len(templates)\n covar = np.eye(N, N)\n \n print(f'\\n# line flux err\\n# flux x 10^-20 erg/s/cm2')\n if label is not None:\n print(f'# {label}')\n \n print(f'# z = {z:.5f}\\n# {time.ctime()}')\n \n cdict = {}\n eqwidth = {}\n \n for i, t in enumerate(templates):\n cdict[t] = [float(coeffs[i]), float(covard[i])]\n if t.startswith('line '):\n lk = t.split()[-1]\n \n # Equivalent width:\n # coeffs, line fluxes are in units of 1e-20 erg/s/cm2\n # _mcont, continuum model is in units of 1-e20 erg/s/cm2/A\n # so observed-frame equivalent width is roughly\n # eqwi = coeffs[i] / _mcont[ wave_obs[i] ]\n \n if lk in lw:\n lwi = lw[lk][0]*(1+z)/1.e4\n continuum_i = np.interp(lwi, spec['wave'], _mcont)\n eqwi = coeffs[i]/continuum_i\n else:\n eqwi = np.nan\n \n eqwidth[t] = eqwi\n \n print(f'{t:>20} {coeffs[i]:8.1f} ± {covard[i]:8.1f} (EW={eqwi:9.1f})')\n \n \n if 'srcra' not in spec.meta:\n spec.meta['srcra'] = 0.0\n spec.meta['srcdec'] = 0.0\n spec.meta['srcname'] = 'unknown'\n \n spec['model'] = _model/spec['to_flam']\n spec['mline'] = _mline/spec['to_flam']\n \n data = {'z': float(z),\n 'file':file,\n 'label':label,\n 'ra': float(spec.meta['srcra']),\n 'dec': float(spec.meta['srcdec']),\n 'name': str(spec.meta['srcname']),\n 'wmin':float(spec['wave'][mask].min()),\n 'wmax':float(spec['wave'][mask].max()),\n 'coeffs':cdict,\n 'covar':covar.tolist(),\n 'wave': [float(m) for m in spec['wave']],\n 'flux': [float(m) for m in spec['flux']],\n 'err': [float(m) for m in spec['err']],\n 'escale': [float(m) for m in spec['escale']],\n 'model': [float(m) for m in _model/spec['to_flam']],\n 'mline':[float(m) for m in _mline/spec['to_flam']],\n 'templates':templates, \n 'dof': int(mask.sum()), \n 'fullchi2': float(full_chi2), \n 'contchi2': float(cont_chi2),\n 'eqwidth': eqwidth,\n }\n \n for k in ['z','wmin','wmax','dof','fullchi2','contchi2']:\n spec.meta[k] = data[k]\n \n #fig, axes = plt.subplots(len(ranges)+1,1,figsize=figsize)\n if len(ranges) > 0:\n fig = 
plt.figure(figsize=figsize, constrained_layout=True)\n gs = GridSpec(2, len(ranges), figure=fig)\n axes = []\n for i, _ra in enumerate(ranges):\n axes.append(fig.add_subplot(gs[0,i]))\n \n axes.append(fig.add_subplot(gs[1,:]))\n \n else:\n fig, ax = plt.subplots(1,1,figsize=figsize)\n axes = [ax]\n \n _Acont = (_A.T*coeffs)[mask,:][:,:nspline]\n _Acont[_Acont < 0.001*_Acont.max()] = np.nan\n \n if (draws is not None) & has_covar:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mu = np.random.multivariate_normal(coeffs[oktemp], covar_i, size=draws)\n \n #print('draws', draws, mu.shape, _A.shape)\n mdraws = _A[oktemp,:].T.dot(mu.T)\n else:\n mdraws = None\n \n if plot_unit is not None:\n unit_conv = (1*spec.meta['flamunit']).to(plot_unit,\n equivalencies=spec.equiv).value\n else:\n unit_conv = np.ones(len(wobs))\n \n for ax in axes:\n if 1:\n ax.errorbar(wobs, flam*unit_conv, eflam*unit_conv,\n marker='None', linestyle='None',\n alpha=0.5, color='k', ecolor='k', zorder=100)\n\n ax.step(wobs, flam*unit_conv, color='k', where='mid', lw=1, alpha=0.8)\n # ax.set_xlim(3500, 5100)\n\n #ax.plot(_[1]['templz']/(1+z), _[1]['templf'])\n \n ax.step(wobs[mask], (_mcont*unit_conv)[mask],\n color='pink', alpha=0.8, where='mid')\n ax.step(wobs[mask], (_model*unit_conv)[mask],\n color='r', alpha=0.8, where='mid')\n \n cc = utils.MPL_COLORS\n for w, c in zip([3727, 4980, 6565, 9070, 9530, 1.094e4, 1.282e4, \n 1.875e4], \n [cc['purple'], cc['b'], cc['g'], 'darkred', 'darkred', \n cc['pink'], cc['pink'], cc['pink']]):\n wz = w*(1+z)/1.e4\n dw = 70*(1+z)/1.e4\n ax.fill_between([wz-dw, wz+dw], [0,0], [100,100], \n color=c, alpha=0.07, zorder=-100)\n \n \n if mdraws is not None:\n ax.step(wobs[mask], (mdraws.T*unit_conv).T[mask,:],\n color='r', alpha=np.maximum(1./draws, 0.02), zorder=-100, where='mid')\n\n if show_cont:\n ax.plot(wobs[mask], (_Acont.T*unit_conv[mask]).T,\n color='olive', alpha=0.3)\n \n ax.fill_between(ax.get_xlim(), [-100, -100], [0, 0], color='0.8', \n alpha=0.5, zorder=-1)\n\n ax.fill_betweenx([0, 100], [0,0], [1215.67*(1+z)/1.e4]*2, \n color=utils.MPL_COLORS['orange'], alpha=0.2,\n zorder=-1)\n \n ax.grid()\n\n # axes[0].set_xlim(1000, 2500)\n # ym = 0.15; axes[0].set_ylim(-0.1*ym, ym)\n \n for i, r in enumerate(ranges):\n axes[i].set_xlim(*[ri*(1+z)/1.e4 for ri in r])\n # print('xxx', r)\n \n if spec.filter == 'clear':\n axes[-1].set_xlim(0.6, 5.29)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.1))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.5))\n elif spec.filter == 'f070lp':\n axes[-1].set_xlim(0.69, 1.31)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n elif spec.filter == 'f100lp':\n axes[-1].set_xlim(0.99, 1.91)\n axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))\n axes[-1].xaxis.set_major_locator(MultipleLocator(0.1))\n elif spec.filter == 'f170lp':\n axes[-1].set_xlim(1.69, 3.21)\n elif spec.filter == 'f290lp':\n axes[-1].set_xlim(2.89, 5.31)\n else:\n axes[-1].set_xlim(wrest[mask].min(), wrest[mask].max())\n \n axes[-1].set_xlabel(f'obs wavelenth, z = {z:.5f}')\n \n #axes[0].set_title(os.path.basename(file))\n \n for ax in axes:\n xl = ax.get_xlim()\n ok = wobs > xl[0]\n ok &= wobs < xl[1]\n ok &= np.abs(wrest-5008) > 100\n ok &= np.abs(wrest-6564) > 100\n ok &= mask\n if ok.sum() == 0:\n ax.set_visible(False)\n continue\n \n ymax = np.maximum((_model*unit_conv)[ok].max(), 10*np.median((eflam*unit_conv)[ok]))\n \n ymin = np.minimum(-0.1*ymax, -3*np.median((eflam*unit_conv)[ok]))\n ax.set_ylim(ymin, ymax*1.3)\n # print(xl, 
ymax)\n \n if ok.sum() > 0:\n if (np.nanmax((flam/eflam)[ok]) > 20) & (full_log):\n ax.set_ylim(0.005*ymax, ymax*5)\n ax.semilogy()\n \n if len(axes) > 0:\n gs.tight_layout(fig, pad=0.8)\n else:\n fig.tight_layout(pad=0.8)\n \n if label is not None:\n fig.text(0.015*12./12, 0.005, f'{label}',\n ha='left', va='bottom',\n transform=fig.transFigure, fontsize=8)\n \n fig.text(1-0.015*12./12, 0.005, time.ctime(),\n ha='right', va='bottom',\n transform=fig.transFigure, fontsize=6)\n \n \n return fig, spec, data", "def generate_spectrum(self):\n matlab_method = self.matlab_mapper[self.matlab_script]\n n, dm, peak_locations, omega_res, n_shell, gamma_amp = matlab_method(float(self.n_max), float(self.n_max_s),\n float(self.num_channels), float(self.scale),\n float(self.omega_shift), float(self.dg),\n float(self.dgs), float(self.gamma_amp_factor),\n float(self.amp_factor), float(self.epsilon2),\n nargout=6)\n dm = [list(d) for d in dm]\n self.num_timesteps = len(dm[0])\n if type(peak_locations) == float:\n peak_locations = list([peak_locations])\n else:\n peak_locations = [list(p) for p in peak_locations]\n spectrum = Spectrum(n=n, dm=dm, peak_locations=peak_locations, n_shell=n_shell, gamma_amp=gamma_amp, **self.__dict__)\n return spectrum", "def plot_array(array, subplot_ijk, title=\"\", font_size=18, cmap=None):\n\n sp = plt.subplot(*subplot_ijk)\n sp.set_title(title, fontsize=font_size)\n plt.axis('off')\n plt.imshow(array, cmap=cmap)", "def plot_spectra(path):\r\n plt.figure(figsize=(20, 10))\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()", "def fit_plot_features_spectrum(star, path):\n # fit the features\n waves, fluxes, npts, results = fit_features_spec(star, path)\n\n # plot the data\n fig, ax = plt.subplots(\n 2, 1, figsize=(8, 6), sharex=True, gridspec_kw={\"height_ratios\": [6, 1]}\n )\n fluxes[npts == 0] = np.nan\n ax[0].plot(waves, fluxes, color=\"k\", lw=0.5, alpha=0.7)\n\n # plot the fitted models\n # 2 Gaussians\n ax[0].plot(waves, results[0](waves), lw=2, label=\"2 Gaussians\")\n\n # 2 asymmetric Gaussians (with the two individual profiles)\n ax[0].plot(\n waves,\n results[3](waves),\n lw=2,\n label=\"2 mod. Gaussians\",\n )\n ax[0].plot(waves, results[3][0](waves), color=\"C1\", lw=1, ls=\"--\")\n ax[0].plot(waves, results[3][1](waves), color=\"C1\", lw=1, ls=\"--\")\n\n # 2 Drudes\n ax[0].plot(\n waves,\n results[1](waves),\n ls=\"--\",\n lw=1,\n label=\"2 Drudes\",\n )\n\n # 2 asymmetric Drudes\n ax[0].plot(\n waves,\n results[4](waves),\n ls=\"--\",\n lw=1,\n label=\"2 mod. Drudes\",\n )\n\n # 2 Lorentzians\n ax[0].plot(\n waves,\n results[2](waves),\n ls=\":\",\n lw=1,\n label=\"2 Lorentzians\",\n )\n\n # 2 asymmetric Lorentzians\n ax[0].plot(\n waves,\n results[5](waves),\n ls=\":\",\n lw=1,\n label=\"2 mod. Lorentzians\",\n )\n\n # 1 asymmetric Drude\n ax[0].plot(\n waves,\n results[6](waves),\n ls=\"-.\",\n lw=1,\n label=\"1 mod. 
Drude\",\n )\n\n # finish the upper plot\n ax[0].set_ylabel(\"flux\")\n ax[0].set_ylim(-0.4e-12, 0.05e-12)\n ax[0].axhline(color=\"k\", ls=\":\")\n ax[0].yaxis.set_major_locator(MaxNLocator(prune=\"lower\"))\n ax[0].legend(fontsize=fs * 0.6)\n\n # plot the residuals (for the best fitting model)\n ax[1].scatter(waves, results[3](waves) - fluxes, s=0.7, color=\"C1\")\n ax[1].set_ylim(-1e-13, 1e-13)\n ax[1].axhline(ls=\"--\", c=\"k\", alpha=0.5)\n ax[1].set_ylabel(\"residual\")\n\n # finish and save the plot\n plt.xlabel(r\"$\\lambda$ [$\\mu m$]\")\n plt.subplots_adjust(hspace=0)\n plt.savefig(\n \"/Users/mdecleir/spex_nir_extinction/Figures/\" + star + \"_spec_features.pdf\",\n bbox_inches=\"tight\",\n )", "def plot_freq_spec(data, title):\n plt.title(title)\n\n def plot_freq_spec(axis, line, label):\n n = len(axis)\n fft = fftpack.fft(axis) / n\n fft = fft[range(int(n / 2))]\n plt.plot(range(int(n / 2)), abs(fft), line, label=label)\n plot_freq_spec(data[:, 0], 'r-', label='x')\n plot_freq_spec(data[:, 1], 'g-', label='y')\n plot_freq_spec(data[:, 2], 'b-', label='z')", "def plot_pixel_array(arr, figsize=(10, 10)):\n arr = arr.squeeze()\n plt.figure(figsize=figsize)\n plt.imshow(arr, cmap=plt.cm.bone)\n plt.show()", "def example():\n # --- input data filename ---\n infile = os.path.join(os.path.dirname(__file__),\n '../../ExampleDataFiles/MarsTopo719.shape')\n coeffs, lmax = pysh.shio.shread(infile)\n\n # --- plot grid ---\n grid = pysh.expand.MakeGridDH(coeffs, csphase=-1)\n fig_map = plt.figure()\n plt.imshow(grid)\n\n # ---- compute spectrum ----\n ls = np.arange(lmax + 1)\n pspectrum = pysh.spectralanalysis.spectrum(coeffs, unit='per_l')\n pdensity = pysh.spectralanalysis.spectrum(coeffs, unit='per_lm')\n\n # ---- plot spectrum ----\n fig_spectrum, ax = plt.subplots(1, 1)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('degree l')\n ax.grid(True, which='both')\n\n ax.plot(ls[1:], pspectrum[1:], label='power per degree l')\n ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')\n\n ax.legend()\n\n fig_map.savefig('SHRtopography_mars.png')\n fig_spectrum.savefig('SHRspectrum_mars.png')\n print('mars topography and spectrum saved')\n\n # plt.show()", "def plt_spectrogram(X,win_length, hop_size, sample_rate, zoom_x=None, zoom_y=None,tick_labels='time-freq'):\n\n # Find the size of stft\n Nf,Nt=np.shape(X)\n\n # Compute the log magnitude spectrogram\n X=20*np.log10(np.abs(X))\n\n # Extract the first half of the spectrum for each time frame\n X=X[0:Nf/2]\n # Nf=np.shape(X)[0]\n #\n # # Generate time vector for plotting\n # times=(hop_size/float(sample_rate))*np.arange(Nt)\n #\n # # Generate frequency vector for plotting\n # freqs=(float(sample_rate)/win_length)*np.arange(Nf)\n #\n # # Generate time and frequency matrices for pcolormesh\n # times_matrix,freqs_matrix=np.meshgrid(times,freqs)\n # #\n # # Plot the log magnitude spectrogram\n # plt.title('Log magnitude spectrogram')\n # if tick_labels == 'bin-frame':\n # plt.pcolormesh(X)\n # plt.xlabel('Time-frame Number')\n # plt.ylabel('Frequency-bin Number')\n # else:\n # plt.pcolormesh(times_matrix,freqs_matrix,X)\n # plt.xlabel('Time (sec)')\n # plt.ylabel('Frequency (Hz)')\n #\n # # Zoom in on the plot if specified\n # if zoom_x is None and zoom_y is None:\n # plt.axis('tight')\n #\n # if zoom_x is not None:\n # plt.xlim(zoom_x)\n #\n # if zoom_y is not None:\n # plt.ylim(zoom_y)\n #\n return X", "def comp_output_spectra(self):\n assert(hasattr(self,'r'))\n \n self.nx=int(self.nx)\n \n 
r_mat=self.r.T.reshape(self.nx,self.nx,self.N)\n\n in_allfreqs = np.fft.fftshift(np.fft.fftfreq(self.nx,d=self.L/self.nx))\n \n self.freqs=in_allfreqs[self.nx/2:]\n \n r_dft_flat=np.fft.fftshift(np.fft.fft2(r_mat,axes=[0,1]),axes=[0,1])*(self.L/self.nx)**2\n\n r_pw=abs(r_dft_flat)**2 \n r_pw_profiles=gl.dft2d_profiles(r_pw)\n \n self.re_pw_profile=np.mean(r_pw_profiles,axis=0)\n self.he_pw_profile=self.inputs.in_mean_pw_profile", "def visualize(self, name):\n size = self.experiments[name]['size']\n matrix = [[self.experiments[name]['hi-c'][0][i+size*j] \\\n for i in xrange(size)] \\\n for j in xrange(size)]\n plt.imshow(log2(matrix), origin='lower')\n plt.show()", "def plot_data(self):", "def plot_fft(self):\r\n\r\n self.ipx = int(self.imageData.shape[1]/2.)\r\n\r\n self.ipy = int(self.imageData.shape[2]/2.)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,self.ipx-2:self.ipx+2,self.ipy-2:self.ipy+2])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'ko-', markersize=2.5)\r\n\r\n mpl.plot(self.freqs[self.freq_point], np.mean(np.mean(nearf,axis=1), axis=1)[self.freq_point], 'ro', markersize=5)\r\n\r\n nearf = np.absolute(self.DF[0:(self.freqs.shape[0]/2)-1,-6:-1,-6:-1])\r\n\r\n mpl.plot(self.freqs[0:(self.freqs.shape[0]/2)-1], np.mean(np.mean(nearf,axis=1), axis=1),\r\n\r\n 'c-', markersize=2.5)\r\n\r\n mpl.title('FFT center of image and corner')", "def _show(self, a):\n fig = plt.figure()\n fig.set_size_inches((2, 2))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.set_cmap('hot')\n ax.imshow(a, aspect='equal')\n plt.show()", "def show(self):\n lines, = pylab.plot(self.wavelengths, self.intensities)\n return lines" ]
[ "0.6865385", "0.68452597", "0.68080705", "0.6768042", "0.66995203", "0.66993386", "0.64395714", "0.6430631", "0.6409568", "0.63501585", "0.6320459", "0.6320107", "0.6272982", "0.62168336", "0.6190964", "0.6128101", "0.6125795", "0.61254364", "0.6112583", "0.61059433", "0.61004025", "0.6051989", "0.60469955", "0.60303813", "0.6016913", "0.60047066", "0.59806514", "0.5972129", "0.5967924", "0.59647965", "0.59573674", "0.59401906", "0.5902001", "0.58994156", "0.58940727", "0.5887024", "0.5883663", "0.5881895", "0.5868602", "0.5863277", "0.58131737", "0.5803669", "0.5799181", "0.5791832", "0.5791678", "0.57845235", "0.5766396", "0.5765937", "0.57579726", "0.5748388", "0.57471454", "0.5743975", "0.5743222", "0.57361853", "0.57331735", "0.57294226", "0.5724864", "0.57159734", "0.57146317", "0.5714033", "0.57003266", "0.56887686", "0.5684357", "0.5680311", "0.56775707", "0.56703854", "0.566729", "0.5663138", "0.56616795", "0.5648976", "0.5635458", "0.5633164", "0.56272244", "0.56137204", "0.561091", "0.56044257", "0.55998886", "0.55992967", "0.5581529", "0.5575866", "0.5566818", "0.5564039", "0.556196", "0.5559689", "0.5555338", "0.5552164", "0.55517364", "0.5539533", "0.5536046", "0.55353934", "0.5519128", "0.5510835", "0.5505155", "0.549707", "0.54894567", "0.5487903", "0.548674", "0.54863113", "0.5484306", "0.5479173", "0.54766774" ]
0.0
-1
raise a figure to the top
def show_plot(figure_id=None):
    if figure_id is None:
        fig = pl.gcf()
    else:
        # do this even if figure_id == 0
        fig = pl.figure(num=figure_id)
    pl.show()
    pl.pause(1e-9)
    fig.canvas.manager.window.activateWindow()
    fig.canvas.manager.window.raise_()
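A minimal usage sketch for the show_plot function defined just above. It assumes pl is matplotlib.pyplot under a Qt backend such as Qt5Agg, since activateWindow() and raise_() are Qt window methods that other backends may not expose; the figure numbers and the sine/cosine data are illustrative only.

import matplotlib
matplotlib.use("Qt5Agg")            # assumed Qt backend; raise_()/activateWindow() are Qt-only
import matplotlib.pyplot as pl
import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
pl.figure(1)                        # illustrative figure number
pl.plot(x, np.sin(x))
pl.figure(2)
pl.plot(x, np.cos(x))

show_plot(1)                        # re-select figure 1 and bring its window to the front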
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_top(self):\n return group()", "def XPBringRootWidgetToFront(inWidget):\n pass", "def autostop():", "def raise_window(window):\n window.attributes('-topmost', 1)\n window.attributes('-topmost', 0)", "def back_extra(self, ontop):\n\n self.frames[ontop].forget()\n self.extraframe.pack(anchor='center', expand=True, fill='y')", "def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig", "def show():\n setup()\n plt.show()", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def draw(self):\n self.figure.show()\n self.figure.canvas.draw()", "def front_wall(self):\n self.place = \"bed\"\n print(\"You are infront of the bed.\"\n \"You look under it and find a notebook.\")\n nb = Notebook('notebook')\n nb.clue()", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def page_top(self):\n self._pos = 0\n self._display()", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def clear(self):\n self._fig = go.Figure()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def update_figure(self):\n\n self.draw()", "def win_raise(self):\n self.raise_()\n self.activateWindow()", "def OnFloated(self, event):\n self._floating = True\n wx.PostEvent(self, wxDockPaneFloatedEvent())", "def draw(self):\n self.figure.canvas.draw_idle()", "def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()", "def style_snapshot(figure: Figure) -> Figure:\n figure.axis.visible = False\n figure.xgrid.visible = False\n figure.ygrid.visible = False\n figure.toolbar_location = None\n figure.toolbar.logo = None\n figure.outline_line_width = 0\n figure.outline_line_alpha = 0\n\n return figure", "def set_top_widget(self, widg):\r\n if widg in self.widgets:\r\n self.widgets.remove(widg)\r\n self.widgets.insert(0, widg)\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if not i == widg:\r\n i.unfocus()", "def plot(self):\n self.fig = plt.figure('black hole')\n self.fig.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n self.ax = plt.subplot()\n\n if self.img2 is not None:\n self.ax.imshow(self.img2)\n else:\n print(\"No black hole deformation in the memory, displayed the original image instead.\")\n self.ax.imshow(self.img_debut)\n\n self.fig.canvas.set_window_title('Black hole')\n self.ax.set_title(\"scrool to zoom in or out \\nright click to add an offset in the background \\nleft click to refresh image \\n close the option windows to stop the program\")\n self.fig.canvas.mpl_connect('scroll_event', self.onscroll)\n self.fig.canvas.mpl_connect('button_press_event', self.onclick)\n self.fig.canvas.mpl_connect('axes_leave_event', self.disconnect)\n self.fig.canvas.mpl_connect('axes_enter_event', self.connect)\n\n self.draw()", "def figure4():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_4',\n 'legend': ['control', 'apamin'],\n 'legend_size': 8,\n 'y_on': True}\n line_styles = ['-', 
'dotted']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate figure 1 (top)\n for ix, g_sk_bar in enumerate([0.3, 0]):\n t, y = solver(100, g_sk_bar=g_sk_bar)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 1, 2)\n t1 = 1200\n t, y = solver(t1, t_start=50, duration=t1, i_bias_on=0.33, g_sk_bar=0.03)\n plt.plot(t, y[:, 0], 'k-')\n\n plot_settings['y_limits'] = [-100, 30]\n plot_settings['x_limits'] = [0, t1]\n plot_settings['y_ticks'] = [-80, -60, -40, -20, 0, 20]\n plot_settings['locator_size'] = 10\n plot_settings['scale_size'] = 100\n plot_settings['legend'] = None\n alter_figure(plot_settings, close=True) # Alter plot for publication", "def topStack(self):\r\n\r\n self.z_stack=0\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def tight_layout(self):\n self.fig.tight_layout()\n self.canvas.draw()", "def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)", "def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def show():\n\tplt.show()", "def __enter__(self):\n if self.back_flag:\n # Set LaTeX params\n matplotlib.rcParams.update({ \n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n \"pgf.preamble\": \"\\n\".join( self.packages ),\n })\n plt.rc('font', size=self.SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=self.BIGGER_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=self.MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=self.MEDIUM_SIZE) # legend fontsize\n plt.rc('figure', titlesize=self.BIGGEST_SIZE) # fontsize of the figure title", "def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n return fig, ax", "def make_space_above(self, ax, topmargin=1):\n fig = ax.figure\n s = fig.subplotpars\n w, h = fig.get_size_inches()\n\n figh = h - (1-s.top)*h + topmargin\n fig.subplots_adjust(bottom=s.bottom*h/figh, top=1-topmargin/figh)\n fig.set_figheight(figh)", "def show_fig_and_wait(self):\n\n # window management\n self.fig.canvas.manager.show()\n self.fig.canvas.draw_idle()\n # starting a 'blocking' loop to let the user interact\n self.fig.canvas.start_event_loop(timeout=-1)", "def clear_figure(self):\n self.figure.clf()", "def _setup_plot(x: float, y: float) -> plt.figure:\n LOG.debug(\"Initializing plot.\")\n plt.ion()\n fig = plt.figure(figsize=(x, y), num=\"GlacierFlowModel\")\n fig.patch.set_facecolor(\"black\")\n return fig", "def show(self):\n plt.show()", "def 
figure():\n global fig\n return fig", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def back(self, ontop):\n\n self.frames[ontop].forget()\n self.mainframe.pack(anchor='center', expand=True, fill='y')", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()", "def add_figure(self,sig,index,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(sig)", "def autorange(self):\n self._checkfigure()\n self.axes.autoscale_view(True)", "def grow_main(self):\n self._grow_main(self.change_ratio)\n self.group.layout_all()", "def draw(self):\n self.strip.show()", "def show_figure(self):\n pylab.show()", "def hide(self):\r\n self.rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT -2000)", "def move_to_win(self):\n self.external_win = PlotWindow(plot=self.pw, parent=self)\n self.external_win.closeWin.connect(lambda: self.layout().takeAt(1))\n self.external_win.closeWin.connect(lambda: self.layout().insertWidget(1, self.pw))\n self.external_win.closeWin.connect(lambda: self.btn_open.setEnabled(True))\n self.external_win.show()", "def redraw(self):\n # enough to go to front, don't need to clear the line\n sys.stderr.write(self._FRONT)\n self.draw()", "def add_figure1(self,x,y,index=1,title='',xlabel='',ylabel=''):\n self.last_index = index\n ax = self.fig.add_subplot(self.position+index)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(x,y)", "def visualise(self):\n self.w = VisualizeSetupBox(self.master, self._df)\n self.master.wait_window(self.w.top)", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def empty_figure() -> object:\n figure = go.Figure(go.Scatter(x=[], y=[]))\n figure.update_layout(template=None)\n figure.update_xaxes(showgrid=False, showticklabels=False, zeroline=False)\n figure.update_yaxes(showgrid=False, showticklabels=False, zeroline=False)\n\n return figure", "def reset_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n if top.ey > 0:\n top.reset_y()", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def figure1():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_1',\n 
'legend_size': 8,\n 'legend': None,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n plt.figure(figsize=(5, 2)) # Create figure\n plt.plot(t, y[:, 0], 'k-') # Plot solution\n\n \"\"\"\n Annotate plot with figures\n \"\"\"\n plt.gca().annotate('fAHP', xy=(13.5, -65), xytext=(17, -60),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('ADP', xy=(15.5, -66), xytext=(25, -65),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('mAHP', xy=(38, -77), xytext=(43, -72),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)", "def page_top(self):\n self._npos = 0\n self.display()", "def subtract(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def grow_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n _bot = find_sandwich_bottom(top)\n if _bot is None:\n return\n if top.ey > 0:\n top.reset_y()\n _ty = top.spr.get_xy()[1]\n _th = top.spr.get_dimensions()[1]\n _by = _bot.spr.get_xy()[1]\n _dy = _by - (_ty + _th)\n if _dy > 0:\n top.expand_in_y(_dy / top.scale)\n top.refresh()", "def update_figure(picks, curve, active_well): \n w = p.get_well(active_well) ##selects the correct welly.Well object\n picks_df = pd.read_json(picks)\n picks_selected = picks_df[picks_df['UWI'] == active_well.replace(' ', '-')]\n \n # regenerate figure with the new horizontal line\n fig = helper.make_log_plot(w=w, ymin=ymin)# , resample=0.1) # resample needs a float to change basis\n fig.update_layout(uirevision=active_well)\n helper.update_picks_on_plot(fig, picks_selected)\n \n return fig", "def all():\n adjust_spines()\n draggable_legend()\n plt.gcf().canvas.mpl_connect('close_event', handle_close)", "def display(self):\n with push_style():\n fill(255)\n circle((self.xoff + self.x, self.yoff + self.y), 6, mode=\"CENTER\")", "def detach_plot(self):\n detached = tk.Toplevel(self)\n detached.wm_title(\"Glycoprotein\")\n fig = mpl.figure.Figure(figsize=(5, 4), dpi=100)\n ax = fig.add_subplot(111)\n chid = self.chain.get()\n\n l = len(self.myGlycosylator.sequences[chid])\n sequons = [k for k in self.myGlycosylator.sequons.keys() if chid in k[:len(chid)]]\n trees = self.original_glycans.copy()\n trees.update(self.linked_glycans)\n self.myDrawer.draw_glycoprotein(l, self.myGlycosylator.get_start_resnum(chid), sequons, ax = ax, axis = 0,\n trees = trees, names = self.names, sequon_color = self.sequon_colors)\n ax.axis('equal')\n ax.axis('off')\n\n canvas = FigureCanvasTkAgg(fig, master=detached)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n toolbar = NavigationToolbar2TkAgg(canvas, detached)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)", "def setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n 
self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)", "def showPrevFrame(self):\n if(self.hasPrevFrame()):\n self.activeFrames.pop()\n self.activeFrames[-1].tkraise()\n else:\n self.showFrame(\"frame_start\")", "def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())", "def display(self):\n self.figure, self.axes = self.createFigure()\n\n self.setupLayout()\n self.quitFlag = False\n self.animation = animation.FuncAnimation(self.figure, self.animate, interval=100)\n plt.show()", "def alarm(self, event):\r\n\r\n # top left corner of top level window\r\n x1_coordinate, y1_coordinate = self.winfo_rootx(), self.winfo_rooty()\r\n\r\n # bottom right corner of top level window\r\n x2_coordinate = x1_coordinate + self.winfo_width()\r\n y2_coordinate = y1_coordinate + self.winfo_height()\r\n if not (x1_coordinate < event.x_root < x2_coordinate and\r\n y1_coordinate < event.y_root < y2_coordinate):\r\n self.attributes(\"-alpha\", 0.1)\r\n self.bell()\r\n self.after(100, lambda: self.attributes(\"-alpha\", 1))", "def plot_clear():\n plt.cla()", "def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", 
size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)", "def set_frame(self):\n plt.tick_params(\n bottom=False,\n labelbottom=False,\n left=False,\n labelleft=False,\n )\n # Thicken frame.\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n self.ax.spines[axis].set_linewidth(self.framesize)\n # Diagonal.\n self.ax.plot(\n [0, 1],\n [0, 1],\n transform=self.ax.transAxes,\n lw=self.framesize,\n color=\"black\",\n zorder=1,\n )\n # Axis labels.\n self.ax.set_ylabel(self.y, size=self.labelsize, labelpad=20)\n self.ax.set_xlabel(self.x, size=self.labelsize, labelpad=20)\n self.ax.set_title(self.title, size=self.titlesize, pad=60)\n self.ax.xaxis.set_label_position(\"top\")\n\n return self", "def figure5():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 500,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_5',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n plt.figure(figsize=(5, 2))\n\n t, y = solver(3000, duration=2400, i_bias_on=0.22, t_start=60)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings) # Alter figure for publication\n plt.gca().add_patch(patches.Rectangle((40, -75), 120, 16, fill=False)) # Draw rectangle to highlight inset\n\n \"\"\"\n Create inset of highlighted region\n Due to inset: alter_figure is not used\n \"\"\"\n plt.axes([.75, .5, .25, .4], axisbg='y')\n\n ix_start = np.where(t < 40)[0][-1] # Find index for beginning of inset\n ix_end = np.where(t < 160)[0][-1] # Find index for end of inset\n v_highlighted = y[ix_start:ix_end, 0]\n plt.plot(t[ix_start:ix_end], v_highlighted, 'k')\n plt.ylim([-75, -55])\n plt.box('off')\n plt.xticks([])\n plt.yticks([])\n plt.xlim([t[ix_start], t[ix_end]])\n add_scalebar(plt.gca(), matchx=False, matchy=False, sizex=25, sizey=5, hidey=False, labelx='25', labely='5', loc=1)\n\n plt.savefig('figures/figure_5.pdf', dpi=1200)\n plt.close()", "def bpy_modal(self):\r\n self._pre_draw_common()", "def do_layout(self, *args, **kw):\n if self.use_draw_order and self.component is not None:\n self._layout_as_overlay(*args, **kw)\n else:\n super(PlotGrid, self).do_layout(*args, **kw)\n return", "def init_plot(self, master):\n b = Figure(figsize=(8, 6), dpi=100)\n ac = b.add_subplot(111)\n ac.plot(10, 10)\n ac.set_title('Current tour plot')\n ac.set_xlabel('X axis coordinates')\n ac.set_ylabel('Y axis coordinates')\n ac.grid(True)\n canvas = FigureCanvasTkAgg(b, master)\n canvas.draw()\n canvas.get_tk_widget().grid(row=1, column=1, sticky=W)", "def do_top(self, arg):\n if self.curindex == 0:\n self.error('Oldest frame')\n return\n self._select_frame(0)", "def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)", "def mine():\n\n fig = new_slide()\n slide_heading(fig, 'Lesser-maintained parts')\n\n theta = np.linspace(0, 2*np.pi)\n x = np.cos(theta - np.pi/2)\n y = np.sin(theta - np.pi/2)\n z = theta\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n markerline, stemlines, baseline = ax.stem(\n x, y, z, 
linefmt='grey', markerfmt='D', bottom=np.pi)\n markerline.set_markerfacecolor('none')\n\n ax = fig.add_subplot(1, 2, 2)\n ax.axis('off')\n ax.imshow(imread('webagg.png'))\n\n yield fig", "def i_paint_main_head(self):\n raise NotImplemented('method not implemented yet!')", "def View_Preorder( self ):\r\n cb.order = 0\r\n self.system.Draw( )", "def topSash(parent,defaultSize=(100,100),onSashDrag=None):\r\n sash = wx.SashLayoutWindow(parent,style=wx.SW_3D)\r\n sash.SetDefaultSize(defaultSize)\r\n sash.SetOrientation(wx.LAYOUT_HORIZONTAL)\r\n sash.SetAlignment(wx.LAYOUT_TOP)\r\n sash.SetSashVisible(wx.SASH_BOTTOM, True)\r\n if onSashDrag:\r\n id = sash.GetId()\r\n sash.Bind(wx.EVT_SASH_DRAGGED_RANGE, onSashDrag,id=id,id2=id)\r\n return sash", "def figure8():\n\n plot_settings = {'y_limits': [15, 60],\n 'x_limits': None,\n 'y_ticks': [20, 30, 40, 50, 60],\n 'locator_size': 5,\n 'y_label': 'ISI (ms)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_8',\n 'legend': ['First ISI', 'Second ISI'],\n 'legend_size': 8,\n 'y_on': True,\n 'legend_location': 4}\n\n g_t_bars = np.linspace(0.02, 0.2, 10)\n isi = np.zeros((len(g_t_bars), 2))\n\n for ix, g_t_bar in enumerate(g_t_bars):\n t, y = solver(200, t_start=15, duration=260, g_t_bar=g_t_bar)\n t_spike, f = spike_times(t, y[:, 0])\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_t_bars[3], 35, 0, 11, head_width=0, head_length=0, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[3], 46, -0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[3], 35, 0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Acceleration\", (0.1, 35), fontsize=8)\n plt.gca().annotate(\"Adaptation\", (0.01, 46), fontsize=8)\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n g_n_bars = np.linspace(0.02, 0.2, 10)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_n_bar in enumerate(g_n_bars):\n t, y = solver(200, g_n_bar=g_n_bar, duration=260, t_start=15, g_t_bar=0.02)\n t_spike, f = spike_times(t, y[:, 0])\n\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_n_bars[3], 30, 0, 10, head_width=0, head_length=0, fc='k', ec='k')\n plt.gca().arrow(g_n_bars[3], 40, -0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_n_bars[3], 30, 0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Acceleration\", (0.1, 30), fontsize=8)\n plt.gca().annotate(\"Adaptation\", (0.015, 40), fontsize=8)\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = \"\"\n plot_settings['y_on'] = False\n plot_settings['legend_location'] = 4\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n g_t_bars = np.linspace(0.02, 0.16, 8)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_t_bar in enumerate(g_t_bars):\n t, y = solver(200, g_t_bar=g_t_bar, duration=260, t_start=15, ca_type=1)\n t_spike, f = spike_times(t, y[:, 0])\n\n 
isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_t_bars[2], 25, -0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[4], 25, 0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Adaptation\", (0.06, 25), fontsize=8)\n\n plot_settings['y_limits'] = [0, 45]\n plot_settings['y_ticks'] = [0, 10, 20, 30, 40]\n plot_settings['locator_size'] = 5\n plot_settings['y_label'] = 'ISI (ms)'\n plot_settings['y_on'] = True\n plot_settings['legend_location'] = 3\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 4)\n g_n_bars = np.linspace(0.02, 0.16, 8)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_n_bar in enumerate(g_n_bars):\n t, y = solver(200, duration=260, t_start=15, g_n_bar=g_n_bar, g_t_bar=0.02, ca_type=2)\n t_spike, f = spike_times(t, y[:, 0])\n\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_n_bars[2], 20, -0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_n_bars[4], 20, 0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Adaptation\", (0.06, 20), fontsize=8)\n\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = ''\n plot_settings['y_on'] = False\n plot_settings['legend_location'] = 2\n alter_figure(plot_settings, close=True)", "def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def plot_refresh():\n figure.canvas.draw()", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n 
\n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def figure7():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 50,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_7',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n marker = ['o', 's', '^']\n line_styles = ['-', 'dotted', '--']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate subplot 1 (top)\n t, y = solver(250, i_bias_on=2, duration=260)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings)\n\n plt.subplot(2, 1, 2) # Generate subplot 2 (bottom)\n for ix, i_bias_on in enumerate([2, 1.5, 1]):\n t, y = solver(250, i_bias_on=i_bias_on, duration=260)\n t_spike, f = spike_times(t, y[:, 0])\n plt.plot(t_spike[0:-1], f, c='k', linestyle=line_styles[ix], marker=marker[ix], fillstyle='none')\n\n plot_settings['y_limits'] = [20, 40]\n plot_settings['y_ticks'] = [20, 25, 30, 35, 40]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = 'Frequency (Hz)'\n plot_settings['legend'] = ['2.0 nA', '1.5 nA', '1.0 nA']\n plot_settings['scale_size'] = 0\n plot_settings['legend_location'] = 4\n alter_figure(plot_settings, close=True)", "def topStack_x(self):\r\n self.x_stack=0\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "def _set_border_and_background_transparent(figure):\n figure.setAttribute('COLOR', pyHAttributeColor(0, 0, 0, 0))\n figure.setAttribute('FILL', pyHAttributeFillColor(0, 0, 0, 0))", "def showTopView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showTopView()\r\n self.midsagittalView = True\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False", "def plot_init(bottom_left: Point, top_right: Point):\n global figure\n global axes\n\n plt.ion()\n figure, axes = plt.subplots(1, 1)\n axes.set_xlim(bottom_left[0], top_right[0])\n axes.set_ylim(bottom_left[1], top_right[1])\n axes.set_aspect(\"equal\", adjustable=\"box\")", "def relayout(self): \n\t\t#self.urmaswin.Layout()\n\t\t#wx.CallAfter(self.urmaswin.Layout)\n\t\t#wx.CallAfter(self.visualizer.OnSize)", "def _setup_figure(self):\n\n plt.figure(1)\n plt.clf()\n\n # Two main axes\n self._tsne_window = plt.axes([0.05, 0.05, 0.4, 0.4])\n self._main_window = plt.axes([0.05, 0.55, 0.4, 0.4])\n\n # Nine sub axes\n self._sub_windows = []\n for row in range(3):\n for col in range(3):\n tt = plt.axes([0.5+0.17*col, 0.75-0.25*row, 0.15, 0.15])\n tt.set_xticks([])\n tt.set_yticks([])\n self._sub_windows.append(tt)\n\n # Register the button click\n self._cid = plt.figure(1).canvas.mpl_connect('button_press_event', 
self._onclick)\n\n # Text\n plt.figure(1).text(0.6, 0.2, 'Click with 2nd or 3rd mouse button to select image...')\n plt.figure(1).text(0.05, 0.5, 'Click in main image or tSNE plot to find similar cutouts...')\n plt.figure(1).text(0.6, 0.05, 'The tSNE data reduction calculated from data run through {}'.format(self._model_name), fontsize=8)\n\n # Show\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def set_canvas(self):\n self.ui.figure = plt.figure(figsize=(10, 10))\n self.ui.figure.patch.set_facecolor('None')\n self.ui.canvas = FigureCanvas(self.ui.figure)\n self.ui.canvas.setStyleSheet('background-color:transparent;')\n # Matplotlib toolbar\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, self)\n self.ui.toolbar.setMaximumHeight(30)\n self.ui.figureLayout.addWidget(self.ui.toolbar)\n self.ui.figureLayout.addWidget(self.ui.canvas)\n self.ui.canvas.mpl_connect('button_press_event', self.onclick)\n self.ui.canvas.mpl_connect('pick_event', self.onclick_pick)", "def hide_figure_grid(fig: object, grid: object) -> None:\n grid.grid(False)" ]
[ "0.62913567", "0.61378044", "0.5949495", "0.5857847", "0.58181804", "0.5804387", "0.57883143", "0.5783538", "0.5747582", "0.5702409", "0.5701583", "0.56797063", "0.5665179", "0.5653984", "0.56508195", "0.5632079", "0.5603762", "0.5568151", "0.5565534", "0.5541218", "0.55291635", "0.5506989", "0.5491923", "0.5469061", "0.54611033", "0.54590744", "0.54584694", "0.54571587", "0.5456919", "0.5456919", "0.5436639", "0.5432458", "0.54278105", "0.5417365", "0.5411274", "0.54069984", "0.53986514", "0.5392319", "0.53776205", "0.5365758", "0.5365757", "0.53580785", "0.5353261", "0.5347782", "0.53474253", "0.53429323", "0.53392416", "0.5336493", "0.5321252", "0.5319694", "0.5292884", "0.5287298", "0.5286098", "0.5268901", "0.5268901", "0.5268901", "0.5265576", "0.5259612", "0.52585715", "0.52558446", "0.52550954", "0.52546316", "0.5241685", "0.52404255", "0.5240158", "0.5236621", "0.5233083", "0.52278996", "0.52256805", "0.5224466", "0.5221333", "0.5220289", "0.5214955", "0.52114445", "0.52043414", "0.52010775", "0.5200002", "0.51979816", "0.5192517", "0.5192249", "0.51920635", "0.5189295", "0.51857823", "0.5177987", "0.5177254", "0.51734924", "0.5172673", "0.5170999", "0.51692027", "0.51686364", "0.51666534", "0.51644796", "0.51600856", "0.5158446", "0.51484585", "0.5141279", "0.51309866", "0.51205426", "0.51204115", "0.51150095", "0.51149833" ]
0.0
-1
replace the normal pause, w/o raising window()
def pause(time=1e-6):
    pl.draw()
    pl.gcf().canvas.start_event_loop(time)
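A minimal sketch of how this pause can stand in for pl.pause inside an interactive update loop, assuming pl is matplotlib.pyplot with an interactive GUI backend; canvas.start_event_loop() processes pending GUI events for the given number of seconds without re-calling show(), which is what avoids the window being raised on every frame. The frame count and data below are illustrative.

import matplotlib.pyplot as pl
import numpy as np

pl.ion()                                   # interactive mode so draw() updates the open window
fig, ax = pl.subplots()
line, = ax.plot([], [])
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1.1, 1.1)
x = np.linspace(0, 2 * np.pi, 200)

for k in range(50):                        # illustrative frame count
    line.set_data(x, np.sin(x + 0.1 * k))
    pause(0.02)                            # pause() from above: draw, then run the GUI event loop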
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(self):\n\t\tpass", "def pause(): # real signature unknown; restored from __doc__\n pass", "def pause(self):\n pass", "def pause(self):\n pass", "def pause():\n click.pause()", "def pause(self):\n \n self.pause = True", "def pause(self):\n pass\n # self.condor_object.hold()", "def pause(self):\n self.gui_block.clear()", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "def pause(self):\n raise NotImplementedError()", "def pause(self):\n raise NotImplementedError()", "def hw_pause(self):\n self.logger.debug(\"Pause called (no-op)\")", "def execute_pause(self):\n pass", "def pause(self) -> None:\n super().stop()", "def pause(self):\n if self._pause:\n self._pause = False\n else:\n self._pause = True\n self.step() # trigger the next step", "def __window_stop(self):\n pass", "def pause(ms=None): #py:pause\n if ms is None:\n RUR._pause_()\n else:\n RUR._pause_(ms)", "def inhale_pause_control(self):\n self.worker_piston.pause = True", "def pauseProducing(self):\n pass", "def pause_button(self):\r\n self.is_action = True\r\n self.update_settings()\r\n self.is_pause = True\r\n if self.pause_call is not None:\r\n self.wm.after(1, self.pause_call)", "def pause(self):\n self._cleanup()\n self._paused = True", "async def pause_behaviors(self) -> None:", "def pause(self):\n self._event.clear()", "def on_pause(self):\n pass", "def unpause(self):\n \n self.pause = False", "def pause(self):\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n return\n self.screen.fill(self.BACKGROUND_COLOR)\n self.graph.draw(self.screen)\n pygame.display.flip()", "def pause(self) :\n raise NotImplementedError(\"pause not implemented\")", "def pause(self):\n self.sendCommand(\"pause\")", "def Pause():\n\tDmg.enableButton.SetOff()", "def _pause(self):\n # Sleep until a signal is received. 
This prevents the master from\n # exiting immediately even if there are no runners (as happens in the\n # test suite).\n signal.pause()", "def before_sweep(self):\r\n _debug('GUISignalGenerator: before_sweep()')\r\n self.window.sleep(0.05)", "def execute_pause(self):\n print(self.empty_lines + self.double_tab + \"to continue press any key..\\r\")\n self.getch()", "def mypause(interval):\n backend = plt.rcParams['backend']\n if backend in matplotlib.rcsetup.interactive_bk:\n figManager = matplotlib._pylab_helpers.Gcf.get_active()\n if figManager is not None:\n canvas = figManager.canvas\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return", "def _suspend(event: E) -> None:\n event.app.suspend_to_background()", "def continue_button(self):\r\n self.update_settings()\r\n self.is_pause = False\r\n self.is_step = False\r\n if self.continue_call is not None:\r\n self.wm.after(1, self.continue_call)", "def pause(self) -> None:\n self._running.clear()", "def pause(seconds):\n time.sleep(seconds);", "def run(self):\n while inputs.check_for_key_press() == None:\n graphics.clear_display_surf()\n graphics.get_display_surf().blit(self.title_pause_surf, self.title_pause_rect)\n graphics.update_display_surf() \n self.fps_clock.tick(4)\n inputs.clear_event_queue()", "def do_pause(self, args) :\r\n self.__Logger.warn(\"pausing all simulations\")\r\n\r\n self.cmds[\"SimulatorPaused\"] = True", "def unpause(self):\n self.paused = False", "def _control_pause(self):\n self.player.pause()", "def pause(self):\n self.paused_time = time.time()\n self.paused = True", "def pause(cls):\n\n cls._set_mode_paused()\n TimeDisplay.stop_time()\n for callback in cls.pause_callback:\n callback()", "def return_screen_to_normal():\n curses.endwin()", "def _paused(self):\n self.view.clear()\n if self._game.getLives() == 2:\n self._mssg = (GLabel(text=TWO_LIFE_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=20))\n elif self._game.getLives() == 1:\n self._mssg = (GLabel(text=ONE_LIFE_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n \n font_size=20))\n if self._new_n_press():\n self._state = STATE_COUNTDOWN\n self._mssg = None\n self._game.resetPaddle()\n self.time = 0", "def default_signal_pauser(n, f):\n signal.pause()", "def pause(self):\n self.entry['state']=DGG.DISABLED\n self.ignoreAll()", "def do_pause(self, text):\n sm.input(text + '\\n')", "def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()", "def pause(self):\n self.terminate(signal.SIGTERM)", "def pause(self):\n self.terminate(signal.SIGTERM)", "def pause(self, time=0.000001):\n plt.pause(time)", "def do_unpause(self, args) :\r\n self.__Logger.warn(\"unpausing simulations\")\r\n\r\n self.cmds[\"SimulatorPaused\"] = False", "def pause_handler(term):\n inp = None\n while inp not in (\"p\", \"P\", \"q\", \"Q\"):\n print(term.home + term.clear + term.move_y(term.height // 2))\n print(term.black_on_white(term.center(\"press P to continue.\")))\n\n inp = term.inkey(timeout=10)", "def early_interact():\n\n global fullscreen\n global current_movie\n\n fullscreen = True\n current_movie = None", "def pauseRunning(self):\r\n self.start_button['state'] = 'normal'\r\n self.pause_button['state'] = 'disabled'\r\n self.running = False", "def pauseLoop(self):\n print('SYNC stim detected LOW')\n self.loopRunning = False", "def pause(self):\n 
self.background_scheduler.pause()", "def pause_update(self):\n if self.pause_event:\n self.pause_event.activate()", "def pause(self, _):\n if not self.is_ended:\n self.canvas.create_text(self.game.width // 2,\n self.game.height // 2,\n text=\"Paused\",\n font=(Game.FONT, 50),\n fill=Game.TEXT_COLOUR,\n tag='pause_text')\n self.game.running = not self.game.running\n if self.game.running:\n self.canvas.delete('pause_text')", "def __pause(self, reset=False):\n\n self.entrada.write('pause 0\\n')\n self.entrada.flush()\n self.__new_handle(reset)\n self.estado = \"paused\"\n self.emit(\"estado\", \"paused\")", "def pause(self) -> None:\n self.system.notify(\"Jarvis::Paused\")\n self.media.pause()", "def toggle_pause(self):\n self.stdin_queue.put(\"toggle\")", "def _unpause_gazebo(self):\n self.unpause_physics_proxy()", "def pause(self):\n self.stdin_queue.put(\"pause\")", "def pause():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"pausing the dialogs\")\n xd.send_signal(signal.SIGUSR2)\n else:\n click.echo(\"xkcd service not running\")", "def paused(self) -> bool:", "def pause(self):\n\n RosProxy().call_service(\"/welding_driver/arc_stop\", Empty, EmptyRequest())\n super(WeldTask, self).pause()", "def suspend(self):\n self.__running = False", "def on_worker_unpaused(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)\n self.unpausing = False", "def pauseBot(self):\n print \"Pausing BotPersonality \" + self.name\n self.running = False\n if self.fetch_thread is not None:\n self.fetch_thread.cancel()", "def debug_paused(self):\n self.emit(QtCore.SIGNAL(\"debug_paused\"))", "def wait(self):\n time.sleep(self.pause_time)", "def togglePause(self):\n self.model.paused = not self.model.paused\n self.proc.send_signal(signal.SIGUSR1)", "def pause():\n global pause_continue\n if pause_continue:\n return\n line = input(\n \"Paused. 
'q' to quit, 'c' to continue without pausing, anything else to step.\"\n )\n if line:\n if line[0] == \"q\":\n exit(0)\n if line[0] == \"c\":\n pause_continue = True", "def unpause(self) :\n raise NotImplementedError(\"unpause not implemented\")", "def on_worker_paused(self):\n self.playing = False\n self.pausing = False\n self.enable_video_buttons(True, False, True)", "def random_pause():\n pause_time = random.uniform(0, 0.5)\n sleep(pause_time)", "def after_all_sweeps(self):\r\n _debug('GUISignalGenerator: after_all_sweeps()')\r\n self.window.sleep(0.05)", "def pause(self, instance, callback):\n pass", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def prolongLiving(self):\n global LIVING_ID, DISPLAY_DURATION\n if(LIVING_ID):\n GObject.source_remove(LIVING_ID)\n LIVING_ID = GObject.timeout_add(\n DISPLAY_DURATION, destroyWindow, self.windowType)", "def prompt_stop(cls):\n\n cls._set_mode_prompt_stop()\n TimeDisplay.stop_time()\n for callback in cls.pause_callback:\n callback()", "def pause(self):\n \n print \"Press Enter to continue...\"\n waiting = True\n \n while waiting:\n if msvcrt.getch() == '\\r': waiting = False", "def end_screen(win):\n\tpass", "def pause_game_timer(self):\n self._pause_start_time = datetime.datetime.now()", "def close(self):\n #title()\n self.experiment.pause = True\n if self.running:\n self.running = False\n\n self._unregisterCallbacks()", "def launchSyncToolWindow(self):\r\n self.unbind()\r\n self.videoPlayer.pause()\r\n self.w_synctool = SyncToolWindow(self)", "def stop(self):\n self.idle = True\n # pass", "def toggle_pause(self, sender):\n if self.pause:\n self.app.title = 'checking'\n sender.title = 'pause'\n self.pause = False\n else:\n self.app.title = 'paused'\n sender.title = 'start'\n self.pause = True", "def pause(self):\n self.java_obj.pause()\n return self", "def pause_work(self):\n if self.worker_thread is not None:\n self.worker_thread.change_state(WorkerState.PAUSE)", "def pause_workflow(self):\n self._gdb_interface.pause_workflow()", "def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)", "def debug_paused(self):\n raise NotImplementedError", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "def after_single_sweep(self):\r\n _debug('GUISignalGenerator: .after_single_sweep()')\r\n self.window.sleep(0.05)", "def pause(self, instance):\n self.power_off(instance)", "def break_stimulus(win,break_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show break stimulus\n #if 50 seconds pass, then quit experiment\n break_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n core.quit\n break_stim.setAutoDraw(False)" ]
[ "0.7750189", "0.760757", "0.7583673", "0.7583673", "0.7269366", "0.71191716", "0.70278", "0.69974154", "0.6884622", "0.68308437", "0.68308437", "0.68252546", "0.68184483", "0.68105435", "0.6808565", "0.6798502", "0.67898375", "0.6775584", "0.6741306", "0.6734974", "0.67122114", "0.6699332", "0.66731596", "0.6664042", "0.66625196", "0.6548197", "0.6495606", "0.64904726", "0.6474632", "0.6472914", "0.64589506", "0.6457954", "0.64571583", "0.6447812", "0.64233965", "0.64148873", "0.6409167", "0.639712", "0.6394252", "0.6374031", "0.6353916", "0.6326285", "0.6317716", "0.6317057", "0.63129085", "0.62945753", "0.6291453", "0.62876415", "0.6284723", "0.6283653", "0.6283653", "0.6264236", "0.6254621", "0.62265784", "0.6205919", "0.62008226", "0.6182851", "0.61819416", "0.6180063", "0.6171666", "0.6168123", "0.61490816", "0.61472", "0.61402625", "0.6134467", "0.61281955", "0.61205757", "0.61202884", "0.6107225", "0.6095213", "0.6085749", "0.6085358", "0.6078585", "0.6076337", "0.606504", "0.60600877", "0.60586786", "0.6058034", "0.60544455", "0.605171", "0.6048636", "0.60442555", "0.604023", "0.6038983", "0.60316485", "0.6027564", "0.6024128", "0.6023016", "0.6017852", "0.60172427", "0.6015058", "0.60004294", "0.5994955", "0.5986093", "0.5981174", "0.5978513", "0.5977665", "0.59753364", "0.5957799", "0.59543353" ]
0.6827029
11
Plot the PDF of the density
def pdf_d(iout,run='',data='../data',iv='d',i4=0,nbin=100,xlim=[-4,3],lnd=False):
    s = di.snapshot(iout,run=run,data=data)
    n = nbin
    bins = np.linspace(xlim[0],xlim[1],n+1)
    htot = 0.0
    i = 0
    for p in s.patches:
        i += 1
        if i%1000==0:
            print('{:.1f}%'.format(i/len(s.patches)*100.0))
        d = p.var(iv,i4=i4)
        if lnd:
            logd = d/np.log(10.)
        else:
            logd = np.log10(d)
        h,e = np.histogram(logd,bins=bins)
        htot += h
    pl.hist(bins[0:n],bins=bins,weights=htot,log=True,density=True)
    return bins,htot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pdf(data, args):\n return Plot._dist(data, args)", "def make_kde_plot(x, pdf):\n\n fig = plt.figure(figsize=(768/96, 400/96), dpi=9)\n ax = plt.gca()\n ax.plot(x, pdf)\n ax.fill_between(x, pdf, alpha=.5)\n\n # Formatting\n plt.xlabel('Hourly rate ($)', fontsize=18)\n plt.xticks(fontsize=12)\n plt.ylabel('Number of tutors', fontsize=18)\n plt.yticks(fontsize=12)\n plt.title(\"Pricing distribution for similar tutors\", fontsize=24)\n plt.tight_layout()\n plt.show()\n\n # Save file to variable instead of writing to disk.\n img_io = StringIO()\n plt.savefig(img_io, dpi=96, format='png')\n img_io.seek(0)\n\n return img_io", "def pdf(self, grid, dataSegment):\n return self.density(dataSegment[0], *grid)", "def plot_density(sampler, threshold, sigma, width, n_random_samples = 10000):\n recX, labels = sampler.sample(n_random_samples)\n rec_t0 = recX[:,0]\n rec_amplitude = recX[:,1]\n generator.generate_pdf(threshold, sigma, width)\n fig = plt.figure(figsize = (12, 12))\n # pdf and random samples go to bottom right, margins on appropriate sides\n ax1 = plt.subplot2grid((12,12),(4,0), colspan = 9, rowspan = 8)\n pdf_map = ax1.contourf(generator.t0s, generator.amplitudes, generator.pdf, 10, cmap = 'Blues')\n ax1.scatter(rec_t0, rec_amplitude, s = 0.03, c = 'y')\n ax1.set_title('Probability density and random samples'.format(n_random_samples))\n ax1.set_xlabel('t0 [ns]')\n ax1.set_ylabel('amplitude [S/N]')\n ax1c = plt.subplot2grid((12,12), (1,9), rowspan = 3, colspan = 2)\n plt.colorbar(pdf_map, cax = ax1c, format = ticker.FuncFormatter(_fmt))\n ax2 = plt.subplot2grid((12,12),(1,0), colspan = 9, rowspan = 3, sharex = ax1)\n ax2.plot(generator.t0s[:,-1], generator.pdfu)\n ax2.hist(rec_t0, bins = generator.t0s[:,0], normed = True, alpha = 0.5)\n ax2.set_title('t0 margin distribution')\n ax2.set_ylabel('P(1 over)')\n plt.setp(ax2.get_xticklabels(), visible = False)\n ax3 = plt.subplot2grid((12,12),(4,9), rowspan = 8, colspan = 3, sharey = ax1)\n ax3.plot(generator.pdfv, generator.amplitudes[-1,:])\n ax3.hist(rec_amplitude, bins = generator.amplitudes[0,:], normed = True, orientation = 'horizontal', alpha = 0.5)\n ax3.set_title('Amplitude margin distribution')\n ax3.set_xlabel('P(1 over)')\n plt.setp(ax3.get_yticklabels(), visible = False)\n ax4 = plt.subplot2grid((12,12),(0,0), colspan = 9)\n ax4.text(0.5, 1.0, 'Exact P(one over) distribution and {0} random samples \\nthreshold : {1}, sigma : {2}, width : {3}'.format(n_random_samples, threshold, sigma, width), horizontalalignment = 'center', verticalalignment = 'top', fontsize = 18)\n ax4.set_axis_off()\n plt.tight_layout()\n plt.savefig('{0}/rng_test_thr{1}_sig{2}_w{3}.png'.format(plotdir, threshold, sigma, width))", "def plot_pdf(pdf,**kwargs):\n pl.hist(pdf.bins,bins=pdf.bins,weights=pdf.counts,**kwargs)\n return pdf.time", "def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):\n\n efermi = Vasprun('vasprun.xml').efermi\n dos_lines = open ('DOSCAR').readlines()\n\n x, up, down = [], [], []\n nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1\n\n for line in dos_lines[6:6+nedos]:\n split_line = line.split()\n x.append(float(split_line[0]) - efermi)\n up.append(float(split_line[1]))\n down.append(-float(split_line[2]))\n\n x, up, down = np.array(x), np.array(up), np.array(down)\n sum = up + down\n\n ax = plt.figure().gca()\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n ax.set_xlabel(r'$\\mathrm{E\\/(eV)}$')\n ax.set_ylabel(r'$\\mathrm{Density\\/of\\/States$')\n ax.set_xticklabels([r'$\\mathrm{%s}$' % t 
for t in ax.get_xticklabels()])\n ax.set_yticklabels([r'$\\mathrm{%s}$' % t for t in ax.get_yticklabels()])\n\n ax.plot(x, up, color='red' )\n ax.plot(x, down, color='green')\n ax.plot(x, sum, color='black' )\n if fmt is not None:\n plt.savefig('density_of_states.{}'.format(fmt))\n else:\n return ax\n\n plt.close()", "def plotPDF(self,thresholds,norm=False,fig=None,ax=None,**kwargs):\n\n\t\tif not matplotlib:\n\t\t\traise ImportError(\"matplotlib is not installed, cannot plot the PDF!\")\n\n\t\t#Instantiate figure\n\t\tif (fig is None) or (ax is None):\n\t\t\t\n\t\t\tself.fig,self.ax = plt.subplots()\n\n\t\telse:\n\n\t\t\tself.fig = fig\n\t\t\tself.ax = ax\n\n\t\t#Measure the PDF of the pixels\n\t\tkappa,pdf = self.pdf(thresholds,norm)\n\n\t\t#Plot the PDF\n\t\tself.ax.plot(kappa,pdf,**kwargs)\n\n\t\t#Adjust the labels\n\t\tif norm:\n\t\t\tself.ax.set_xlabel(r\"$\\sigma_{\\kappa}$\",fontsize=22)\n\t\t\tself.ax.set_ylabel(r\"$PDF(\\sigma_\\kappa)$\",fontsize=22)\n\t\telse:\n\t\t\ts = self.data.std()\n\t\t\tax_top = self.ax.twiny()\n\t\t\tax_top.set_xticks(self.ax.get_xticks())\n\t\t\tax_top.set_xlim(self.ax.get_xlim())\n\t\t\tax_top.set_xticklabels([ \"{0:.2f}\".format(n/s) for n in ax_top.get_xticks() ])\n\n\t\t\tself.ax.set_xlabel(r\"$\\kappa$\",fontsize=22)\n\t\t\tax_top.set_xlabel(r\"$\\kappa/\\sigma_\\kappa$\",fontsize=22)\n\t\t\tself.ax.set_ylabel(r\"${\\rm PDF}(\\kappa)$\",fontsize=22)", "def plot_pdf(data,b,X,outfile):\t\r\n\tme = \"LE_Plot.plot_pdf: \"\r\n\tshowplot = False\r\n\tt0 = time.time()\r\n\t## Data\r\n\tx, y = data\r\n\txmax, ymax = np.abs(x).max(), np.abs(y).max()\r\n\t## Plot pdf\r\n\tfs = 25\r\n\t# counts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-2*X,+2*X],blim(b,X)], normed=True)\r\n\tcounts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-xmax,+xmax],[-ymax,ymax]], normed=True)\r\n\tplt.xlabel(\"$x$\",fontsize=fs);plt.ylabel(\"$\\eta$\",fontsize=fs)\r\n\tplt.suptitle(outfile)\r\n\ttry:\r\n\t\tplt.savefig(outfile+\".png\")\r\n\t\tprint me+\"Plot saved as\",outfile+\".png\"\r\n\texcept IOError:\r\n\t\tprint me+\"ERROR: ouput direcotry not found, could not save\",outfile+\".png\"\r\n\t## Output\r\n\tif showplot:\tplt.show()\r\n\tplt.close()\t\t\r\n\tprint me+\"Plotting PDF:\",round(time.time()-t0,1),\"seconds\"\r\n\treturn counts.T, xedges, yedges", "def plot_pnl_pdf(title, vol, var, lgnd, report):\n fig, ax = plt.subplots(1, figsize=(16, 8))\n mu, sigma = 0, vol\n x = np.linspace(mu - 5 * sigma, 5 * sigma, 100)\n plt.plot(x, norm.pdf(x, mu, sigma));\n\n # add a vertical line for the VaR\n _ = plt.axvline(x=var, color='r', linestyle='--')\n\n # add title and labels\n _ = plt.legend(labels=[lgnd])\n _ = plt.xlabel('P&L', size=16)\n _ = plt.ylabel('density', size=16)\n\n # display the histogram\n report.write_plot(title)\n _ = plt.title(title, size=32)\n plt.show();\n plt.close();", "def _plot_posterior_pdf(self, parameter, ax, **kwargs):\n y_label = kwargs.pop('y_label', 'Posterior pdf')\n x_label = kwargs.pop('x_label', parameter)\n\n x_min = self.distribution_parameter_support[parameter][0]\n x_max = self.distribution_parameter_support[parameter][1]\n dx = (x_max - x_min)/100\n x_vals = np.arange(dx, x_max, dx)\n\n posterior = self._posterior_marginal_scipy(parameter)\n posterior_mean = self.posterior_mean(parameter)\n\n N = sum(self.data.values())\n if N > 0:\n fill_type = 'hdcr'\n hdcr = self.posterior_high_density_credible_region\n low_p, high_p = hdcr(parameter)\n else:\n fill_type = 'ccr'\n low_p, high_p = 
self.posterior_central_credible_region(parameter)\n\n x_fill = np.arange(low_p, high_p, 0.01)\n\n plot_parameter_pdf(ax, posterior, posterior_mean, x_vals,\n fill=fill_type, x_fill=x_fill, confidence=0.95,\n y_label=y_label, x_label=x_label, color='b')", "def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()", "def make_jpdf_plot(x_data,\n y_data,\n x_label,\n y_label, \n axis=\"\", \n title=\"\"):\n \n xmin = 0.\n ymax = 0.\n ymin = 0.\n ymax = 0.\n if axis == \"\":\n xmin = x_data.min()\n xmax = x_data.max()\n ymin = y_data.min()\n ymax = y_data.max()\n axis = [xmin,xmax,ymin,ymax]\n else:\n xmin = axis[0]\n xmax = axis[1]\n ymin = axis[2]\n ymax = axis[3]\n\n # prepare data for jpdf plot\n X, Y = np.mgrid[xmin:xmax:100j,ymin:ymax:100j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n values = np.vstack([x_data,y_data])\n kernel = scipy.stats.gaussian_kde(values)\n Z = np.reshape(kernel(positions).T, X.shape)\n \n \n plt.figure()\n plt.pcolor(X,Y,Z)\n plt.plot(x_data, y_data, 'k.', markersize=3)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.axis([xmin, xmax,ymin,ymax])\n if not title == \"\":\n plt.title(title)\n #plt.set_ylim([ymin, ymax])\n cb = plt.colorbar()\n cb.set_label(\"probability density\")\n plt.show()", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def plot_distribution(d, start=0.01, stop=10.0, resolution=0.1):\n import pylab\n X = numpy.arange(start, stop, resolution)\n Y = [math.exp(d.log_pdf(x)) for x in X]\n pylab.plot(X, Y)", "def density(x, y, pmap, amplitude=True):\n\n d = pmap['density']\n params = [pmap['x_mean'], pmap['y_mean'], pmap['sigma']]\n\n if d == 'gaussian':\n pdf = gaussian(x, y, params)\n \n elif d == 'students':\n pdf = student(x, y, pmap['nu'], params)\n \n if amplitude:\n pdf = pmap['amplitude'] * pdf\n\n return pdf", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def pdf(self, x):\n raise NotImplementedError", "def pdf(x):\n return lambda point: self.information_manifold.point_to_pdf(point)(x)", "def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. * np.pi * grid[0] ** 2.))", "def pdf(self,x):\n if self.transformation:\n pdfValue = self.pdfInTransformedSpace(x)\n else:\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. * np.pi * grid[1] ** 2.))", "def pdf(self, grid, dataSegment):\n r = grid[0]\n s = grid[1]\n sScaled = s*np.sqrt(1 - r**2.)\n return np.exp(-((dataSegment[1] - r * dataSegment[0]) ** 2.) / (2. * sScaled ** 2.) - .5 * np.log(\n 2. 
* np.pi * sScaled ** 2.))", "def pdf(x, point):\n raise NotImplementedError(\"The pdf method has not yet been implemented.\")", "def multi_density_plot(data, title, xlabel, ylabel, border=0.2):\n for e in data:\n cur = e['data']\n name = e['name']\n\n if (np.array(cur) == 0).all():\n continue\n\n density = gaussian_kde(cur)\n x = np.linspace(-border, border, len(cur))\n plt.plot(x, density(x), label=name)\n\n plt.tick_params(labelsize=20)\n\n plt.xlim(-border, border)\n\n plt.title(title, fontsize=33)\n plt.xlabel(xlabel, fontsize=30)\n plt.ylabel(ylabel, fontsize=30)\n\n #plt.legend(loc='best')\n\n Plotter.show(title)", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[0, 0] - grid[0]) ** 2.) / (2. * dataSegment[0, 1] ** 2.) -\n .5 * np.log(2. * np.pi * dataSegment[0, 1] ** 2.))", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def pdf(self):\n\n pdf = PDF(self.valuesArray)\n return pdf.axes[0], pdf.pdf", "def make_distplot(data, output_f, title, xlabel, prefix):\n\n plt.figure(figsize=(8, 8), dpi=1200)\n displot = sns.distplot(data, hist=False, rug=True, color=\"b\")\n out_name = prefix + \"_\" + title + \"_\" + output_f.split(\".\")[0] + \".pdf\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel('Density')\n displot.figure.savefig(out_name)\n plt.close()", "def pdf(self, x):\n\n mean = self.mean\n stddev = self.stddev\n e = 2.7182818285\n pi = 3.1415926536\n\n exp = -1 * ((x - mean) ** 2) / (2 * (stddev ** 2))\n den = 2 * pi * (stddev ** 2)\n\n pdf = (1 / (den) ** 0.5) * (e ** exp)\n\n return pdf", "def plot_prob_density(x,\n y,\n cmap=cm.get_cmap('summer'),\n xscale='log',\n yscale='log',\n xParam=1.5,\n yParam=1.5,\n ax=None,\n use_gridspec=False\n ):\n fig, ax, cax = _get_fig_ax_and_colorbar_ax(xscale, yscale, ax=ax)\n\n X, Y, counts_matrix, bin_centers, means, Bins2D = \\\n _get_count_data_to_plot(x, y, xscale, yscale, xParam, yParam)\n\n prob_density = counts_matrix / (Bins2D.sizes.T * np.sum(counts_matrix))\n\n assert np.abs(np.sum(prob_density * Bins2D.sizes.T) - 1) < 0.000001\n im = ax.pcolor(X, Y, prob_density,\n cmap=cmap, norm=colors.LogNorm()\n )\n\n ax.plot(bin_centers, means, \"go-\")\n\n cbar = fig.colorbar(im, cax, ax=ax, use_gridspec=use_gridspec,\n orientation='vertical',\n format=LogFormatterMathtext())\n return fig, ax, cbar, im", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def density(self, arg):\n mean = - self.sigma**2 * self.data['maturity']\n std = self.sigma * self.data['maturity']**.5\n return scs.norm(mean, std).pdf(arg)", "def probability_density(self, X):\n raise NotImplementedError", "def LEpdf(xydata):\r\n\t\r\n\t## Read eta (yy), xHO (x1) points from file\r\n\tyy,x1 = np.loadtxt(xydata,delimiter=\" \",skiprows=1)[:,1:3].T\r\n\tdel xydata\r\n\r\n\t## Construct a (normed) histogram of the data\r\n\tnbins = [100,100]\r\n\tH,xedges,yedges = np.histogram2d(x1,yy,bins=nbins,normed=True)\r\n\txpos = xedges[1:]-xedges[0]; ypos = yedges[1:]-yedges[0]\r\n\r\n\t## Plot pdf\r\n\tH = gaussian_filter(H, 3)\t## Convolve with Gaussian\r\n\tfig, (ax1, ax2, ax3) = plt.subplots(nrows=3)\r\n\tax1.imshow(H, interpolation='nearest', origin='low')#,extent=[xedges[0], xedges[-1], yedges[0], 
yedges[-1]])\r\n\tax1.set_xlabel(\"$x_{HO}$\");ax1.set_ylabel(\"$\\eta$\")\r\n\tax2.contour(xpos,ypos,H,10)\r\n\tax2.set_xlabel(\"$x_{HO}$\");ax2.set_ylabel(\"$\\eta$\")\r\n\tax3.hist2d(x1,yy, bins=100, normed=True)\r\n\tax3.set_xlabel(\"$x$\");ax3.set_ylabel(\"$\\eta$\")\r\n\tplt.tight_layout()\r\n\r\n\tplt.show()\r\n\treturn", "def define_pdf(self, values: torch.Tensor, weights: torch.Tensor, inds: torch.Tensor) -> Distribution:\n\n raise NotImplementedError()", "def plot_pdfs(meta):\n f = plt.figure(figsize=(5,5))\n sps = f.add_subplot(1,1,1)\n sps.set_title(meta.name+r' PDFs')\n plotstep(sps,meta.binends,meta.intPz,c=c_int,l=l_int+r'$P(z)$',s=s_int,w=w_int,d=d_int,a=a_int)\n dummy_x,dummy_y = np.array([-1,-2,-3]),np.array([-1,-2,-3])\n plotstep(sps,dummy_x,dummy_y,c=c_exp,s=s_map,w=w_exp,l=r' MLE $z$',d=d_map,a=a_map)\n sps.legend(loc='upper right',fontsize='x-small')\n np.random.seed(seed=meta.ngals)\n randos = random.sample(xrange(meta.ngals),len(meta.colors))\n for r in lrange(randos):\n plotstep(sps,meta.binends,meta.pdfs[randos[r]],c=meta.colors[r%len(meta.colors)],s=s_smp,w=w_smp,d=d_smp,a=a_smp)\n sps.vlines(meta.mleZs[randos[r]],0.,max(meta.pdfs[randos[r]]),color=meta.colors[r],linestyle=s_map,linewidth=w_map,dashes=d_map,alpha=a_map)\n sps.set_ylabel(r'$p(z|\\vec{d})$')\n sps.set_xlabel(r'$z$')\n sps.set_xlim(meta.binlos[0]-meta.bindif,meta.binhis[-1]+meta.bindif)\n sps.set_ylim(0.,1./meta.bindif)\n f.savefig(os.path.join(meta.topdir,'samplepzs.pdf'),bbox_inches='tight', pad_inches = 0)\n return", "def plot_density(data: pd.DataFrame, target: str, feature: str):\n\n plt.figure(figsize=(16, 4))\n\n sns.kdeplot(\n data[feature][data[target] == 1],\n shade=True, label='{}=1'.format(target), linewidth=3)\n sns.kdeplot(\n data[feature][data[target] == 0],\n shade=True, label='{}=0'.format(target), linewidth=3)\n\n min_v = data[feature].min()\n max_v = data[feature].max()\n plt.xlim(min_v, max_v)\n\n plt.title('Distribution of {} by {} value'.format(\n feature.upper(), target.upper()))\n plt.xlabel('{}'.format(feature))\n plt.ylabel('Density')", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def pdf(self,x):\n if x in self.values:\n pdfValue = self.mapping[x]\n else:\n if self.isFloat:\n vals = sorted(list(self.values))\n idx = bisect.bisect(vals, x)\n pdfValue = self.mapping[list(vals)[idx]]\n else:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate pdf for ' + str(x))\n return pdfValue", "def pdf(self,x):\n if self.base == 'natural':\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x\n else:\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x * 1./math.log(10.)\n return pdfValue", "def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam / nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t 
ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()", "def pdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.pdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )", "def density(self):\n self.convert_window(\"Density\", \"kilograms/liter\", [\"grains/gallon(UK)\", \"grains/gallon(US)\", \"grams/cubic centimeters\", \"grams/liter\", \"grams/millimeters\", \"kilograms/cubic meters\", \"kilograms/liter\", \"megagrams/cubic meter\", \"milligrams/liter\", \"milligrams/millimeters\", \"ounces/cubic inch\", \"ounces/gallon(UK)\", \"ounces/gallon(US)\", \"pounds/cubic foot\", \"pounds/cubic inch\", \"pounds/gallon(UK)\", \"pounds/gallon(US)\", \"slugs/cubic foot\", \"tonnes/cubic meter\", \"tons(UK)/cubic yard\", \"tons(US)/cubic yard\"])", "def pdf(cls, params, xs=None):\n n = params['negative']\n if xs is None:\n # Find nice range to graph\n lower = ((-1 if n else 1) *\n params['loc'] / (np.exp(params['scale']) **\n cls.standard_deviations_in_default_range))[0, 0]\n upper = ((-1 if n else 1) *\n params['loc'] * (np.exp(params['scale']) **\n cls.standard_deviations_in_default_range))[0, 0]\n if n:\n lower, upper = upper, lower\n\n xs = np.linspace(\n (lower if np.isnan(params['minimum']) else params['minimum']).ravel(),\n (upper if np.isnan(params['maximum']) else params['maximum']).ravel(),\n cls.default_number_points_in_pdf\n ).ravel()\n\n ys = stats.lognorm.pdf(\n -1 * xs if n else xs,\n params['scale'],\n scale=np.exp(params['loc'])\n )\n return xs, ys.ravel()", "def pdf(self, xs):\n xs = np.atleast_2d(xs)\n return self.factor * np.exp(-0.5 * np.sum(xs * xs, axis=0))", "def density(categorical_var, numerical_var):\n #print(categorical_var)\n cat_list = categorical_var.astype('category')\n for cat in cat_list:\n sns.kdeplot(numerical_var[categorical_var == cat], label=cat)#, categorical_var)\n\n plt.show()", "def test_probability_density(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n assert expected_result - 1e-16 < result < expected_result + 1e-16", "def pdf(self, x, log = False):\n if log:\n return D.logsumexp(\n D.multiple_gauss_den(x, self.mu, self.va, log = True)\n + N.log(self.w))\n else:\n return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)", "def pdf(s, x):\r\n x = Basic.sympify(x)\r\n return 1/(s.sigma*sqrt(2*pi)) * exp(-(x-s.mu)**2 / (2*s.sigma**2))", "def density(self, density):\n\n self._density = density", "def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den", "def plot(self, *, axis=None):\n \n \n self.update_dF_args()\n \n if axis is None:\n axis = self.portrait.ax\n \n _x = np.linspace(*self.xRange, self.density)\n _y = _x.copy()\n \n _xdF = np.zeros([self.density, 
self.density])\n _ydF = np.zeros([self.density, self.density])\n \n for i,xx in enumerate(_x):\n for j,yy in enumerate(_y):\n _xdF[j,i], _ydF[j,i] = self.funcion(xx,yy, **self.dF_args)\n \n xct = axis.contourf(_x, _y,_xdF, levels=[-self.xprecision + self.offset, self.xprecision + self.offset], colors=[self.xcolor], extend='neither')\n yct = axis.contourf(_x, _y,_ydF, levels=[-self.yprecision + self.offset, self.yprecision + self.offset], colors=[self.ycolor], extend='neither')\n \n xct.cmap.set_over(self.bgcolor, alpha=self.alpha)\n yct.cmap.set_over(self.bgcolor, alpha=self.alpha)\n xct.cmap.set_under(self.bgcolor, alpha=self.alpha)\n yct.cmap.set_under(self.bgcolor, alpha=self.alpha)\n \n return xct, yct", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def n_pdf(x):\n return 1/np.sqrt(2*np.pi)*np.exp(-x**2/2)", "def plot_pdf(pop_name, pop_val, pop_file, full_pop_file, outdir='.'):\n try:\n plt.style.use(\n \"https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/4ee4a870126653d542572372ff3eee4e89abcab0/publication.mplstyle\")\n except Exception:\n pass\n\n plt.close('all')\n all = pd.read_csv(full_pop_file, sep=\" \")\n all['cos_theta_1'] = all['cos_tilt_1']\n all = process_samples(all)\n sub = pd.read_csv(pop_file, sep=\" \")\n sub = process_samples(sub)\n sub['cos_theta_1'] = sub['cos_tilt_1']\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for ax, l in zip(axes, [\"cos_theta_1\", \"cos_theta_12\"]):\n ax.hist(all[l], density=True, histtype='step', color=\"tab:blue\", label=\"ALL\", lw=2, alpha=0.8)\n ax.scatter(all[l], [0 for _ in all[l]], color=\"tab:blue\",marker=\"+\")\n ax.hist(sub[l], density=True, histtype='step', color=\"tab:purple\", label=\"HIGH SNR\", lw=2, alpha=0.6)\n ax.scatter(sub[l], [0 for _ in sub[l]], color=\"tab:purple\", marker=\"+\")\n\n x = np.linspace(-1, 1, 100)\n y1 = TruncatedNormal(mu=1, sigma=pop_val[0], minimum=-1, maximum=1).prob(x)\n y2 = TruncatedNormal(mu=1, sigma=pop_val[1], minimum=-1, maximum=1).prob(x)\n axes[1].plot(x, y2, color='tab:gray', zorder=-10, lw=3, label=\"TRUE\")\n axes[0].plot(x, y1, color='tab:gray', zorder=-10, lw=3)\n\n for i in range(len(axes)):\n if (i == 0):\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_1$\")\n axes[i].set_ylabel(\"PDF\")\n else:\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_{12}$\")\n axes[i].set_yticklabels([])\n axes[i].legend()\n axes[i].grid(False)\n axes[i].set_xlim(-1, 1)\n\n plt.suptitle(f\"POP {pop_name}\")\n plt.tight_layout()\n plt.savefig(f\"{outdir}/pop_trues_{pop_name}.png\")", "def pdf(self, x):\n\t\treturn 1.5 * np.power(x,2) ##obtained after finding z from integrating x^2 from -1 to 1", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_pdf(Y, Y_Label='Y', nbins=0):\r\n\r\n # Options for the graphic\r\n pltfont = {'fontname': 'Bitstream Vera Sans', 'fontsize': 15} # font\r\n lc = 'k' # line colour\r\n\r\n ###########################################################################\r\n # Check inputs\r\n ###########################################################################\r\n if not isinstance(Y, np.ndarray):\r\n raise ValueError('\"Y\" must be a numpy.array.')\r\n if Y.dtype.kind != 'f' and Y.dtype.kind != 'i' and Y.dtype.kind != 'u':\r\n raise ValueError('\"Y\" must contain floats or integers.')\r\n Ny = Y.shape\r\n if len(Ny) > 1:\r\n if Ny[1] != 1:\r\n raise ValueError('\"Y\" be of shape (N,1) or (N, ).')\r\n N = 
Ny[0]\r\n Y = Y.flatten() # shape (N, )\r\n\r\n if np.isnan(Y).any():\r\n warn('some data in \"Y\" are nan')\r\n if np.isinf(Y).any():\r\n warn('some data in \"Y\" are inf')\r\n\r\n ###########################################################################\r\n # Check optional inputs\r\n ###########################################################################\r\n if not isinstance(Y_Label, str):\r\n raise ValueError('\"Y_Label\" must be a string.')\r\n\r\n if not isinstance(nbins, (int, np.int8, np.int16, np.int32, np.int64)):\r\n raise ValueError('\"nbins\" must be an integer.')\r\n if nbins < 0:\r\n raise ValueError('\"nbins\" must be positive.')\r\n elif nbins == 0:\r\n nbins = min(100, N/10) # set default value\r\n\r\n ###########################################################################\r\n # Create plot\r\n ###########################################################################\r\n\r\n fi, yi = np.histogram(Y.flatten(), nbins, density=True)\r\n yi = (yi[np.arange(0, nbins, 1)]+yi[np.arange(1, nbins+1, 1)])/2 # bin centers\r\n\r\n # Limit for vertical axis\r\n ymin = np.min(Y)\r\n ymax = np.max(Y)\r\n if ymin == ymax: # (i.e., if all data have the same value)\r\n ymin = ymin - ymin/10\r\n ymax = ymax + ymax/10\r\n\r\n # Limt for horizontal axis\r\n fmin = 0\r\n fmax = np.max(fi)\r\n# if np.mean(fi)/np.std(fi) > 2: # if frequency is rather constant (''flat''),\r\n# # expand the vertical axis so to highlight this:\r\n# fmax = min(1,np.max(fi)+10*np.mean(fi))\r\n# else:\r\n# fmax = min(1,np.max(fi)+np.std(fi))\r\n\r\n # Plot\r\n #plt.figure()\r\n plt.plot(yi, fi, '.-', linewidth=2, color=lc)\r\n plt.xlim((ymin, ymax))\r\n plt.ylim((fmin, fmax))\r\n plt.xticks(**pltfont); plt.yticks(**pltfont)\r\n plt.xlabel(Y_Label, **pltfont)\r\n plt.ylabel('PDF', **pltfont)\r\n plt.box(on=True)\r\n\r\n return fi, yi", "def plot_density(outdir, sdata, mt, group, ref, pos, strand, mer, feature_names, colors, ext=\"pdf\"):\n fig, axes = plt.subplots(1, len(feature_names), figsize=(4*len(feature_names), 4))\n fig.suptitle(\"{} {} {}:{}{} {}\".format(mt, group, ref, pos, strand, mer))\n for fi, (ax, f) in enumerate(zip(axes, feature_names)):\n for si, (s, c) in enumerate(zip((mt, \"wt\"), colors)):\n sns.kdeplot(sdata[si][:, fi], color=c, linewidth=2, shade=True, alpha=.5, legend=False, ax=ax)\n ax.set_xlabel(f); ax.set_ylabel(\"\")\n axes[0].set_ylabel(\"Density\")\n fig.savefig(os.path.join(outdir, \"{}:{}{}.{}\".format(ref, pos, strand, ext)))\n plt.close()", "def density(self):\n return self.get_density()", "def _kde_example(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n ch.show(_OUTPUT_FORMAT)", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n 
except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def pdf(self, grid, dataSegment):\n temp = grid[0][:] # make copy of parameter grid\n temp[temp > 1.] = 0. # p < 1\n temp[temp < 0.] = 0. # p > 0\n\n if dataSegment[0]:\n pass # pdf = p\n else:\n temp = 1. - temp # pdf = 1 - p\n\n return temp", "def plot(self, ax=None, ylabel=\"PDF(x)\", xlabel=\"x\", num_bins=50, show=False, outputname=None, color=\"C2\", logscale=False, xlims=None):\n \n \"\"\"Create figure if none was provided\"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n \"\"\"Plot\"\"\"\n ax.hist(self.sample_x, bins=num_bins, color=color)\n\n \"\"\"Set plot attributes\"\"\"\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n \n \"\"\"Set xlim if requested\"\"\"\n if xlims is not None:\n ax.set_xlim(xlims[0], xlims[1])\n\n \"\"\"Set yscale to log if requested\"\"\"\n if logscale:\n ax.set_yscale(\"log\")\n \n \"\"\"Save if filename provided\"\"\"\n if outputname is not None:\n plt.savefig(outputname + \".pdf\")\n plt.savefig(outputname + \".png\", density=300)\n \n \"\"\"Show if requested\"\"\"\n if show:\n plt.show()", "def pdf(self, grid, dataSegment):\n # create dictionary from list\n freeParameterDict = {key: value for key, value in zip(self.freeParameters, grid)}\n\n # merge free/fixed parameter dictionaries\n parameterDict = freeParameterDict.copy()\n parameterDict.update(self.fixedParameterDict)\n\n # scipy.stats differentiates between 'pdf' and 'pmf' for continuous and discrete variables, respectively\n if self.isContinuous:\n return self.rv.pdf(dataSegment[0], **parameterDict)\n else:\n return self.rv.pmf(dataSegment[0], **parameterDict)", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def dirichlet_pdf(x, alpha):\n density = reduce(operator.mul, \n [x[i]**(alpha[i]-1.0) for i in range(len(alpha))])\n norm_top = gamma(np.sum(alpha))\n norm_bot = reduce(operator.mul, [gamma(a) for a in alpha])\n return (norm_top / norm_bot) * density", "def pdf(self, x, **kwargs):\n from scipy.stats import rv_continuous\n return self.rvdist.pdf(x, **kwargs) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.evaluate(x, **kwargs)", "def plot_pdf(self, param, plot_type, Nsplit=50, **kwargs):\n\n title = self.family.capitalize() + \" Copula PDF\" \n\n if plot_type == \"3d\":\n bounds = [0+1e-1/2, 1-1e-1/2]\n\n elif plot_type == \"contour\":\n bounds = [0+1e-2, 1-1e-2]\n\n U_grid, V_grid = np.meshgrid(\n np.linspace(bounds[0], bounds[1], Nsplit),\n np.linspace(bounds[0], bounds[1], Nsplit))\n \n Z = np.array( \n [self.get_pdf(uu, vv, param) for uu, vv in 
zip(np.ravel(U_grid), np.ravel(V_grid)) ] )\n \n Z = Z.reshape(U_grid.shape)\n\n if plot_type == \"3d\":\n\n plot_bivariate_3d(U_grid,V_grid,Z, [0,1], title, **kwargs)\n elif plot_type == \"contour\":\n plot_bivariate_contour(U_grid,V_grid,Z, [0,1], title, **kwargs)\n else:\n print(\"only \\\"contour\\\" or \\\"3d\\\" arguments supported for type\")\n raise ValueError", "def pdf(self, x: float) -> float:\n\n return (1.0 / (self.sigma * np.sqrt(2*math.pi))) * \\\n np.exp(-0.5*((x - self.mu) / self.sigma) ** 2)", "def _kde_example2(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot + Histogram\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n ch.style.color_palette.reset_palette_order()\n ch.plot.histogram(\n data_frame=data,\n values_column=\"unit_price\",\n color_column=\"fruit\",\n method=\"density\",\n )\n ch.show(_OUTPUT_FORMAT)", "def plot1d(self, level = misc.DEF_LEVEL, fill = False, gpdf = False):\n if not self.__is1d:\n raise ValueError(\"This function does not make sense for \"\\\n \"mixtures which are not unidimensional\")\n\n from scipy.stats import norm\n pval = N.sqrt(self.va[:, 0]) * norm(0, 1).ppf((1+level)/2)\n\n # Compute reasonable min/max for the normal pdf: [-mc * std, mc * std]\n # gives the range we are taking in account for each gaussian\n mc = 3\n std = N.sqrt(self.va[:, 0])\n mi = N.amin(self.mu[:, 0] - mc * std)\n ma = N.amax(self.mu[:, 0] + mc * std)\n\n np = 500\n x = N.linspace(mi, ma, np)\n # Prepare the dic of plot handles to return\n ks = ['pdf', 'conf', 'gpdf']\n hp = dict((i, []) for i in ks)\n\n # Compute the densities\n y = D.multiple_gauss_den(x[:, N.newaxis], self.mu, self.va, \\\n log = True) \\\n + N.log(self.w)\n yt = self.pdf(x[:, N.newaxis])\n\n try:\n import pylab as P\n for c in range(self.k):\n h = P.plot(x, N.exp(y[:, c]), 'r', label ='_nolegend_')\n hp['pdf'].extend(h)\n if fill:\n # Compute x coordinates of filled area\n id1 = -pval[c] + self.mu[c]\n id2 = pval[c] + self.mu[c]\n xc = x[:, N.where(x>id1)[0]]\n xc = xc[:, N.where(xc<id2)[0]]\n \n # Compute the graph for filling\n yf = self.pdf_comp(xc, c)\n xc = N.concatenate(([xc[0]], xc, [xc[-1]]))\n yf = N.concatenate(([0], yf, [0]))\n h = P.fill(xc, yf, facecolor = 'b', alpha = 0.1, \n label='_nolegend_')\n hp['conf'].extend(h)\n if gpdf:\n h = P.plot(x, yt, 'r:', label='_nolegend_')\n hp['gpdf'] = h\n return hp\n except ImportError:\n raise GmParamError(\"matplotlib not found, cannot plot...\")", "def contingency(self, scale, distrib=True, dataname=''):\n print 'Generating the plot ...'\n\n cont = np.zeros((scale, scale))\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n\n # syn = (index, rel index, class)\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n # print posx,posy,data[i][0],data[i][1], normLat, normLon\n try:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n except IndexError:\n print self.dataset[i][0], self.dataset[i][1]\n if distrib:\n cont = cont / np.max(cont)\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n plt.title('Density ')\n\n plt.imshow(cont, interpolation='bicubic', cmap=cm.gist_yarg)\n vmax = np.max(cont)\n # vmin=np.min(cont)\n\n if distrib:\n plt.colorbar(ticks=np.round(np.linspace(0, 1, 10), 2),\n orientation='vertical')\n 
nfile = self.application + '-' + dataname\n\n fig.savefig(homepath + 'Results/' + self.city[2] + '-' + nfile + '.pdf', orientation='landscape', format='pdf')\n\n #plt.show()", "def contrast_pdf(contdc, contdc_sigma, dc_tru, dc_sigma, contrast_axis, npts=8000, display=False):\n\n dc_axis = np.linspace(dc_tru - 8 * dc_sigma, dc_tru + 8 * dc_sigma, npts)\n dc_mesh, contrast_mesh = np.meshgrid(dc_axis, contrast_axis)\n contdc_mesh = dc_mesh * contrast_mesh\n\n pdf_contdc = scipy.stats.rice.pdf(contdc_mesh, contdc / contdc_sigma, scale=contdc_sigma, loc=0.)\n pdf_dc, _ = norm_pdf(dc_tru, dc_sigma, x=dc_mesh)\n joint_pdf = pdf_contdc * pdf_dc\n\n # normalise joint PDF\n area = np.trapz(np.trapz(joint_pdf, contdc_mesh, axis=0), dc_axis)\n joint_pdf /= area\n\n # calculate the ratio pdf\n integrand = abs(dc_mesh) * joint_pdf\n contrast_pdf = np.trapz(integrand, dc_mesh, axis=1)\n\n if display:\n plt.figure()\n plt.imshow(pdf_contdc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(pdf_dc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(joint_pdf)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(integrand)\n plt.colorbar()\n\n plt.figure()\n plt.plot(contrast_axis, contrast_pdf)\n\n plt.show()\n\n return contrast_pdf", "def pdf(self,x):\n pdfValue = self.pdfFunc(x)\n return pdfValue", "def _plot_prior_pdf(self, parameter, ax, **kwargs):\n y_label = kwargs.pop('y_label', 'Prior pdf')\n x_label = kwargs.pop('x_label', parameter)\n\n x_min = self.distribution_parameter_support[parameter][0]\n x_max = self.distribution_parameter_support[parameter][1]\n dx = (x_max - x_min)/100\n x_vals = np.arange(dx, x_max, dx)\n\n prior = self._prior_marginal_scipy(parameter)\n prior_mean = self.prior_mean(parameter)\n plot_parameter_pdf(ax, prior, prior_mean, x_vals, fill=None,\n x_fill=None, confidence=0.95,\n y_label=y_label, x_label=x_label)", "def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)", "def density(self):\n return self._density", "def pdf(x):\n return - np.exp(self.ks_gaussian.score_samples(x.reshape(1, -1)))", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def density(self) -> float:\n if self.is_directed():\n factor = 1\n else:\n factor = 2\n\n num_e = self._Impl.number_of_edges(directed_edges=True)\n num_v = self._Impl.number_of_vertices()\n\n density = (factor * num_e) / (num_v * (num_v - 1))\n return density", "def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):\n \n #kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)\n kde = gaussian_kde(x)\n pdf = kde.evaluate(x_grid)\n \n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n #ax.imshow((1,1), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax])\n ax.plot(x_grid, pdf, 'k.', label=\"kernel = kde_scipy gaussian\", markersize=2)\n ax.text(700, 0.0035, \"N={0} 
points\".format(x.shape[0]))\n ax.legend(loc='upper left')\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim(-0.001, 0.006)\n plt.show()", "def plot_density(df: pd.DataFrame, title: str, column_line_name: list, column_category_name: str, plot_width: int = 330,\n plot_height: int = 330, colours: list = ['#00BFA5', \"#8c9eff\", \"#536dfe\"]):\n\n hover = HoverTool()\n\n p = figure(title=title, plot_width=plot_width, plot_height=plot_height, tools=[\"save\", hover])\n\n for ind, category_ in enumerate(df[column_category_name].unique()):\n temp_df = df[df[column_category_name] == category_]\n density = stats.kde.gaussian_kde(temp_df[column_line_name])\n xs = np.linspace(0, 1, 100)\n source = ColumnDataSource(pd.DataFrame({'density': density(xs), 'xs': xs}))\n p.line(x='xs', y='density', source=source, line_color=colours[ind], legend=category_, line_width=2)\n\n p.title.text_font = p.xaxis.axis_label_text_font = p.yaxis.axis_label_text_font = \"Helvetica Neue\"\n p.xgrid.visible = p.ygrid.visible = False\n\n tooltips = [(\"density\", \"@ density {0%}\")]\n\n hover = p.select(dict(type=HoverTool))\n hover.tooltips = tooltips\n\n return p", "def normal_pdf(x: torch.Tensor) -> torch.Tensor:\n return torch.exp(-(x ** 2 / 2)) / np.sqrt(2 * np.pi)", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def _dncb_pdf(x, a1, a2, mu1, mu2):\n out = st.beta.pdf(x, a1, a2, loc=0)\n out *= np.exp(-mu1-mu2)\n out *= hchg(x, a1, a2, mu1, mu2)\n return out", "def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n z = gaussian_kde(xy)(xy)\n\n min_z = np.min(z)\n max_z = np.max(z)\n\n # Scale between 0 and 1\n scaled_z = (z - min_z) / (max_z - min_z)\n\n return scaled_z", "def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")", "def estimateDensity(self, sampledPDF, h=1, kernel='box'):\n\n if kernel=='box':\n kernel = np.ones((h,h))\n else:\n kernel = np.ones((h,h))\n PDF = image.correlate(input=sampledPDF, weights=kernel, mode='mirror')\n PDF = PDF/np.sum(PDF) #normalize pdf\n\n return PDF", "def density(self):\n return self.nnz/self.dim", "def pdf(self, x, norm=False):\n raise NotImplementedError(\"Normalized posterior not implemented\")", "def dplot(self):\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n ns = np.array([self.d(i) for i in x]).T\n\n for n in ns:\n plt.plot(x, n)\n\n return plt.show()", "def density(\n arr,\n ax=None,\n logx=False,\n logy=False,\n bins=25,\n mode=\"density\",\n extent=None,\n contours=[],\n percentiles=True,\n relim=True,\n cmap=DEFAULT_CONT_COLORMAP,\n shading=\"auto\",\n vmin=0.0,\n colorbar=False,\n **kwargs\n):\n if (mode == \"density\") & np.isclose(vmin, 0.0): # if vmin is not specified\n vmin = 0.02 # 2% max height | 98th percentile\n\n if arr.shape[-1] == 
3:\n projection = \"ternary\"\n else:\n projection = None\n\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = (*ax.patch.get_facecolor()[:-1], 0.0)\n\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap) # without this, it would modify the global cmap\n cmap.set_under((1, 1, 1, 0))\n\n if mode == \"density\":\n cbarlabel = \"Kernel Density Estimate\"\n else:\n cbarlabel = \"Frequency\"\n\n valid_rows = np.isfinite(arr).all(axis=-1)\n\n if (mode in [\"hexbin\", \"hist2d\"]) and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n\n if (arr.size > 0) and valid_rows.any():\n # Data can't be plotted if there's any nans, so we can exclude these\n arr = arr[valid_rows]\n\n if projection is None: # binary\n x, y = arr.T\n grid = DensityGrid(\n x,\n y,\n bins=bins,\n logx=logx,\n logy=logy,\n extent=extent,\n **subkwargs(kwargs, DensityGrid)\n )\n if mode == \"hexbin\":\n # extent values are exponents (i.e. 3 -> 10**3)\n mappable = ax.hexbin(\n x,\n y,\n gridsize=bins,\n cmap=cmap,\n extent=grid.get_hex_extent(),\n xscale=[\"linear\", \"log\"][logx],\n yscale=[\"linear\", \"log\"][logy],\n **subkwargs(kwargs, ax.hexbin)\n )\n\n elif mode == \"hist2d\":\n _, _, _, im = ax.hist2d(\n x,\n y,\n bins=[grid.grid_xe, grid.grid_ye],\n range=grid.get_range(),\n cmap=cmap,\n cmin=[0, 1][vmin > 0],\n **subkwargs(kwargs, ax.hist2d)\n )\n mappable = im\n\n elif mode == \"density\":\n zei = grid.kdefrom(\n arr,\n xtransform=[lambda x: x, np.log][logx],\n ytransform=[lambda y: y, np.log][logy],\n mode=\"edges\",\n **subkwargs(kwargs, grid.kdefrom)\n )\n\n if percentiles: # 98th percentile\n vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]\n logger.debug(\n \"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin)\n )\n\n if not contours:\n # pcolormesh using bin edges\n mappable = pcolor(\n grid.grid_xei,\n grid.grid_yei,\n zei,\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle(\"None\")\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(\n grid.grid_xei,\n grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape),\n ax=ax,\n contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n if relim and (extent is not None):\n ax.axis(extent)\n elif projection == \"ternary\": # ternary\n if shading == \"auto\":\n shading = \"flat\" # auto cant' be passed to tripcolor\n # zeros make nans in this case, due to the heatmap calculations\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == \"hexbin\":\n raise NotImplementedError\n # density, histogram etc parsed here\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n\n if percentiles: # 98th percentile\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]\n logger.debug(\"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin))\n\n # remove coords where H==0, as ax.tripcolor can't deal with variable alpha :'(\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n\n if not contours:\n tri_poly_collection = pcolor(\n *coords.T,\n zi.flatten(),\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(\n *coords.T,\n zi=zi.flatten(),\n ax=ax,\n 
contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n ax.set_aspect(\"equal\")\n else:\n if not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs[\"label\"] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n\n return ax", "def plotDFT(x):\n \n X = DFTdirect(x)\n plt.plot([c.re for c in x], [c.im for c in x], 'ro')\n plt.plot([c.re for c in X], [c.im for c in X], 'bo')\n plt.show()" ]
[ "0.73690754", "0.71617067", "0.71548754", "0.6999526", "0.6962633", "0.6902345", "0.690083", "0.6887748", "0.6878603", "0.68121535", "0.67572474", "0.66559654", "0.66133654", "0.66133654", "0.6585173", "0.6584147", "0.6578231", "0.6572529", "0.6552081", "0.654156", "0.6535898", "0.65328676", "0.6523914", "0.6508804", "0.65021825", "0.6473875", "0.6469751", "0.64497054", "0.64180297", "0.6397427", "0.63697946", "0.63618433", "0.6358163", "0.6355792", "0.6331857", "0.63266426", "0.6320994", "0.63167435", "0.6311974", "0.63102263", "0.63035035", "0.6299866", "0.6254244", "0.62332636", "0.6224594", "0.62233126", "0.6222408", "0.6198302", "0.6195015", "0.61548495", "0.61425155", "0.6141023", "0.61384463", "0.61240286", "0.61213315", "0.6118562", "0.61093825", "0.6105255", "0.6103129", "0.6091674", "0.60703254", "0.60673445", "0.60629624", "0.605688", "0.6029581", "0.6022804", "0.60121745", "0.60046476", "0.6000303", "0.59903044", "0.5977097", "0.5969224", "0.5960234", "0.5949554", "0.59400713", "0.5939158", "0.5937948", "0.59337974", "0.5932878", "0.5931393", "0.5931393", "0.5931393", "0.5922156", "0.5917142", "0.591313", "0.59009814", "0.5888341", "0.58863974", "0.5885529", "0.5880653", "0.5878437", "0.58764327", "0.5875846", "0.58665514", "0.58622193", "0.5860819", "0.5853676", "0.5848469", "0.58419853", "0.5829808" ]
0.5843463
98
Plot the output of the dispatch.pdf() procedure
def plot_pdf(pdf,**kwargs):
    pl.hist(pdf.bins,bins=pdf.bins,weights=pdf.counts,**kwargs)
    return pdf.time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def plot_pdf(data,b,X,outfile):\t\r\n\tme = \"LE_Plot.plot_pdf: \"\r\n\tshowplot = False\r\n\tt0 = time.time()\r\n\t## Data\r\n\tx, y = data\r\n\txmax, ymax = np.abs(x).max(), np.abs(y).max()\r\n\t## Plot pdf\r\n\tfs = 25\r\n\t# counts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-2*X,+2*X],blim(b,X)], normed=True)\r\n\tcounts, xedges, yedges, im = plt.hist2d(x,y, bins=100, range=[[-xmax,+xmax],[-ymax,ymax]], normed=True)\r\n\tplt.xlabel(\"$x$\",fontsize=fs);plt.ylabel(\"$\\eta$\",fontsize=fs)\r\n\tplt.suptitle(outfile)\r\n\ttry:\r\n\t\tplt.savefig(outfile+\".png\")\r\n\t\tprint me+\"Plot saved as\",outfile+\".png\"\r\n\texcept IOError:\r\n\t\tprint me+\"ERROR: ouput direcotry not found, could not save\",outfile+\".png\"\r\n\t## Output\r\n\tif showplot:\tplt.show()\r\n\tplt.close()\t\t\r\n\tprint me+\"Plotting PDF:\",round(time.time()-t0,1),\"seconds\"\r\n\treturn counts.T, xedges, yedges", "def plot():\n pass", "def plot_pdf(self, param, plot_type, Nsplit=50, **kwargs):\n\n title = self.family.capitalize() + \" Copula PDF\" \n\n if plot_type == \"3d\":\n bounds = [0+1e-1/2, 1-1e-1/2]\n\n elif plot_type == \"contour\":\n bounds = [0+1e-2, 1-1e-2]\n\n U_grid, V_grid = np.meshgrid(\n np.linspace(bounds[0], bounds[1], Nsplit),\n np.linspace(bounds[0], bounds[1], Nsplit))\n \n Z = np.array( \n [self.get_pdf(uu, vv, param) for uu, vv in zip(np.ravel(U_grid), np.ravel(V_grid)) ] )\n \n Z = Z.reshape(U_grid.shape)\n\n if plot_type == \"3d\":\n\n plot_bivariate_3d(U_grid,V_grid,Z, [0,1], title, **kwargs)\n elif plot_type == \"contour\":\n plot_bivariate_contour(U_grid,V_grid,Z, [0,1], title, **kwargs)\n else:\n print(\"only \\\"contour\\\" or \\\"3d\\\" arguments supported for type\")\n raise ValueError", "def plotPDF(self,thresholds,norm=False,fig=None,ax=None,**kwargs):\n\n\t\tif not matplotlib:\n\t\t\traise ImportError(\"matplotlib is not installed, cannot plot the PDF!\")\n\n\t\t#Instantiate figure\n\t\tif (fig is None) or (ax is None):\n\t\t\t\n\t\t\tself.fig,self.ax = plt.subplots()\n\n\t\telse:\n\n\t\t\tself.fig = fig\n\t\t\tself.ax = ax\n\n\t\t#Measure the PDF of the pixels\n\t\tkappa,pdf = self.pdf(thresholds,norm)\n\n\t\t#Plot the PDF\n\t\tself.ax.plot(kappa,pdf,**kwargs)\n\n\t\t#Adjust the labels\n\t\tif norm:\n\t\t\tself.ax.set_xlabel(r\"$\\sigma_{\\kappa}$\",fontsize=22)\n\t\t\tself.ax.set_ylabel(r\"$PDF(\\sigma_\\kappa)$\",fontsize=22)\n\t\telse:\n\t\t\ts = self.data.std()\n\t\t\tax_top = self.ax.twiny()\n\t\t\tax_top.set_xticks(self.ax.get_xticks())\n\t\t\tax_top.set_xlim(self.ax.get_xlim())\n\t\t\tax_top.set_xticklabels([ \"{0:.2f}\".format(n/s) for n in ax_top.get_xticks() ])\n\n\t\t\tself.ax.set_xlabel(r\"$\\kappa$\",fontsize=22)\n\t\t\tax_top.set_xlabel(r\"$\\kappa/\\sigma_\\kappa$\",fontsize=22)\n\t\t\tself.ax.set_ylabel(r\"${\\rm PDF}(\\kappa)$\",fontsize=22)", "def plot_pdfs(meta):\n f = plt.figure(figsize=(5,5))\n sps = f.add_subplot(1,1,1)\n sps.set_title(meta.name+r' PDFs')\n 
plotstep(sps,meta.binends,meta.intPz,c=c_int,l=l_int+r'$P(z)$',s=s_int,w=w_int,d=d_int,a=a_int)\n dummy_x,dummy_y = np.array([-1,-2,-3]),np.array([-1,-2,-3])\n plotstep(sps,dummy_x,dummy_y,c=c_exp,s=s_map,w=w_exp,l=r' MLE $z$',d=d_map,a=a_map)\n sps.legend(loc='upper right',fontsize='x-small')\n np.random.seed(seed=meta.ngals)\n randos = random.sample(xrange(meta.ngals),len(meta.colors))\n for r in lrange(randos):\n plotstep(sps,meta.binends,meta.pdfs[randos[r]],c=meta.colors[r%len(meta.colors)],s=s_smp,w=w_smp,d=d_smp,a=a_smp)\n sps.vlines(meta.mleZs[randos[r]],0.,max(meta.pdfs[randos[r]]),color=meta.colors[r],linestyle=s_map,linewidth=w_map,dashes=d_map,alpha=a_map)\n sps.set_ylabel(r'$p(z|\\vec{d})$')\n sps.set_xlabel(r'$z$')\n sps.set_xlim(meta.binlos[0]-meta.bindif,meta.binhis[-1]+meta.bindif)\n sps.set_ylim(0.,1./meta.bindif)\n f.savefig(os.path.join(meta.topdir,'samplepzs.pdf'),bbox_inches='tight', pad_inches = 0)\n return", "def pdf(self, x):\n raise NotImplementedError", "def worker_plot(fname):\n with Database() as base:\n _filter = base.get_filter(fname)\n plt.clf()\n plt.plot(_filter.trans_table[0], _filter.trans_table[1], color='k')\n plt.xlim(_filter.trans_table[0][0], _filter.trans_table[0][-1])\n plt.minorticks_on()\n plt.xlabel('Wavelength [nm]')\n plt.ylabel('Relative transmission')\n plt.title(\"{} filter\".format(fname))\n plt.tight_layout()\n plt.savefig(\"{}.pdf\".format(fname))", "def plotclass_pdf(pp, s, t=None):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n\n try:\n X4, Y4 = zip(*s.mean_cln_4)\n X6, Y6 = zip(*s.mean_cln_6)\n ax1.plot(X4, Y4, 'bo', color=\"blue\", alpha=0.4, label=\"IPv4\")\n ax1.plot(X6, Y6, 'bo', color=\"red\", alpha=0.4, label=\"IPv6\")\n except Exception as e:\n print(\"Plotting failed for host {} with error {}\".format(s.domain, e))\n return\n\n try:\n ax1.plot(s.xs4, s.spl_arr4, linewidth=4, color=\"blue\", alpha=0.4)\n ax1.plot(s.xs6, s.spl_arr6, linewidth=4, color=\"red\", alpha=0.4)\n except Exception as e:\n print(\"Not plotting host {} due to exception {}\".format(s.domain, e))\n return\n\n plt.legend(loc='lower right')\n plt.title('Host: {} ({} / {})\\n Decision: {}'.format(\n s.domain, s.ip4, s.ip6, s.dec), fontsize=10)\n plt.xlabel('measurement time (h)')\n plt.ylabel('observed offset (msec)')\n ticks = ax1.get_xticks() / 3600\n ticks = [round(t, 1) for t in ticks]\n ax1.set_xticklabels(ticks)\n # saving all in PDF\n pp.savefig(fig)\n tikz_save(\"{}.{}-{}.tex\".format(t, s.domain, hash((s.ip4, s.ip6))))\n plt.close(fig)", "def plot(self):\n pass", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot(self, **kwargs):\n if self.order != None:\n name = str(_constructModelName(self.teff, self.logg, \n self.metal, self.en, self.order, self.path))\n output = kwargs.get('output', str(name) + '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', \n alpha=.8, linewidth=1, label=name)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim) \n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, 
fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()\n\n else:\n output = kwargs.get('output'+ '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', alpha=.8, linewidth=1)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim)\n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()", "def plot_pdf(pop_name, pop_val, pop_file, full_pop_file, outdir='.'):\n try:\n plt.style.use(\n \"https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/4ee4a870126653d542572372ff3eee4e89abcab0/publication.mplstyle\")\n except Exception:\n pass\n\n plt.close('all')\n all = pd.read_csv(full_pop_file, sep=\" \")\n all['cos_theta_1'] = all['cos_tilt_1']\n all = process_samples(all)\n sub = pd.read_csv(pop_file, sep=\" \")\n sub = process_samples(sub)\n sub['cos_theta_1'] = sub['cos_tilt_1']\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for ax, l in zip(axes, [\"cos_theta_1\", \"cos_theta_12\"]):\n ax.hist(all[l], density=True, histtype='step', color=\"tab:blue\", label=\"ALL\", lw=2, alpha=0.8)\n ax.scatter(all[l], [0 for _ in all[l]], color=\"tab:blue\",marker=\"+\")\n ax.hist(sub[l], density=True, histtype='step', color=\"tab:purple\", label=\"HIGH SNR\", lw=2, alpha=0.6)\n ax.scatter(sub[l], [0 for _ in sub[l]], color=\"tab:purple\", marker=\"+\")\n\n x = np.linspace(-1, 1, 100)\n y1 = TruncatedNormal(mu=1, sigma=pop_val[0], minimum=-1, maximum=1).prob(x)\n y2 = TruncatedNormal(mu=1, sigma=pop_val[1], minimum=-1, maximum=1).prob(x)\n axes[1].plot(x, y2, color='tab:gray', zorder=-10, lw=3, label=\"TRUE\")\n axes[0].plot(x, y1, color='tab:gray', zorder=-10, lw=3)\n\n for i in range(len(axes)):\n if (i == 0):\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_1$\")\n axes[i].set_ylabel(\"PDF\")\n else:\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_{12}$\")\n axes[i].set_yticklabels([])\n axes[i].legend()\n axes[i].grid(False)\n axes[i].set_xlim(-1, 1)\n\n plt.suptitle(f\"POP {pop_name}\")\n plt.tight_layout()\n plt.savefig(f\"{outdir}/pop_trues_{pop_name}.png\")", "def create_pdf():\n\n fig = plt.gcf()\n fig.set_size_inches(OutFileParameter.width, OutFileParameter.height)\n fig.savefig(OutFileParameter.name + '.' 
+ OutFileParameter.ext, dpi=OutFileParameter.dpi)\n\n return None", "def plotPRC(yscore, true, datasets, title, outfile):\n \n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title(title)\n \n for i in range(len(datasets)):\n precision, recall, _ = precision_recall_curve(true[i], yscore[i][:,1])\n prc_auc = average_precision_score(true[i], yscore[i][:,1])\n plt.plot(recall, precision, label=datasets[i]+' (area = %0.2f)' % (prc_auc),linewidth=1)\n \n plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def render_as_pdf(self, width, height):\n pass", "def plot_to_pdf(pdf_fname, cmts_directory, misfit_windows_collection, iterations_list, snr_threshold, event_depth):\n rep_key = sorted(misfit_windows_collection.keys())[0]\n all_events = sorted(misfit_windows_collection[rep_key].keys())\n with PdfPages(pdf_fname) as pdf:\n for each_event in tqdm.tqdm(all_events):\n # we should plot the beachball and plot the source parameter table here\n plot_source_parameters(\n each_event, pdf, cmts_directory, iterations_list)\n # prepare information to plot\n each_misfit_windows_collection = {}\n for each_iteration in iterations_list:\n each_misfit_windows_collection[each_iteration] = (\n misfit_windows_collection[each_iteration][each_event])\n event_depth_dict = event_depth[each_event]\n data_collection, category_phases, category_list = get_plotting_data(\n each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict)\n for each_category, phase_list_for_each_category in zip(category_list, category_phases):\n # one page for each category\n figs = plt.figure(figsize=(50, 50))\n collecction_all = {}\n if (each_category != \"surface\"):\n collecction_all[\"deltat\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"similarity\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"cc\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n # we plot for each phases\n for row_index, each_phase in enumerate(phase_list_for_each_category):\n # we plot for deltat,similarity,cc\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n # num must be 1 <= num <= num_max, not 0\n # keep different category's figsize the same\n ax = figs.add_subplot(\n 8, 3, row_index * 3 + column_index+1)\n\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(data_collection[each_iteration][each_category][row_index]\n [plot_type], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n # collect to the category summary\n if(each_category != \"surface\"):\n if (column_index == 0):\n collecction_all[\"deltat\"][interation_index] = np.concatenate(\n (collecction_all[\"deltat\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 1):\n collecction_all[\"similarity\"][interation_index] = np.concatenate(\n (collecction_all[\"similarity\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 2):\n collecction_all[\"cc\"][interation_index] = np.concatenate(\n (collecction_all[\"cc\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n 
ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n # ax.legend()\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(each_phase, fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n if (row_index == 0 and column_index == 1):\n ax.set_title(\n f\"gcmtid: {each_event}\\ncategory: {each_category}\", fontsize=50)\n if (each_category != \"surface\"):\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n ax = figs.add_subplot(\n 8, 3, (row_index+1) * 3 + column_index+1) # pylint: disable=undefined-loop-variable\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(collecction_all[plot_type][interation_index], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(\n \"all phases\", fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n\n figs.tight_layout()\n pdf.savefig(figs)\n plt.close(fig=figs)", "def run_plot(args):\n # print(\"running chronqc_plot\")\n chronqc_plot.main(args)", "def main():\n argparser = argparse.ArgumentParser(description=\"Convert plot to table\")\n\n argparser.add_argument(\"pdf\", action=\"store\", help=\"pdf file\",\n default=None, nargs=\"*\")\n\n args = argparser.parse_args()\n\n if len(args.pdf) == 0:\n open_gui()\n else:\n process_pdf(args.pdf[0])\n\n generate_data()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_front_title(data_obj, is_ansi=True):\n\n new_pdf_page(data_obj.pdf_obj, close_fig=False) #Create a new page\n plt.axis('off')\n\n #Determine which standard is being used, to choose which to include on the title page\n if is_ansi:\n test_std = \"ANSI N42.55\"\n else:\n test_std = \"ASTM F792\"\n\n # Display Title, that includes standard and x-ray information\n plt.text(0.15, 0.9, test_std + \" test results for\", ha='left', va='center', fontsize=20)\n date_str = (time.strftime(\"PDF generated %Y-%m-%d at %H:%M\"))\n plt.text(0.15, 0.85, data_obj.xray_detector + ' system using', ha='left', va='center', fontsize=20)\n plt.text(0.15, 0.8, data_obj.xray_source + ' source', ha='left', va='center', fontsize=20)\n\n y0 = 0.8\n dy = 0.022\n x0 = 0.2\n\n # Display the comments, date, and info about code\n plt.text(x0, y0 - 3 * dy, data_obj.xray_comments, ha='left', va='center', fontsize=8)\n plt.text(x0, y0 - 4 * dy, date_str, ha='left', va='center', fontsize=8)\n plt.text(x0, y0 - 5 * dy, 'Analyzed using Glover ' + test_std + ' Python code (version 0.13)',\n ha='left', va='center', fontsize=8)\n\n\n #-------------------------------------------------------------------------------------#\n # Display all the images tested during the analysis, including the path to each image #\n 
#-------------------------------------------------------------------------------------#\n\n fs_files = 6\n ypos = y0 - 8 * dy\n dy = 0.019\n plt.text(x0, ypos, 'Main test object images:', ha='left', va='center', fontsize=8)\n ypos = ypos - dy\n for i, file in enumerate(data_obj.files):\n plt.text(x0, ypos, \" \" + file, ha='left', va='center', fontsize=fs_files)\n ypos = ypos - dy\n\n # Include the Image extent if the standard being tested to is ANSI N42.55\n if is_ansi:\n ypos = ypos - 0.01\n plt.text(x0, ypos, 'Image extent image:', ha='left', va='center', fontsize=8)\n ypos = ypos - dy\n plt.text(x0, ypos, \" \" + data_obj.extent_img_filename, ha='left', va='center', fontsize=fs_files)\n ypos = ypos - dy\n\n ypos = ypos - 0.01\n plt.text(x0, ypos, 'Noise images:', ha='left', va='center', fontsize=8)\n ypos = ypos - dy\n for i, file in enumerate(data_obj.noise_img_data):\n plt.text(x0, ypos, \" \" + data_obj.noise_img_data[i].filename, ha='left', va='center', fontsize=fs_files)\n ypos = ypos - dy", "def show():\n setup()\n plt.show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plotBatch(inp,output='spectra_plot.pdf',comparisons=None,classify=False,normalize=False,normrange=[0.9,1.4],layout=[2,2],basecolors=['k','m'],legend=[],fontscale=0.7,classify_kwargs={},plot_kwargs={},**kwargs):\n\n# alt keyword check\n for k in ['file','filename']: \n if kwargs.get(k,'') != '': output = kwargs[k]\n for k in ['legends','labels']: \n if kwargs.get(k,'') != '': legend = kwargs[k]\n\n# force input into a list\n if isinstance(inp,list): inputlist = copy.deepcopy(inp)\n else: inputlist = [inp]\n\n# if input is a string of filenames, read in each file to a spectrum object\n if isinstance(inputlist[0],str):\n# try a glob search string \n files = glob.glob(os.path.normpath(inputlist[0]))\n if len(files) > 1 or (len(files) == 1 and inputlist[0].find('*') != -1):\n inputlist = files\n# try reading in files into Spectrum object\n try:\n splist = [splat.Spectrum(file = f) for f in inputlist]\n except:\n raise ValueError('\\nCould not read in list of files {} - make sure the full path is specified and the files are correctly formatted'.format(inputlist))\n\n# if filenames, read in each file to a spectrum object\n elif isinstance(inputlist[0],splat.Spectrum):\n splist = copy.deepcopy(inputlist)\n else:\n raise ValueError('\\nInput should be list of splat.Spectrum objects or filenames')\n\n# normalize if desired\n if normalize==True:\n tmp = [sp.normalize(normrange) for sp in splist]\n\n# comparison files are present\n complist = []\n if comparisons != None:\n comp = copy.deepcopy(comparisons)\n if not isinstance(comp,list): comp = [comp]\n if isinstance(comp[0],str):\n try:\n complist = [splat.Spectrum(file = f) for f in comp]\n except:\n print('\\nCould not read in comparison files: ignoring comparisons')\n if isinstance(comp[0],splat.Spectrum):\n complist = comp\n if len(complist) < len(splist):\n while len(complist) < len(splist):\n complist.append(complist[-1])\n# normalize\n if normalize==True:\n tmp = [sp.normalize(normrange) for sp in complist]\n\n# set comparison files to be standards for spectral 
classification\n# overrules input comparison sample\n if classify == True:\n complist = []\n base_kwargs={\n 'return_standard': True,\n 'method': 'kirkpatrick',\n }\n base_kwargs.update(classify_kwargs)\n for sp in splist:\n complist.append(splat.classifyByStandard(sp,**base_kwargs))\n\n# prep for plotting\n plotlist = []\n clist = []\n for i,sp in enumerate(splist):\n if len(complist) == len(splist):\n plotlist.append([sp,complist[i]])\n clist.extend(basecolors)\n else:\n plotlist.append([sp])\n clist.extend(basecolors[0])\n\n# manage legends\n if len(legend) != 0:\n if not isinstance(legend,list): legend = [legend]\n if len(legend) < (len(splist)+len(complist)):\n# guess: just left out the comparison legends \n if len(complist) > 0 and len(legend) == len(splist):\n legtmp = []\n for i,l in enumerate(legend):\n legtmp.extend([l,'{}'.format(complist[i].name)])\n legend = legtmp\n else:\n# otherwise: pad the remaining legends with the last legend (pairs) \n while len(legend) < (len(splist)+len(complist)):\n if len(complist)>0:\n legend.extend([legend[-2],legend[-1]])\n else:\n legend.extend([legend[-1]])\n if len(legend) > (len(splist)+len(complist)):\n legend = legend[0:(len(splist)+len(complist))]\n else:\n legend = []\n for i,sp in enumerate(splist):\n l = []\n if 'name' in list(sp.__dict__.keys()): l.append(sp.name)\n else: l.append(os.path.basename(sp.filename))\n if len(complist)>0:\n if 'name' in list(complist[i].__dict__.keys()): l.append(complist[i].name)\n else: l.append(os.path.basename(complist[i].filename))\n legend.extend(l)\n\n# generate plot\n base_kwargs={\n 'multiplot': True,\n 'multipage': True,\n 'legends': legend,\n 'colors': clist,\n 'layout': layout,\n 'fontscale': fontscale,\n 'output': output,\n }\n base_kwargs.update(plot_kwargs)\n fig = plotSpectrum(plotlist,**base_kwargs)\n\n return fig", "def generatePlot (self, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name):\n \n #self.clear_results_directory(results_dir)\n \n XaxisLabel = 'TCD Conversion [%]'\n YaxisLabel = 'Product Yield [wt %]'\n \n self.drawplot(XaxisLabel, YaxisLabel, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name)", "def pdf(data, args):\n return Plot._dist(data, args)", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def plot(metric1, metric2, label1, label2, save_location, id, batch_size):\n try: \n fig = plt.figure()\n plt.plot(metric1, label=label1)\n plt.plot(metric2, label=label2, linestyle='dashed')\n plt.legend()\n plt.xlabel('Epoch')\n plt.grid(linestyle='dotted')\n # plt.ylim(top=)\n # plt.show()\n plt.savefig(save_location + id + '_' + DRUM_INSTRUMENT + 
'_' + str(EPOCHS) + '_' + str(batch_size) + '.pdf')\n plt.clf()\n plt.cla()\n plt.close(fig=fig)\n except Exception as e:\n print('Failed to create plot: ', e)", "def plot(self, *args, **kwargs):\n pass", "def plot(self, ax=None, ylabel=\"PDF(x)\", xlabel=\"x\", num_bins=50, show=False, outputname=None, color=\"C2\", logscale=False, xlims=None):\n \n \"\"\"Create figure if none was provided\"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n \"\"\"Plot\"\"\"\n ax.hist(self.sample_x, bins=num_bins, color=color)\n\n \"\"\"Set plot attributes\"\"\"\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n \n \"\"\"Set xlim if requested\"\"\"\n if xlims is not None:\n ax.set_xlim(xlims[0], xlims[1])\n\n \"\"\"Set yscale to log if requested\"\"\"\n if logscale:\n ax.set_yscale(\"log\")\n \n \"\"\"Save if filename provided\"\"\"\n if outputname is not None:\n plt.savefig(outputname + \".pdf\")\n plt.savefig(outputname + \".png\", density=300)\n \n \"\"\"Show if requested\"\"\"\n if show:\n plt.show()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def show():\n\tplt.show()", "def main(logy):\n filep = sys.stdin\n dataf = pd.read_csv(filep, sep=\" \", index_col=0, header=None)\n dataf.plot(logy=logy)\n print(dataf)\n plt.savefig(\"scaling.pdf\")", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and 
$\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 
-0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p 
-0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p -0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. 
of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of 
counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in 
range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of 
counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = 
{i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot(self):\n\t\tself.plotOfSpect()", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n 
ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf", "def plot_graph(self) -> None:", "def _plot_posterior_pdf(self, parameter, ax, **kwargs):\n y_label = kwargs.pop('y_label', 'Posterior pdf')\n x_label = kwargs.pop('x_label', parameter)\n\n x_min = self.distribution_parameter_support[parameter][0]\n x_max = self.distribution_parameter_support[parameter][1]\n dx = (x_max - x_min)/100\n x_vals = np.arange(dx, x_max, dx)\n\n posterior = self._posterior_marginal_scipy(parameter)\n posterior_mean = self.posterior_mean(parameter)\n\n N = sum(self.data.values())\n if N > 0:\n fill_type = 'hdcr'\n hdcr = self.posterior_high_density_credible_region\n low_p, high_p = hdcr(parameter)\n else:\n fill_type = 'ccr'\n low_p, high_p = self.posterior_central_credible_region(parameter)\n\n x_fill = np.arange(low_p, high_p, 0.01)\n\n plot_parameter_pdf(ax, posterior, posterior_mean, x_vals,\n fill=fill_type, x_fill=x_fill, confidence=0.95,\n y_label=y_label, x_label=x_label, color='b')", "def draw_perf(best_per_lr, learning_rate_updates_epoch, fignumber=0, mode=\"loss\", pdf=None):\n plt.figure(fignumber, figsize=(6,3))\n plt.clf()\n ax = plt.subplot(1,1,1)\n plot_perf(ax, best_per_lr, learning_rate_updates_epoch, mode)\n if pdf is None:\n plt.show()\n else:\n pdf.savefig()\n plt.close()", "def make_F792_plots(data_obj, title_pages=False):\n\n print(\"Generating plots...\")\n\n # Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n # Call the\n plot_front_title(data_obj)\n\n # -----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. 
#\n # -----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n # -----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n # -----------------------------------------------------------------------------------#\n\n # Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Steel Differentiation\n\n\n # Plot the overall results text of the second test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the third test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 3, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the fourth test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 4, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] # Make sure the local yi is updated\n\n # Plot the overall results text of the fifth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 5, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the sixth test, Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 6, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) # Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n # --------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n # --------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs, standard=\"ASTM F792\")\n\n\n #---------------------------------------------------------#\n # Plot the cropped and rotated images from the processing #\n #---------------------------------------------------------#\n plot_images(data_obj, fs) # Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) # Add in the footnotes to the pdf\n\n # NOTE: Above image plotting the same, with the same footnotes, for F792???\n\n #-----------------------------#\n # Steel differentiation plots #\n #-----------------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Steel Differentiation\")\n\n #Call the function to plot the Steel Differentiation results to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Penetration\")\n\n # Call the function to plot the Steel Penetration results to the pdf\n #plot_steel_pen(data_obj, 2)\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 3, cmap)\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 4)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 5)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 6)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def pdf(x, point):\n raise NotImplementedError(\"The pdf method has not yet been implemented.\")", "def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, 
(r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. 
of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def plotfile(self):\r\n filename = self.locatefile()\r\n if filename == \"\":\r\n print \"\\nNo file was chosen, exiting ...\\n\"\r\n return\r\n else:\r\n print \"\\nXYZ Data file:\\n\" + filename\r\n \r\n print \"\\nReading XYZ data file....\"\r\n xyz = XYZImporter(filename)\r\n geodata = xyz.genericdata\r\n print \"FINISHED reading XYZ data file\"\r\n\r\n # Note PNG is only 8 bit, and so PDF has greater colour\r\n # depth \r\n print \"\\nAbout to render plot ...\"\r\n gp = GridPlotterCustom()\r\n gp.shownulls = False\r\n title = \"Plot of XYZ data file: \" + filename\r\n outfname = (filename.replace('.', '_') +\r\n '_PLOT_custom.pdf')\r\n gp.plotgeodata(geodata, title, outfname)\r\n print \"FINISHED rendering plot to:\\n\" + outfname\r\n print \"\\n\\n\"", "def plot(self, filename, title=None, labels=None, **kwargs):\n if self._logger:\n self._logger.info(\"starting plot of probabilities...\")\n\n t1 = datetime.now()\n\n plt.cla()\n plt.clf()\n\n axis = self._mesh.axis()\n\n if self._mesh.is_1d():\n pdf = np.zeros(self._shape, dtype=float)\n\n for i in self.data.collect():\n pdf[i[0]] = i[1]\n\n plt.plot(\n axis,\n pdf,\n color='b',\n linestyle='-',\n linewidth=1.0\n )\n\n if labels:\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n else:\n plt.xlabel('Position')\n plt.ylabel('Probability')\n\n if title:\n plt.title(title)\n elif self._mesh.is_2d():\n pdf = np.zeros(self._shape, dtype=float)\n\n for i in self.data.collect():\n pdf[i[0], i[1]] = i[2]\n\n figure = plt.figure()\n axes = figure.add_subplot(111, projection='3d')\n\n axes.plot_surface(\n axis[0],\n axis[1],\n pdf,\n rstride=1,\n cstride=1,\n cmap=plt.cm.YlGnBu_r,\n linewidth=0.1,\n antialiased=True\n )\n\n if labels:\n axes.set_xlabel(labels[0])\n axes.set_ylabel(labels[1])\n axes.set_zlabel(labels[2])\n else:\n axes.set_xlabel('Position x')\n axes.set_ylabel('Position y')\n axes.set_zlabel('Probability')\n\n if title:\n axes.set_title(title)\n axes.view_init(elev=50)\n\n # figure.set_size_inches(12.8, 12.8)\n else:\n if self._logger:\n self._logger.error(\"mesh dimension not implemented\")\n raise NotImplementedError(\"mesh dimension not implemented\")\n\n plt.savefig(filename, **kwargs)\n plt.cla()\n plt.clf()\n\n if self._logger:\n self._logger.info(\"plot in {}s\".format((datetime.now() - t1).total_seconds()))", "def make_kde_plot(x, pdf):\n\n fig = plt.figure(figsize=(768/96, 400/96), dpi=9)\n ax = plt.gca()\n ax.plot(x, pdf)\n ax.fill_between(x, pdf, alpha=.5)\n\n # Formatting\n plt.xlabel('Hourly rate ($)', fontsize=18)\n plt.xticks(fontsize=12)\n plt.ylabel('Number of tutors', fontsize=18)\n plt.yticks(fontsize=12)\n plt.title(\"Pricing distribution for similar tutors\", fontsize=24)\n plt.tight_layout()\n plt.show()\n\n # Save file to variable instead of writing to disk.\n img_io = StringIO()\n plt.savefig(img_io, 
dpi=96, format='png')\n img_io.seek(0)\n\n return img_io", "def make_jpdf_plot(x_data,\n y_data,\n x_label,\n y_label, \n axis=\"\", \n title=\"\"):\n \n xmin = 0.\n ymax = 0.\n ymin = 0.\n ymax = 0.\n if axis == \"\":\n xmin = x_data.min()\n xmax = x_data.max()\n ymin = y_data.min()\n ymax = y_data.max()\n axis = [xmin,xmax,ymin,ymax]\n else:\n xmin = axis[0]\n xmax = axis[1]\n ymin = axis[2]\n ymax = axis[3]\n\n # prepare data for jpdf plot\n X, Y = np.mgrid[xmin:xmax:100j,ymin:ymax:100j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n values = np.vstack([x_data,y_data])\n kernel = scipy.stats.gaussian_kde(values)\n Z = np.reshape(kernel(positions).T, X.shape)\n \n \n plt.figure()\n plt.pcolor(X,Y,Z)\n plt.plot(x_data, y_data, 'k.', markersize=3)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.axis([xmin, xmax,ymin,ymax])\n if not title == \"\":\n plt.title(title)\n #plt.set_ylim([ymin, ymax])\n cb = plt.colorbar()\n cb.set_label(\"probability density\")\n plt.show()", "def plot_mpdf(self, param, margin, plot_type, Nsplit=50, **kwargs):\n \n title = self.family.capitalize() + \" Copula PDF\" \n\n # We retrieve the univariate marginal distribution from the list\n univariate1 = margin[0][\"distribution\"]\n univariate2 = margin[1][\"distribution\"]\n \n bounds = [-3, 3]\n\n U_grid, V_grid = np.meshgrid(\n np.linspace(bounds[0], bounds[1], Nsplit),\n np.linspace(bounds[0], bounds[1], Nsplit))\n \n mpdf = lambda uu, vv : self.get_pdf(\n univariate1.cdf(uu, margin[0][\"loc\"], margin[0][\"scale\"]), \\\n univariate2.cdf(vv, margin[1][\"loc\"], margin[1][\"scale\"]), param) \\\n * univariate1.pdf(uu, margin[0][\"loc\"], margin[0][\"scale\"]) \\\n * univariate2.pdf(vv, margin[1][\"loc\"], margin[1][\"scale\"])\n\n Z = np.array(\n [mpdf(uu, vv) for uu, vv in zip(np.ravel(U_grid), np.ravel(V_grid)) ] )\n Z = Z.reshape(U_grid.shape)\n\n if plot_type == \"3d\":\n plot_bivariate_3d(U_grid,V_grid,Z, bounds, title, **kwargs)\n elif plot_type == \"contour\":\n plot_bivariate_contour(U_grid,V_grid,Z, bounds, title, **kwargs)\n else:\n print(\"only \\\"contour\\\" or \\\"3d\\\" arguments supported for type\")\n raise ValueError", "def plot(self,pre_plotname,data,modeltype=None,**kwd):\n import ROOT\n from PyAnUtils.plotstyles import njStyle\n \n _style = njStyle()\n _style.cd()\n\n from PyAnUtils.pyanfunctions import ExtraOpt\n \n aux = ExtraOpt( [ ('sample',''),('xtitle','N_{t}'),\n ('ytitle','Number of Events'),('title',''),\n ('layoutlabel',''),('components',''), ('bins',''),\n ('plot_suffix','pdf')] )\n aux.setkwd(kwd)\n \n ROOT.gROOT.SetBatch(1)\n\n # format the suffix name\n try:\n plotname = pre_plotname.split('.')[:-1][0]+\".{0}\".format(aux.plot_suffix)\n except IndexError:\n plotname = pre_plotname+\".{0}\".format(aux.plot_suffix)\n \n frame = self.__getattribute__(self.__observable).frame()\n frame.SetXTitle(aux.xtitle)\n frame.SetYTitle(aux.ytitle)\n frame.SetTitle(aux.title)\n # Just if the data has to be rebinnined\n if aux.bins:\n data.plotOn(frame,ROOT.RooFit.Binning(int(aux.bins)))\n else:\n data.plotOn(frame)\n # Model to use -->\n if not modeltype:\n modeltype = self.__models.keys()[0]\n model = self.__models[modeltype][0]\n model.plotOn(frame)\n # -- The components, if any\n components = model.getComponents()\n componentsIter = components.iterator()\n modelName = model.GetName()\n for i in xrange(len(components)):\n comp = componentsIter.Next()\n compName = comp.GetName()\n if compName != modelName:\n model.plotOn(frame,ROOT.RooFit.Components(compName),\n 
ROOT.RooFit.LineStyle(ROOT.kDashed))\n # --- The parameters of the fit\n model.paramOn(frame,ROOT.RooFit.Layout(0.55,0.9,0.8),\n ROOT.RooFit.Label(aux.layoutlabel))\n frame.Draw()\n c = ROOT.gROOT.GetListOfCanvases()[0]\n #plotname_nosuffix = plotname.split('.')[0]\n #plotname_suffix = plotname.split('.')[-1]\n c.SetLogy(0)\n c.SaveAs(plotname)\n #c.SaveAs(aux.sample+\"_\"+self.__observable+\".pdf\")\n c.SetLogy(1)\n c.SaveAs(plotname.split('.')[0]+\"_log.\"+plotname.split(\".\")[-1])\n #c.SaveAs(aux.sample+\"_\"+self.__observable+\"_log.pdf\")", "def plot(self, *args):\n return self.vocab().plot(*args)", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * 
len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def showPlot1(): \n raise NotImplementedError", "def make_four_pdf(args):\n params = make_four_params(args)\n m4_filename = params['m4_filename']\n prefix = params['prefix']\n min_matching_length = params['min_matching_length']\n output_prefix = params['output_prefix']\n\n # if there are fewer than threshold reads then skip it\n threshold = 25 # threshold before plotting.\n if len(open(m4_filename).readlines()) < threshold:\n print('skipping %s because it has %d lines' % (\n m4_filename,\n len(open(m4_filename).readlines()))\n )\n return\n\n plb.rcParams['figure.figsize'] = 30, 30\n plt.clf()\n plt.figure(1)\n\n remove_punctuation = lambda x: ''.join(e for e in x if e.isdigit() or e == '.')\n coords = [int(remove_punctuation(a)) for a in prefix.split('_')[1:3]]\n dist = coords[1] - coords[0]\n\n graph = generate_graph(params)\n preset, postset, spanset, gapset = get_read_classifications(params)\n # Draw Ground Truth\n plt.subplot(2, 3, 1)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n pos = nx.spring_layout(graph)\n\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors\\n\\\n Red=Preset, Yellow=Postset, Blue=GapSet, Green=SpanSet\\n\\\n num_edges = {2}\\\n \"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Draw histogram of smith waterman scores and remove bad edges\n\n # squash preset and postset nodes\n graph = nx_helpers.remove_nodes(graph, preset)\n graph = nx_helpers.remove_nodes(graph, postset)\n\n # filter nodes by smith_waterman\n with utils.Timer(\"smith_waterman_filter\"):\n flanking_reads = preset.union(postset)\n # subplots 2 and 3 occur in smith_waterman_filter\n 
graph = smith_waterman_filter(graph, flanking_reads, params)\n\n # Draw groudn truth with squashed nodes\n plt.subplot(2, 3, 4)\n node_colors = node_set_colors(graph.nodes(), spanset, gapset, preset, postset)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; Ground Truth Colors \\n\\\n Removed Preset and Postsetnodes; Blue=GapSet, Green=SpanSet\\n\\\n number of edges = {2}\"\\\n .format(prefix, min_matching_length, nx.number_of_edges(graph))\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n plt.title(title)\n\n # Drop Small Communities and Draw\n plt.subplot(2, 3, 5)\n communities = nx_helpers.get_communities(graph)\n graph, communities = drop_small_communities(graph, communities)\n node_colors = node_community_colors(graph, communities)\n assert(len(node_colors) == nx.number_of_nodes(graph))\n title = \"Chr {0}; L={1}; After Removing Small Communities; NumCom={2}\\n\\\n ComQual={3}, MapQual={4}\\n\\\n number of edges = {5}\"\\\n .format(prefix, min_matching_length, len(communities),\n community_quality(communities, spanset, gapset),\n mapping_quality(graph, spanset, gapset),\n nx.number_of_edges(graph))\n #nx.draw(graph, node_color=node_colors, node_size=100, pos=pos)\n nx.draw_spring(graph, node_color=node_colors, node_size=100)\n plt.title(title)\n\n # IGV Line Plot\n plt.subplot(2, 3, 6)\n make_line_plot((spanset, gapset, preset, postset), params)\n\n plt.savefig(output_prefix + '_figs/%s-communities.pdf' % (prefix))\n\n ret_string = '%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\tchr%s_slop5000.png\\t%s-communities.pdf' % (\n prefix,\n prefix.split('_')[0],\n coords[0],coords[1],coords[1]-coords[0],\n len(communities),\n community_quality(communities, spanset, gapset),\n mapping_quality(graph, spanset, gapset),\n prefix,prefix\n )\n\n return ret_string", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def evaluate(self, plot):", "def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()", "def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def make_plot(x,y):", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def pdf(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n return self.pdf_link(link_f, y, extra_data=extra_data)", "def printPlot(self,nameHD):\n plt.title(nameHD)\n plt.xticks(range(len(self.xList)), self.xList)\n 
plt.plot(self.writeList,label=\"Write\")\n plt.plot(self.readList,label=\"Read\")\n plt.legend(loc=\"upper left\")\n plt.xlabel('Size (kb)')\n plt.ylabel('Time (s)')\n fname= nameHD + '.png'\n plt.savefig(fname,dpi=60)\n \n plt.close()", "def renderDocument(self, plot, filename, sizeMM=(300, 200), resolution=85,\r\n format_=None):\r\n if isinstance(sizeMM, tuple):\r\n sizeMM = QSizeF(*sizeMM)\r\n if format_ is None:\r\n ext = osp.splitext(filename)[1]\r\n if not ext:\r\n raise TypeError(\"Unable to determine target format from filename\")\r\n format_ = ext[1:]\r\n if plot is None or sizeMM.isEmpty() or resolution <= 0:\r\n return\r\n title = plot.title().text()\r\n if not title:\r\n title = \"Plot Document\"\r\n mmToInch = 1./25.4\r\n size = sizeMM * mmToInch * resolution\r\n documentRect = QRectF(0.0, 0.0, size.width(), size.height())\r\n fmt = format_.lower()\r\n if fmt in (\"pdf\", \"ps\"):\r\n printer = QPrinter()\r\n if fmt == \"pdf\":\r\n printer.setOutputFormat(QPrinter.PdfFormat)\r\n else:\r\n printer.setOutputFormat(QPrinter.PostScriptFormat)\r\n printer.setColorMode(QPrinter.Color)\r\n printer.setFullPage(True)\r\n printer.setPaperSize(sizeMM, QPrinter.Millimeter)\r\n printer.setDocName(title)\r\n printer.setOutputFileName(filename)\r\n printer.setResolution(resolution)\r\n painter = QPainter(printer)\r\n self.render(plot, painter, documentRect)\r\n painter.end()\r\n elif fmt == \"svg\":\r\n generator = QSvgGenerator()\r\n generator.setTitle(title)\r\n generator.setFileName(filename)\r\n generator.setResolution(resolution)\r\n generator.setViewBox(documentRect)\r\n painter = QPainter(generator)\r\n self.render(plot, painter, documentRect)\r\n painter.end()\r\n elif fmt in QImageWriter.supportedImageFormats():\r\n imageRect = documentRect.toRect()\r\n dotsPerMeter = int(round(resolution*mmToInch*1000.))\r\n image = QImage(imageRect.size(), QImage.Format_ARGB32)\r\n image.setDotsPerMeterX(dotsPerMeter)\r\n image.setDotsPerMeterY(dotsPerMeter)\r\n image.fill(QColor(Qt.white).rgb())\r\n painter = QPainter(image)\r\n self.render(plot, painter, imageRect)\r\n painter.end()\r\n image.save(filename, fmt)\r\n else:\r\n raise TypeError(\"Unsupported file format '%s'\" % fmt)", "def __init__(self, eulers_list):\n self.__dpi = 150\n self.__title = \"default\"\n self.__data = eulers_list\n self.__plane_list = [[0, 0, 1]]\n self.__is_literal = True # whether to use permutation to get a family of planes\n self.__lattice_vector = np.array([1.0, 1.0, 1.0]) # most simple case as default\n self.__output = \"pdf\"\n self.__clr_list = None\n self.__ref = np.eye(3) # matrix used to define xtal unit cell in reference configuration\n # set up pyplot\n self.__fig = plt.figure()\n self.__fig.add_subplot(111, aspect='equal')\n self.__fig.gca().add_artist(plt.Circle((0, 0), 1, color='k', fill=False))\n self.__unique_marker = False\n plt.plot([-1, 1], [0, 0], c=\"k\")\n plt.plot([0, 0], [-1, 1], c=\"k\")\n plt.gca().set_xlim((-1.15, 1.15))\n plt.gca().set_ylim((-1.15, 1.15))\n plt.gca().axes.get_xaxis().set_visible(False)\n plt.gca().axes.get_yaxis().set_visible(False)", "def render(results, out_file):\n fig, (ax_backlog, ax_merges, ax_open, ax_health) = plt.subplots(\n 4, sharex=True, figsize=(16, 10), dpi=100)\n\n fig.autofmt_xdate()\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\n plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n if results.dts:\n render_queue(results, ax_open)\n render_merges(results, ax_merges)\n render_backlog(results, ax_backlog)\n 
render_health(results, ax_health)\n fig.text(\n 0.1, 0.00,\n 'image: %s, sample: %s' % (\n datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M'),\n results.dts[-1].strftime('%Y-%m-%d %H:%M'),\n ),\n horizontalalignment='left',\n fontsize='x-small',\n color=fresh_color(results.dts[-1]),\n )\n\n plt.savefig(out_file, bbox_inches='tight', format='svg')\n plt.close()", "def plot_spatial_res(data_obj, test_num):\n\n #---------------------------------------------------------------------------------------------#\n # Go through each file [image] and plot the spatial resolution ROI images and MTF data graphs #\n #---------------------------------------------------------------------------------------------#\n for i in range(len(data_obj.files)):\n\n fig = new_pdf_page(data_obj.pdf_obj) #Create a new pdf page\n plt.axis('off')\n\n # Create and plot the (centered) title of the page - that also states the MTF20 values\n str1 = 'Test ' + str(test_num) + ': Spatial Resolution \\n' + \\\n 'MTF20x = ' + \"{0:.2f}\".format(data_obj.MTF_data.MTF20_x) + \\\n '\\n MTF20y = ' + \"{0:.2f}\".format(data_obj.MTF_data.MTF20_y)\n plt.suptitle(str1)\n\n #-----------------------------------------------#\n # Plot the main image in the middle of the page #\n #-----------------------------------------------#\n ax1 = fig.add_subplot(312)\n plt.imshow(data_obj.img_data[i].lead_foil_ROI.img) #Display the ROI image of the lead foil\n\n # Display the title of the ROI image - the orientation\n plt.title('Orientation ' + str(data_obj.img_data[i].orientation))\n plt.xticks([]) # labels\n plt.yticks([])\n ax = plt.gca()\n ax.xaxis.set_ticks_position('none') # tick markers\n ax.yaxis.set_ticks_position('none')\n\n if data_obj.img_data[i].orientation % 2 == 1:\n plt.xlabel('x axis')\n plt.ylabel('y axis')\n else:\n plt.xlabel('y axis')\n plt.ylabel('x axis')\n\n #-----------------------------#\n # Display the Horizontal plot #\n #-----------------------------#\n ax2 = fig.add_subplot(325)\n plt.ylabel('MTFx')\n plt.xlabel('Spatial frequency (cycles/mm)')\n plt.plot(data_obj.img_data[i].MTF_obj.MTF_x_f, data_obj.img_data[i].MTF_obj.MTF_x)\n text = 'MTF20x = ' + \"{0:.2f}\".format(data_obj.img_data[i].MTF_obj.MTF20_x)\n\n ax2.annotate(text,\n xy=(data_obj.img_data[i].MTF_obj.MTF20_x, 0.2),\n xytext=(np.max(data_obj.img_data[i].MTF_obj.MTF_x_f), 0.7),\n arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=4),\n horizontalalignment='right')\n\n #---------------#\n # Vertical plot #\n #---------------#\n ax3 = fig.add_subplot(326)\n plt.ylabel('MTFy')\n plt.xlabel('Spatial frequency (cycles/mm)')\n plt.plot(data_obj.img_data[i].MTF_obj.MTF_y_f, data_obj.img_data[i].MTF_obj.MTF_y)\n text = 'MTF20y = ' + \"{0:.2f}\".format(data_obj.img_data[i].MTF_obj.MTF20_y)\n\n ax3.annotate(text,\n xy=(data_obj.img_data[i].MTF_obj.MTF20_y, 0.2),\n xytext=(np.max(data_obj.img_data[i].MTF_obj.MTF_y_f), 0.7),\n arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=4),\n horizontalalignment='right')", "def plot_parcles_run(k, v):\n\n L.info(f'Received Input: {v}')\n\n L.info(f\"Plotting results from: {v['filepath']}\")\n\n plotpath = Path(__file__).parent.parent / 'plots'\n filename = v['id'] + '.png'\n plotfile = str(plotpath / filename) \n\n plotTrajectoriesFile(v['filepath'], mode='2d', show_plt=False)\n f = plt.gcf()\n f.savefig(plotfile)\n\n L.info(f'Saved plot to: {plotfile}')", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, 
'.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n return buf", "def three_PDF_plots(res=200,table_exts=[''],**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n GR = glo.global_results()\n\n fig, axs = plt.subplots(3, sharex='col',\\\n figsize=(8,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n # First print cell data distribution\n i = 0\n for gal_index in zip(p.gal_index):\n ax1 = axs[i]\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n df = gal_ob.cell_data.get_dataframe()\n lognH = np.log10(df.nH)\n hist = np.histogram(lognH[df.nH.values > 0],bins=200,weights=df.m[df.nH.values > 0])\n hist1 = np.asarray(hist[0]) # histogram\n hist2 = np.asarray(hist[1]) # bin edges\n hist1 = hist1*1./sum(hist1)\n ax1.plot(hist2[0:len(hist1)],hist1,drawstyle='steps',ls='-',lw=1.5,\\\n alpha=0.7,color=p.color[0],label='Original cell distribution')\n \n for table_ext,ls,color in zip(table_exts,['--',':'],p.color[1::]):\n if '_M10' in table_ext: lab = 'Mach = 10'\n if '_arepoPDF_M51' in table_ext: lab = 'AREPO parametrized PDF'\n PDF(gal_index,color=color,table_ext=table_ext,ls=ls,res=200,add=True,ax=ax1,label=lab,ow=p.ow)\n \n if i == 0: ax1.legend(loc='upper right',fontsize=12)\n if i == 2: ax1.set_xlabel(getlabel('lnH'))\n ax1.set_ylabel('Mass fraction per bin')\n\n i += 1\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s%s_x3.png' % (p.sim_name,p.sim_run,p.table_ext), format='png', dpi=250, facecolor='w')", "def plotDistribution(lXs, lYs, out=\"\", title=\"\", xax=\"\", yax=\"\", color=\"blue\", legend=\"\", grid=[]):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.plot(lXs,lYs, color=color)\n if legend:\n ax.legend(legend, fontsize=22)\n for line in grid:\n ax.axvline(x=line, linestyle='dashed', linewidth=1, color='black')\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def plot(self, *args, **kwargs):\n raise NotImplementedError", "def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf", "def plot_noise(data_obj, test_num):\n\n #=======================================================================#\n # Create and plot the page for the X values of the test (NEQ, NPS, MTF) #\n #=======================================================================#\n\n fig = new_pdf_page(data_obj.pdf_obj) #Create a new page in the pdf\n\n # Display the (centered) title of the figures / page\n plt.suptitle('Test ' + str(test_num) +\n ': Noise ($NEQ_x$ at 1 lp/mm: ' +\n \"{:,}\".format(int(data_obj.NEQ_x_1)) + ')')\n\n #--------------------------------------------------------------------------#\n # Display the equation and variables of the test to the middle of the page #\n #--------------------------------------------------------------------------#\n fs3 = 10\n plt.subplot(3, 1, 1)\n plt.axis('off')\n\n # Display 
the main equation for calculating the NEQ\n str1 = \"$ NEQ = \\\\frac{S_{out}^2 MTF^2}{NPS} $\"\n plt.text(0.3, 0.6, str1, ha='left', fontsize=16)\n\n # Display the values of the different variables in the NEQ equation\n str1 = \"$NPS_x$ (at 1 lp/mm) = \" + \"{0:.1f}\".format((data_obj.NPS_x_1))\n plt.text(0.3, 0.4, str1, ha='left', fontsize=fs3)\n\n str1 = \"$MTF_x$ (at 1 lp/mm) = \" + \"{0:.3f}\".format((data_obj.MTF_x_1))\n plt.text(0.3, 0.3, str1, ha='left', fontsize=fs3)\n\n str1 = \"$S_{out}$ (at 1 lp/mm) = \" + \"{:,}\".format(int(data_obj.S_out))\n plt.text(0.3, 0.2, str1, ha='left', fontsize=fs3)\n\n str1 = \"$NEQ_x$ (at 1 lp/mm) = \" + \"{:,}\".format(int(data_obj.NEQ_x_1))\n str1 = str1 + ' $\\pm$ ' + \"{:,}\".format(int(data_obj.NEQ_x_1_err))\n plt.text(0.3, 0.1, str1, ha='left', fontsize=fs3)\n\n\n #------------------------------------------------#\n # Display the graphs for NEQ_x, MTF_x, and NPS_x #\n #------------------------------------------------#\n\n plt.subplot(3, 2, 3)\n plt.ylabel('$MTF_x$')\n plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_x_f, data_obj.MTF_data.MTF_x)\n\n plt.subplot(3, 2, 4)\n plt.ylabel('$\\\\sqrt{NPS_x}$')\n plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_x_f[1:], np.sqrt(data_obj.NPS_x_interp[1:]))\n\n plt.subplot(3, 1, 3)\n plt.ylabel('$\\\\sqrt{NEQ_x}$')\n plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_x_f, np.sqrt(data_obj.NEQ_x))\n\n ind = int(1 / 0.02)\n plt.plot(data_obj.MTF_data.MTF_x_f[ind], np.sqrt(data_obj.NEQ_x[ind]), 'ro')\n plt.tight_layout()\n\n\n ##=======================================================================#\n # Create and plot the page for the X values of the test (NEQ, NPS, MTF) #\n #=======================================================================#\n\n fig = new_pdf_page(data_obj.pdf_obj) #Create a new page in the pdf\n\n # Display the (centered) title of the figures / page\n plt.suptitle('Test ' + str(test_num) +\n ': Noise ($NEQ_y$ at 1 lp/mm: ' +\n \"{:,}\".format(int(data_obj.NEQ_y_1)) + ')')\n\n # --------------------------------------------------------------------------#\n # Display the equation and variables of the test to the middle of the page #\n # --------------------------------------------------------------------------#\n fs3 = 10\n plt.subplot(3, 1, 1)\n plt.axis('off')\n\n # Display the main equation for calculating the NEQ\n str1 = \"$ NEQ = \\\\frac{S_{out}^2 MTF^2}{NPS} $\"\n plt.text(0.3, 0.6, str1, ha='left', fontsize=16)\n\n # Display the values of the different variables in the NEQ equation\n str1 = \"$NPS_y$ (at 1 lp/mm) = \" + \"{0:.1f}\".format((data_obj.NPS_y_1))\n plt.text(0.3, 0.4, str1, ha='left', fontsize=fs3)\n\n str1 = \"$MTF_y$ (at 1 lp/mm) = \" + \"{0:.3f}\".format((data_obj.MTF_y_1))\n plt.text(0.3, 0.3, str1, ha='left', fontsize=fs3)\n\n str1 = \"$S_{out}$ (at 1 lp/mm) = \" + \"{:,}\".format(int(data_obj.S_out))\n plt.text(0.3, 0.2, str1, ha='left', fontsize=fs3)\n\n str1 = \"$NEQ_y$ (at 1 lp/mm) = \" + \"{:,}\".format(int(data_obj.NEQ_y_1))\n str1 = str1 + ' $\\pm$ ' + \"{:,}\".format(int(data_obj.NEQ_y_1_err))\n plt.text(0.3, 0.1, str1, ha='left', fontsize=fs3)\n\n #------------------------------------------------#\n # Display the graphs for NEQ_y, MTF_y, and NPS_y #\n #------------------------------------------------#\n\n plt.subplot(3, 2, 3)\n plt.ylabel('$MTF_y$')\n plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_y_f, data_obj.MTF_data.MTF_y)\n\n plt.subplot(3, 2, 4)\n plt.ylabel('$\\\\sqrt{NPS_y}$')\n 
plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_y_f[1:], np.sqrt(data_obj.NPS_y_interp[1:]))\n\n plt.subplot(3, 1, 3)\n plt.ylabel('$\\\\sqrt{NEQ_y}$')\n plt.xlabel('Line-pairs per mm')\n plt.plot(data_obj.MTF_data.MTF_y_f, np.sqrt(data_obj.NEQ_y))\n\n ind = int(1 / 0.02)\n plt.plot(data_obj.MTF_data.MTF_y_f[ind], np.sqrt(data_obj.NEQ_y[ind]), 'ro')\n plt.tight_layout()", "def plot(self, corner = True):\n pos = self.posterior_samples\n if self.verbose>=3 and self.NS.prior_sampling is False:\n pri = self.prior_samples\n mc = self.mcmc_samples\n elif self.verbose>=3 or self.NS.prior_sampling is True:\n pri = self.prior_samples\n mc = None\n else:\n pri = None\n mc = None\n from . import plot\n if self.NS.prior_sampling is False:\n for n in pos.dtype.names:\n plot.plot_hist(pos[n].ravel(), name = n,\n prior_samples = self.prior_samples[n].ravel() if pri is not None else None,\n mcmc_samples = self.mcmc_samples[n].ravel() if mc is not None else None,\n filename = os.path.join(self.output,'posterior_{0}.pdf'.format(n)))\n for n in self.nested_samples.dtype.names:\n plot.plot_chain(self.nested_samples[n],name=n,filename=os.path.join(self.output,'nschain_{0}.pdf'.format(n)))\n if self.NS.prior_sampling is False:\n import numpy as np\n plotting_posteriors = np.squeeze(pos.view((pos.dtype[0], len(pos.dtype.names))))\n if pri is not None:\n plotting_priors = np.squeeze(pri.view((pri.dtype[0], len(pri.dtype.names))))\n else:\n plotting_priors = None\n\n if mc is not None:\n plotting_mcmc = np.squeeze(mc.view((mc.dtype[0], len(mc.dtype.names))))\n else:\n plotting_mcmc = None\n\n if corner:\n plot.plot_corner(plotting_posteriors,\n ps=plotting_priors,\n ms=plotting_mcmc,\n labels=pos.dtype.names,\n filename=os.path.join(self.output,'corner.pdf'))\n plot.plot_indices(self.NS.insertion_indices, filename=os.path.join(self.output, 'insertion_indices.pdf'))", "def test_draw():\n circ_m = test_QFTn(3)\n print(launch(1024, circ_m))\n fig = circ_m.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/test.png')\n return fig", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def publish_pdf(\n self,\n filename: str | io.BytesIO,\n notes: str = None,\n open_file: bool = False,\n metadata: dict = None,\n bins: int = 10,\n logo: Path | str | None = None,\n ) -> None:\n plt.ioff()\n canvas = pdf.PylinacCanvas(\n filename, page_title=\"Picket Fence Analysis\", metadata=metadata, logo=logo\n )\n data = io.BytesIO()\n self.save_analyzed_image(data, leaf_error_subplot=True)\n canvas.add_image(data, location=(3, 5), dimensions=(15, 15))\n canvas.add_text(\n text=self.results(as_list=True), location=(1.5, 22), font_size=14\n )\n if notes is not None:\n canvas.add_text(text=\"Notes:\", location=(1, 5.5), font_size=14)\n canvas.add_text(text=notes, location=(1, 5))\n\n canvas.add_new_page()\n hist = io.BytesIO()\n self.save_histogram(hist, bins)\n canvas.add_image(hist, location=(3, 8), dimensions=(15, 15))\n canvas.finish()\n\n if open_file:\n webbrowser.open(filename)", "def make_N4255_plots(data_obj, aspect_corr=1.0, title_pages=False):\n\n print(\"Generating plots...\")\n\n #Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n #Call the function to create the title page of the pdf document\n 
plot_front_title(data_obj)\n\n #-----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. #\n #-----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n #-----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n #-----------------------------------------------------------------------------------#\n\n #Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 1, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the first test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the third test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 3, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] #Make sure the local yi is updated\n\n #Plot the overall results text of the fourth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 4, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the fifth test, NEQ Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 5, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) #Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n #Plot the overall results text of the sixth test, Flatness of field\n yi = yi - dy[0]\n plot_ff_text(data_obj, 6, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the seventh test, Image Extent\n yi = yi - dy[0]\n plot_extent_text(data_obj, 7, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the eighth test, Image Area\n yi = yi - dy[0]\n plot_area_text(data_obj, 8, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the ninth test, Aspect Ratio\n yi = yi - dy[0]\n plot_a_ratio_text(data_obj, 9, yi, xpos, ha, va, fs, dfs)\n\n #--------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n #--------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs)\n\n\n #-----------------#\n # Plot the images #\n #-----------------#\n plot_images(data_obj, fs) #Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) #Add in the footnotes to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Penetration\")\n\n #Call the function to plot the Steel Penetration results to the pdf\n plot_steel_pen_N4255(data_obj, 1)\n\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 2, cmap)\n\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 3)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 4)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 5)\n\n #-------------------#\n # Flatness of field #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Flatness of Field\")\n\n # Call the function to plot the Flatness of Field results to the pdf\n plot_field_flatness(data_obj, 6)\n\n #--------------#\n # Image extent #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 7: Image Extent\")\n\n # Call the function to plot the Image Extent results to the pdf\n plot_image_extent(data_obj, 7)\n\n\n #------------#\n # Image Area #\n #------------#\n if title_pages:\n fig = new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, 'Test 8: Image Area', ha='center', va='center', fontsize=20)\n str1 = str(data_obj.image_area[0]) + ' by ' + str(data_obj.image_area[1]) + ' pixels'\n plt.text(0.5, 0.4, str1, ha='center', va='center', fontsize=12)\n\n #--------------#\n # Aspect Ratio #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 9: Aspect Ratio\")\n\n #Call the function to plot the Aspect Ratio results to the pdf\n plot_aspect_ratio(data_obj, 9, cmap, aspect_corr)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def plot_ppplot(obj1,sheet1,variable1,obj2,sheet2,variable2,title,opath):\n p1 = np.percentile(obj1.me[sheet1][variable1],range(0,101,1))\n p2 = np.percentile(obj2.me[sheet2][variable2],range(0,101,1))\n p1c = np.cumsum(np.array(p1))/np.cumsum(np.array(p1)).max()\n p2c = np.cumsum(np.array(p2))/np.cumsum(np.array(p2)).max()\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(p1c,p2c,color='#566c73',s=30)\n plt.plot([0,1],[0,1],color='red',alpha=0.3)\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.grid()\n plt.xlabel(sheet1+'_'+variable1)\n plt.ylabel(sheet2+'_'+variable2)\n plt.title(title)\n plt.savefig(opath+'.png')\n plt.close()", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for 
i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def show(self):\n plt.show()", "def Diagnostic_plot2(self):\n\n probs = pd.read_csv(self.probfile)\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n plt.scatter(probs['f0'], probs['Pdet_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['Pdet_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['Pdet_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_Pdet' + self.ds.epic + '.pdf')\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'SNR')\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n #plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_SNR' + self.ds.epic + '.pdf')", "def plot_work_trajectories(pdf, work, title=\"\"):\n plt.figure(figsize=(12, 8))\n\n nrows = 2\n ncols = 6\n workcols = 2\n for (row, direction) in enumerate(['delete', 'insert']):\n #\n # Plot work vs step\n #\n\n col = 0\n plt.subplot2grid((nrows,ncols), (row, col), colspan=(ncols-workcols))\n\n # Plot average work distribution in think solid line\n plt.plot(work[direction].mean(0), 'k-', linewidth=1.0, alpha=1.0)\n # Plot bundle of work trajectories in transparent lines\n plt.plot(work[direction].T, 'k-', linewidth=0.5, alpha=0.3)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n workvals = np.ravel(np.abs(work[direction]))\n worklim = np.percentile(workvals, 98)\n nsteps = work[direction].shape[1]\n plt.axis([0, nsteps, -worklim, +worklim])\n # Label plot\n if row == 1: plt.xlabel('steps')\n plt.ylabel('work / kT')\n plt.title(\"%s NCMC in environment '%s' : %s\" % (title, envname, direction))\n plt.legend(['average work', 'NCMC attempts'])\n\n #\n # Plot work histogram\n #\n\n col = ncols - workcols\n plt.subplot2grid((nrows,ncols), (row, col), colspan=workcols)\n\n # Plot average work distribution in think solid line\n #nbins = 40\n workvals = work[direction][:-1,-1]\n #plt.hist(workvals, nbins)\n if workvals.std() != 0.0:\n sns.distplot(workvals, rug=True)\n else:\n print('workvals has stddev of zero')\n print(workvals)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n #worklim = np.percentile(workvals, 98)\n #oldaxis = plt.axis()\n #plt.axis([-worklim, +worklim, 0, oldaxis[3]])\n # Label plot\n if row == 1: plt.xlabel('work / kT')\n plt.title(\"total %s work\" % direction)\n\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()", "def 
_draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def new_title_page(data_obj, text):\n\n new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, text, ha='center', va='center', fontsize=20)", "def test_make_plot_ui(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='ui')\n except Exception as e:\n raise\n plt.close('all')", "def plot(self,displayplt = True,saveplt = False,savepath=''):\n figure1 = plt.figure()\n axa = figure1.add_subplot(2, 1, 1)\n sensitivity =hyd_calibration_multiple_freq(self.cfreq)\n pnp = -1e-6 * np.min(self.hydoutput, axis=1) / sensitivity\n figure2 = axa.plot(self.cfreq, pnp, 'x')\n axa.set_title('Frequency Sweep')\n plt.xlabel('Frequency (MHz)')\n plt.ylabel('Peak Negative Pressure (MPa)')\n axb = figure1.add_subplot(2, 1, 2)\n mi_fs = pnp / np.sqrt(self.cfreq)\n figure4 = axb.plot(self.cfreq, mi_fs, 'x')\n plt.xlabel('Frequency (MHz)')\n plt.ylabel('MI')\n if displayplt:\n plt.show()\n if saveplt:\n if savepath == '':\n # prompt for a save path using a default filename\n defaultfn = self.txdr + '_' + self.collectiondate + '_' + self.collectiontime + '_freqsweep.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf", "def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf", "def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam / nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t 
ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()" ]
[ "0.70713794", "0.69506466", "0.65899605", "0.6551687", "0.65347886", "0.6530138", "0.65197337", "0.6519003", "0.6465785", "0.6432504", "0.63811475", "0.6373067", "0.6363945", "0.63360846", "0.6330689", "0.63069654", "0.62943375", "0.62903255", "0.6219337", "0.62005454", "0.61842597", "0.61550355", "0.61494744", "0.61490554", "0.6143087", "0.6140236", "0.6132672", "0.61034155", "0.6078749", "0.60765165", "0.60514706", "0.6048969", "0.604609", "0.60321605", "0.6022103", "0.6018915", "0.6018603", "0.5995694", "0.598629", "0.5963297", "0.5963297", "0.5963297", "0.5942147", "0.59411174", "0.5936349", "0.5936324", "0.5931653", "0.5927641", "0.5912776", "0.5906116", "0.58943385", "0.5888619", "0.5886485", "0.5878481", "0.5876919", "0.5875737", "0.58642375", "0.5863972", "0.5862639", "0.585973", "0.5859026", "0.5857617", "0.5856346", "0.58549607", "0.58473116", "0.58332086", "0.58324957", "0.5820695", "0.5814791", "0.580684", "0.58067745", "0.5805993", "0.5803762", "0.57902783", "0.57888234", "0.57854205", "0.57813853", "0.57799685", "0.5777679", "0.5775771", "0.57756436", "0.5773705", "0.57699585", "0.57633823", "0.575726", "0.5756901", "0.5755768", "0.575114", "0.5749293", "0.5747637", "0.57417774", "0.57404613", "0.5740048", "0.5738318", "0.5735911", "0.57163393", "0.57087845", "0.57036525", "0.57036525", "0.5702013" ]
0.591895
48
Delete matches category and all its channels.
async def delete_matches_category(self):
    existing_categories = self.get_channels(
        'matches', ChannelType.category)
    for c in existing_categories:
        try:
            await asyncio.gather(*(chan.delete() for chan in c.channels))
            await c.delete()
        # We can't delete channels not created by us.
        except discord.HTTPException as e:
            log.warning(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(self, channel=None):\n # falls `channel` angegeben wurden, werden nur diese bereinigt,\n # ansonsten wird alles bereinigt\n if channel:\n # ueberpruefe, ob `channel` eine Zahl ist und erstelle eventuell eine Liste nur mit dieser Zahl\n # dies ist wichtig, weil eine For-Schleife nicht ueber eine Zahl,\n # sondern in meinem Fall nur ueber eine Liste, iterieren kann\n if type(channel) == int:\n channel = [channel]\n for c in channel:\n # loesche den channel `c` aus dem dictionary `self.channels`\n del self.channels[c]\n print(f\"cleanup von channel {c}\")\n else:\n print(\"cleanup\")\n self.channels = {}", "async def channeldelete(ctx):\r\n await ctx.send(\"🉐Deleting all channels...\")\r\n for channel in ctx.guild.channels:\r\n try:\r\n await channel.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]CHANNEL => {Fore.RESET}Failed to delete: {channel}\")", "def prune(cls):\n keep_ids = cls.objects.distinct(\"channel_id\", \"action\").order_by(\"channel_id\", \"action\", \"-performed\").values_list(\"id\", flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()", "async def deleteCategory(self, ctx, reason=\"No reason available\"):\n for category in ctx.guild.categories:\n if category.name == self.categoryName:\n try:\n for chan in category.channels:\n await chan.delete()\n await category.delete(reason=reason)\n except discord.errors.Forbidden:\n self.msgToDelete.append(await ctx.message.channel.send(\n \"Erreur, permission non accordée, la suppression des catégories n'est pas complète.\"))\n print(\"Deleted all category.\")", "def delete_slack_generated(self, channel_name=None, channel_id=None):\n self.delete_messages(\n channel_name=channel_name, channel_id=channel_id, confirmation_override=True,\n restrict={'type': 'subtype', 'values': cfg.SUBTYPES}\n )", "def clear_all():\n viewer = connect_to_ginga()\n shell = viewer.shell()\n chnames = shell.get_channel_names()\n for ch in chnames:\n shell.delete_channel(ch)", "async def handleChannelDelete(self, channel: discord.abc.GuildChannel):\n self.logger.info(\n \"Channel deletion has been detected. Name: %s, ID: %s\", channel.name, channel.id\n )\n\n if not isinstance(channel, discord.TextChannel):\n return\n\n async with self.config.guild(channel.guild).get_attr(KEY_CHANNEL_IDS)() as channelIds:\n if str(channel.id) in channelIds:\n self.logger.info(\"%s detected, removing exceptions\", AH_CHANNEL)\n ctx = await self.getContext(channel)\n if not ctx:\n return\n await self.notifyChannel(ctx, remove=True)\n await self.makeHighlightChanges(ctx, channel, remove=True)\n await self.makeStarboardChanges(ctx, channel, remove=True)\n await self.makeWordFilterChanges(ctx, channel, remove=True)\n del channelIds[str(channel.id)]", "def delete_matches(self, pattern):\n with self.connect() as c:\n cur = c.cursor()\n cur.execute(self.create_query(\"DELETE\", pattern))", "async def remove_bot_channels(self, guild_id):\n api_cog = self.bot.get_cog('RR_API')\n channelInfo = await api_cog.get_channel_info(guild_id)\n\n if not channelInfo:\n print(\"Server Name Not in DB, Can't delete channels. 
Server: \" + str(guild_id))\n return\n if channelInfo['futurechannelid']:\n await self.bot.get_channel(int(channelInfo['futurechannelid'])).delete()\n if channelInfo['pastchannelid']:\n await self.bot.get_channel(int(channelInfo['pastchannelid'])).delete()\n if channelInfo['lootchannelid']:\n await self.bot.get_channel(int(channelInfo['lootchannelid'])).delete()\n if channelInfo['commandschannelid']:\n await self.bot.get_channel(int(channelInfo['commandschannelid'])).delete()\n if channelInfo['categoryid']:\n await self.bot.get_channel(int(channelInfo['categoryid'])).delete()", "async def fulldelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(\r\n f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete role: {role}\"\r\n )\r\n for channel in ctx.guild.channels:\r\n try:\r\n await channel.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]CHANNEL => {Fore.RESET}Failed to delete: {channel}\")", "def delete_channel(channel_id: NewsChannelID) -> None:\n db.session.execute(\n delete(DbNewsChannel).where(DbNewsChannel.id == channel_id)\n )\n db.session.commit()", "def delete_cube(channel_id, database_connection):\n conn, cur = connect_to_database(database_connection)\n cur.execute(\"DELETE FROM cubes WHERE channel_id = %s\", [channel_id])\n commit_and_close_database(conn, cur)", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "def deleteMatches():\n DB = dbc()\n DB.cursor().execute('DELETE FROM matches')\n DB.commit()\n DB.close()", "def remove_servers_channels(self):\n for _hash in self._sections.keys():\n if not re.match(ur'^ server ', _hash) and not re.match(ur'^ channel ', _hash):\n continue\n del self._sections[_hash]", "def delete_category(self, category: str) -> None:\n for letter in self.data:\n if category in self.data[letter]:\n self.data[letter].pop(category)\n print(f'Categoria: {category} apagada do dicionário.')\n self.save()\n self.beautify_json()", "def remove_categories(self, scheme):\n for category in list(self.get_categories(scheme)):\n self.category.remove(category)", "def delete_connections(self, **kwargs):\n rv = False\n for c in self.find_connections(**kwargs):\n self.delete(c)\n rv = True\n return rv", "def deleteMatches():\n conn, c = connect()\n c.execute(\"DELETE FROM matches;\")\n conn.commit()\n conn.close()", "async def votechannel_remove(self, ctx, *, channel: discord.TextChannel):\n await self.bot.db.execute(\n \"DELETE FROM voting_channel WHERE guild_id = %s and channel_id = %s\",\n ctx.guild.id,\n channel.id,\n )\n self.bot.cache.votechannels.discard(channel.id)\n await util.send_success(ctx, f\"{channel.mention} is no longer a voting channel.\")", "def deleteMatches():\n conn, c = connect()\n\n q = \"DELETE FROM MATCHES;\"\n c.execute(q)\n c.close()\n conn.commit()\n conn.close()", "def deleteMatches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()", "def deleteMatches():\n c.execute(\"DELETE FROM matchup\");\n print \"All matches have been successfully deleted\"\n return", "def test_remove_channel_removes_channel(self):\n channel = Mock()\n with mock.patch.object(self.notifier, \"_silenced_channels\") as silenced_channels:\n self.notifier.remove_channel(channel)\n silenced_channels.__delitem__.assert_called_with(channel)", "async def delete_category(self, category):\n stmt = delete(self.model)\n\n if self.category_field:\n stmt = 
stmt.where(getattr(self.model, self.category_field) == category)\n\n self.session.execute(stmt)", "def deleteMatches():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM match\")\n dbConn.commit()\n dbConn.close()", "def test_category_delete_with_forum(forum):\n forum.category.delete()\n\n assert forum is not None\n assert forum.category is not None\n\n category = Category.query.filter_by(id=forum.category.id).first()\n forum = Forum.query.filter_by(id=forum.id).first()\n\n assert forum is None\n assert category is None", "def remove_channel(self, channel):\n to_exec = \"DELETE FROM channel WHERE channel_id = %s\"\n self.__cursor.execute(to_exec, (str(channel.id),))\n self.__connection.commit()", "def destroy(self, request, *args, **kwargs):\n response = super(CategoryViewSet, self).destroy(request, *args, **kwargs)\n response.data = {'message': 'Categoria ha sido eliminada'}\n return response", "def delete_matches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()", "def deleteMatches():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes delete query to delete all records in MATCH table\n c.execute(\"DELETE FROM MATCH;\")\n # commits the changes perform on MATCH table after delete statement executes\n conn.commit()\n # closes the connection to tournament database\n conn.close()", "def remove(self, irc, msg, args, channel):\n res = self._checkDBhasChannel(channel)\n if res is True:\n SQL = 'DELETE FROM registry WHERE channel = ?'\n SQLargs = (channel,)\n self._SQLexec(SQL, SQLargs)\n self.lock.acquire()\n for x in range(0, len(self.channelscontrol)):\n v0 = str(self.channelscontrol[x][0])\n if v0 == channel:\n self.channelscontrol.pop(x)\n break\n self.lock.release()\n irc.reply(\"Channel removed from DB.\", prefixNick=True)\n else:\n irc.reply(\"Channel does not exist in DB.\", prefixNick=True)", "def deleteMatches():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM matches\"\n db_cursor.execute(query)\n db.commit()\n db.close()", "def deleteChannel(channelName):\n renameTo = ''.join([char if char.isnumeric() else '_' for char in str(datetime.utcnow())])\n renameChannel(channelName, renameTo)\n\n post(f\"https://slack.com/api/conversations.archive?{parse.urlencode({'channel' : channelNameToID(renameTo)})}\", headers=slackHeader(current_user.slackUserToken))\n return \"Zulip deleted a Slack channel\"", "def deleteMatches():\n conn, cur = connect()\n cur.execute(\"DELETE FROM MATCHES;\")\n conn.commit()\n conn.close()", "def deleteMatches():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from matches;\")\n db_conn.commit()\n db_conn.close()", "async def remove(self, ctx, channel : discord.Channel):\r\n \r\n server = ctx.message.server\r\n if server.id not in self.set:\r\n await self.bot.say(\":x: Uninitialized server!\")\r\n return\r\n if channel.id not in self.set[server.id][\"channels\"]:\r\n await self.bot.say(\":x: This is not a counting channel!\")\r\n return\r\n del self.set[server.id][\"channels\"][channel.id]\r\n self.save()\r\n await self.bot.edit_channel(channel,topic = None)\r\n await self.bot.say(\"Channel removed!\")", "def deleteMatches():\n conn = connect()\n c = conn.cursor()\n # Clears the \"matches\" table, but does not get rid of the table.\n c.execute(\"delete from matches;\")\n conn.commit()\n conn.close()", "async def delete_bot_msg(self, channel):\n 
await channel.purge(limit=100, check=self.is_me)", "def delete_category(self, category):\n # Remove mapping of metrics-to-category\n category_key = self._category_key(category)\n self.r.delete(category_key)\n\n # Remove category from Set\n self.r.srem(self._categories_key, category)", "def delete(self, id_):\n \n db.categories.remove({'_id': ObjectId(id_)})\n return '', 204", "def deleteMatches(cur=None, conn=None):\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"DELETE FROM matches;\"\"\")\n cur.execute(\"\"\"DELETE FROM byes;\"\"\")\n conn.commit()", "def unlink(self, cr, uid, ids, context=None):\n if len(self.pool.get('payment.enrich').search(cr, uid,[('enrich_category','in',ids),('state','!=','draft')], context=context)) > 0:\n raise osv.except_osv(_('Invalid Action Error'), _('Can not delete category(categories), Where there are some enrich with this category'))\n return super(enrich_category, self).unlink(cr, uid, ids, context=context)", "def deleteMatches():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM matches\")\n db.commit()\n db.close()", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def delete(self, **kwargs):\n\n response = self._requester.request(\n \"DELETE\",\n \"users/{}/communication_channels/{}\".format(self.user_id, self.id),\n _kwargs=combine_kwargs(**kwargs),\n )\n\n return response.json().get(\"workflow_state\") == \"deleted\"", "def deleteMatches():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM matches\")\n dbconnection.commit()\n dbconnection.close()", "def remove_channels(self, *channels):\n channels = set(c.id for c in channels)\n conf_to_remove = set()\n\n # Check every FollowConfig\n for chan_conf in self.follows:\n if set(c.id for c in chan_conf.discord_channels) & channels:\n # Remove the given channels from this FollowConfig\n dchans_to_remove = set(c for c in chan_conf.discord_channels if c.id in channels)\n chan_conf.discord_channels = [c for c in chan_conf.discord_channels if c not in dchans_to_remove]\n\n # If this FollowConfig ended up with 0 channel, save it to remove it later\n if not chan_conf.discord_channels:\n conf_to_remove.add(chan_conf)\n\n if conf_to_remove:\n self.follows = [c for c in self.follows if c not in conf_to_remove]", "def test_delete_category(self):\n pass", "def deleteMatches():\n\n query = (\"DELETE FROM matches;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'delete'})", "def deleteMatches():\n cur3 = conn.cursor()\n query = \"DELETE from MATCHES;\"\n cur3.execute(query)\n cur3.execute(\"commit;\")\n print \"\\t\\t\\tMatches Table DELETED\\n\"", "def deleteMatches():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM results\")\n conn.commit()\n conn.close()", "async def vote_clear(ctx: commands.Context):\n session = session_maker()\n old_channel = session.query(Channel).filter_by(channel_id=ctx.channel.id).one_or_none()\n if old_channel is None:\n await ctx.send('This channel was never setup for votes.')\n return\n old_votes = session.query(Vote).filter_by(channel_id=ctx.channel.id).all()\n for old_vote in old_votes:\n session.delete(old_vote)\n session.commit()\n await ctx.send(f'Votes for {ctx.channel} cleared!')", "def deleteMatches():\n db = connect()\n c = db.cursor()\n query = (\"DELETE FROM results;\")\n c.execute(query)\n db.commit()\n db.close()", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n 
self.retrain()", "def delete_at_index(self, idx):\n del self.timeseries[idx]\n del self.freq[idx]\n del self.ch_name[idx]\n del self.units[idx]\n\n if self.trigger_idx == idx:\n LGR.warning(\"Removing trigger channel - are you sure you are doing\" \"the right thing?\")\n self.trigger_idx = 0", "def post_filter_channels(self, channels=None): # pragma: no cover\n # Remove the DC component...\n # level_data_for(channels)\n pass", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass", "def delete(self):\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n self.request, 'delete-category', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n errors = []\n course_category.CourseCategoryDAO.delete_category(key, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Deleted.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "async def delete_category(self, category: str) -> dict:\n async with self.lock:\n await self.storage.delete_category(category)\n return await self.cache.delete_category(category)", "async def _delete_log(self, ctx):\n try:\n config = self.bot.db['questions'][str(ctx.guild.id)][str(ctx.channel.id)]\n except KeyError:\n return\n\n log_channel = ctx.guild.get_channel(config['log_channel'])\n if not log_channel:\n await hf.safe_send(ctx, \"The original log channel was not found. Please run `;q setup`.\")\n return\n try:\n last_message = None\n async for msg in log_channel.history(limit=5).filter(lambda m: m.author == m.guild.me and m.embeds):\n last_message = msg\n break\n if last_message.embeds[0].title.startswith('⁣List⁣'):\n try:\n await last_message.delete() # replace the last message in the channel (it should be a log)\n except discord.NotFound:\n pass\n except (TypeError, AttributeError, discord.Forbidden):\n return", "async def logremove(self, ctx):\n if await check_if_logged(channel_id=ctx.channel.id):\n c.execute(\"DELETE FROM logging.channels WHERE channelid = %s\", (ctx.channel.id,))\n DBconn.commit()\n await ctx.send(\"> **This channel is no longer being logged.**\")\n else:\n await ctx.send(f\"> **This channel is not being logged.**\")", "def test_delete_conversation(self):\n conv = G(Conversation, type=CONVERSATION_TYPE_CHAT, creator=self.user1,\n users=[self.user1])\n self.login(self.user1)\n resp = self.client.delete(self.get_url(conv.pk))\n self.assert204(resp)", "def unsubscribe(self, channel, update_handler=None):\n pass", "def collector_remove(self, msg, args):\n client = self._connect()\n collector_name = args.pop(0)\n collector = sumologic.Collectors(client)\n collector.delete(collector_name)\n message = 'collector {0} deleted.'.format(collector_name)\n self.send(msg.frm,\n message,\n message_type=msg.type,\n in_reply_to=msg,\n groupchat_nick_reply=True)", "def del_category(self, category):\n if category in self.categories:\n self.categories.remove(category)", "def delete(self, session: Session) -> None:\n session.query(Match).filter_by(id=self.id).delete()", "async def remove(self, ctx, *, channel: discord.VoiceChannel):\n auto_channels = await self.config.guild(ctx.guild).auto_channels()\n auto_channels.remove(channel.id)\n await self.config.guild(ctx.guild).auto_channels.set(auto_channels)\n await ctx.send(\n 
_(\"Startchannel used for automatic voicechannels removed: {channel}\").format(\n channel=channel.name\n )\n )", "def match_delete(establecimiento_id):\n # borrar todos los matches humanos anteriores para un establecimiento\n q = DELETE_MATCHES_QUERY % (establecimiento_id)\n db.query(q)\n return flask.Response('')", "def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()", "def removeCategory(self, c):\n\t\tif c not in self._categories:\n\t\t\treturn\n\t\tself._categories.remove(c)\n\t\tCONNECTOR.removeCategory(self, c)", "def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)", "def delete(self, **kwargs):\n self._cycles.delete(**kwargs)", "def test_delete_a_category(self):\n self.test_add_category_success()\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('category deleted permanently',\n response.data.decode())", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete(self, request, queryset):\n for webhook in queryset:\n webhook.delete()\n name_plural = force_text(self.model._meta.verbose_name_plural)\n self.message_user(request, _(\"Deleted selected %s\" % name_plural))", "def delete_set_messages(self, messages, channel_name=None, channel_id=None):\n self.delete_messages(\n channel_name=channel_name, channel_id=channel_id, messages=messages, confirmation_override=True\n )", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def deleteMatches():\n cursor.execute(\"\"\"delete from matches\"\"\")", "def part(self, channel):\n\n self._pubsub.unsubscribe('cluster:%s' % channel)", "def _drop_channels_func(cfg, raw, subject, session) -> None:\n if cfg.drop_channels:\n msg = f'Dropping channels: {\", \".join(cfg.drop_channels)}'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n raw.drop_channels(cfg.drop_channels)", "def test_delete_mediapackage_channel(self):\n with Stubber(\n medialive_utils.mediapackage_client\n ) as mediapackage_client_stubber:\n mediapackage_client_stubber.add_response(\n \"list_origin_endpoints\",\n service_response={\"OriginEndpoints\": [{\"Id\": \"1\"}, {\"Id\": \"2\"}]},\n expected_params={\"ChannelId\": \"1\"},\n )\n mediapackage_client_stubber.add_response(\n \"delete_origin_endpoint\",\n service_response={},\n expected_params={\"Id\": \"1\"},\n )\n mediapackage_client_stubber.add_response(\n \"delete_origin_endpoint\",\n service_response={},\n expected_params={\"Id\": \"2\"},\n )\n mediapackage_client_stubber.add_response(\n \"delete_channel\",\n service_response={},\n expected_params={\"Id\": \"1\"},\n )\n deleted_endpoints = medialive_utils.delete_mediapackage_channel(\"1\")\n mediapackage_client_stubber.assert_no_pending_responses()\n self.assertEqual(deleted_endpoints, [\"1\", \"2\"])", "def delete_cards(self, filterdict):\n\n self._collection.delete_many(self._constrain_keys(filterdict))", "def test_category_delete(self):\n login = self.autheniticate()\n token = json.loads(login.data.decode()).get('token')\n self.app.post(category_url,\n data=json.dumps(self.data),\n 
headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res = self.app.delete('/api/v2/categories/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n res1 = json.loads(res.data.decode())\n self.assertEqual(res1['status'], 'Deleted!')\n self.assertEqual(res.status_code, 200)", "def deleteMatches():\n conn, cur = connect()\n query = \"TRUNCATE matches CASCADE;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered deleting all matches.\")\n conn.commit()\n conn.close()", "async def deleted_channel(self, ctx, *, channel: ChannelSetting):\n await queries.update_setting(\n ctx,\n \"logging_settings\",\n \"message_log_channel_id\",\n channel.id if channel is not None else None,\n )\n if channel is None:\n await util.send_success(ctx, \"Deleted message logging **disabled**\")\n else:\n await util.send_success(\n ctx, f\"Deleted messages will now be logged to {channel.mention}\"\n )", "def delete_messages(\n self, channel_name=None, channel_id=None,\n messages=None, confirmation_override=False,\n restrict=None, remove_files=True\n ):\n if not channel_id:\n channel_id = self.find_channel_id(channel_name)\n\n if not confirmation_override:\n confirmation = input(\n f\"Are you sure you want to delete all messages from the channel #{channel_name}? Y/N\\n\")\n if 'Y' not in confirmation:\n print(f\"Aborting delete on channel #{channel_name}\")\n return\n\n if not messages:\n messages = self.get_messages(channel_id=channel_id)\n\n for message in messages:\n if not restrict or (restrict['type'] in message and message[restrict['type']] in restrict['values']):\n\n if 'subtype' in message and message['subtype'] == 'tombstone':\n continue\n\n response = self.client.api_call(\n f'chat.delete?channel={channel_id}&ts={message[\"ts\"]}'\n )\n assert response['ok']\n\n if remove_files and 'files' in message:\n for file in message['files']:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']", "def delete_pattern(self, pattern, version=None):\r\n\r\n pattern = self.make_key(pattern, version=version)\r\n\r\n keys = []\r\n for server, connection in self._serverdict.items():\r\n keys.extend(connection.keys(pattern))\r\n\r\n res = 0\r\n if keys:\r\n for server, connection in self._serverdict.items():\r\n res += connection.delete(*keys)\r\n return res", "def cmd_conversation_delete(client, args):\n delete_conversation = client.delete_conversation(args.conversation_id)\n generate_output({'delete_conversation': delete_conversation})", "async def clear(self, ctx):\n await self.config.guild(ctx.guild).channels.clear()\n await ctx.send(\"Spoiler channel list cleared.\")", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def destroy(self, request, pk=None):\n\n if not request.auth.user.is_staff:\n return Response(\n {'message': 'You must be an admin to delete categories.'},\n status=status.HTTP_403_FORBIDDEN\n )\n\n try:\n category = Categories.objects.get(pk=pk)\n\n except Categories.DoesNotExist:\n return Response(\n {'message': 'There is no category with the specified ID.'},\n status=status.HTTP_404_NOT_FOUND\n )\n\n if category.label == 'Uncategorized':\n return Response(\n {'message': 'Deleting the `Uncategorized` category is forbidden.'},\n status=status.HTTP_403_FORBIDDEN\n )\n\n category.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def test_message_remove(url):\n test_clear(url)\n admin_tk = 
channel_user_create_0(url)[0]\n \n test_channels = {\n 'token': admin_tk,\n 'name': 'channel_1',\n 'is_public': True,\n }\n requests.post(url + \"channels/create\", json=test_channels)\n\n test_message_1 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'message': 'Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_message_1)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 1\n\n test_message_2 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'message': 'Hello Again'\n }\n resp = requests.post(url + \"message/send\", json=test_message_2)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 2\n\n test_remove_msg_1 = {\n 'token': admin_tk,\n 'message_id': 1 \n }\n resp = requests.delete(url + \"message/remove\", json=test_remove_msg_1)\n message_remove_resp = resp.json()\n assert message_remove_resp == {}\n\n test_channel_msgs_1 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0,\n }\n resp = requests.get(url + \"channel/messages\", params=test_channel_msgs_1)\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp['messages'][0]['message_id'] == 2\n assert channel_msgs_resp['messages'][0]['u_id'] == 1\n assert channel_msgs_resp['messages'][0]['message'] == 'Hello Again'\n\n test_remove_msg_2 = {\n 'token': admin_tk,\n 'message_id': 2\n }\n resp = requests.delete(url + \"message/remove\", json=test_remove_msg_2)\n message_remove_resp = resp.json()\n assert message_remove_resp == {}\n\n resp = requests.get(url + \"channel/messages\", params={\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0, \n })\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp['messages'] == []", "def test_delete_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.del_category()\n self.assertIn(b'successfully deleted category', rv.data)", "def test_CategoriesDelete(self):\n trans1 = DebitsCredits.objects.create(account=self.account,\n currency=self.euro,\n name=\"Shopping\",\n amount=1,\n category=self.cat1)\n self.cat1.delete()\n self.assertEqual(self.cat1.active, False)\n\n trans1.delete()\n self.assertEqual(self.cat1.transactions.all().count(), 0)\n\n self.cat1.delete()\n self.assertEqual(Category.objects.all().count(), 1)", "async def channel_remove(\n self, ctx: commands.Context, channel: discord.abc.GuildChannel, *team_ids: int):\n if set(team_ids) - set(self.teams):\n await ctx.send('Missing data for the following team IDs: %s' % (\n ', '.join(map(str, set(team_ids) - set(self.teams))),))\n return\n\n await asyncio.gather(*[\n self._forbid_team_in_channel(self.teams[team_id], channel)\n for team_id in team_ids],\n return_exceptions=True)\n await ctx.send('Removed team%s `%s` from channel %s' % (\n nl.s(len(team_ids)),\n '`, `'.join(self.teams[team_id].username for team_id in team_ids),\n channel.mention))", "def delete_bot_messages(self, channel_name=None, channel_id=None):\n self.delete_messages(\n channel_name=channel_name, channel_id=channel_id, confirmation_override=True,\n restrict={'type': 'subtype', 'values': ['bot_message']}\n )", "def remove(self, channels=None):\n if channels is None:\n channels = self.get_channels()\n self.remove_from_frames(\n self.data, self.integration.frames, channels)", "async def channel(self, ctx, limit: int=100, channel: discord.TextChannel=None):\n\n if channel is None:\n channel = ctx.channel\n\n # noinspection PyUnresolvedReferences\n 
messages = await channel.purge(limit=limit)\n messages = len(messages)\n\n plural = '' if messages == 1 else 's'\n\n await ctx.send('Purged {} message{}.'.format(messages, plural), delete_after=10)" ]
[ "0.64337945", "0.6401022", "0.6107181", "0.6081878", "0.5946473", "0.5895014", "0.587733", "0.5871811", "0.5821551", "0.5815859", "0.57719624", "0.5770912", "0.5759033", "0.57166386", "0.5705552", "0.5697941", "0.5684132", "0.56386846", "0.55753577", "0.5554672", "0.55487365", "0.55464906", "0.55449486", "0.5544254", "0.5538076", "0.5534445", "0.5534186", "0.5528104", "0.5517963", "0.55171096", "0.55086684", "0.55008763", "0.5493417", "0.54825824", "0.5480582", "0.54781425", "0.54722786", "0.54521155", "0.5448275", "0.54440904", "0.54423976", "0.54382896", "0.5436861", "0.5434631", "0.5427099", "0.5409785", "0.5399307", "0.5398333", "0.538714", "0.5370422", "0.5370239", "0.5365636", "0.5357411", "0.5352846", "0.53459597", "0.5337955", "0.5332895", "0.5330828", "0.5329293", "0.5316611", "0.529331", "0.52896863", "0.526728", "0.52638626", "0.5263742", "0.52628464", "0.52595544", "0.5257946", "0.5257469", "0.5256457", "0.5250192", "0.52423453", "0.52379483", "0.52348393", "0.5227559", "0.5208501", "0.52019244", "0.5201046", "0.51988643", "0.51927817", "0.5190632", "0.5185474", "0.5185136", "0.5184089", "0.5174562", "0.51726764", "0.51713336", "0.51634794", "0.5159136", "0.515648", "0.51411736", "0.51411736", "0.51266426", "0.5119041", "0.51097214", "0.51057684", "0.51023334", "0.5091528", "0.5089965", "0.5088474" ]
0.8869049
0
Prevent both players from reporting at the same time.
def _add_to_recently_called(self, match, reporter):
    if utils.istrcmp(match.player1_tag, reporter):
        other = match.player2_tag
    else:
        other = match.player1_tag
    self.recently_called[other] = time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def losesChance(self):\n self.second_chance = False", "def uber_check_win(self):\n if self.player1.score == self.player2.score:\n print(\"It's a draw!\")\n elif self.player1.score > self.player2.score:\n print(\"Player 1 is a proper bad ass mother fucker\")\n else:\n print(\"Player numma 2 is a proper bad ass mother fucker\")", "def interferes(self, other):\n return True", "def player_collision(self, player):\n return True", "def playerForfeit(self):\n self.handleWin(self.currentplayer*-1)", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "async def test_skipped_already_unsilenced(self):\n self.cog.scheduler.__contains__.return_value = False\n self.cog.previous_overwrites.get.return_value = None\n\n for channel in (MockVoiceChannel(), MockTextChannel()):\n with self.subTest(channel=channel):\n self.assertFalse(await self.cog._unsilence(channel))\n channel.set_permissions.assert_not_called()", "def __negated_player(self, player):\n\t\treturn self.PLAYER2 if self.current_player == self.PLAYER1 else self.PLAYER1", "def killSubscribers(self):\n if (self.off_TB1_Viewer.isChecked()):\n pass", "def doNotTrack(self):\n # return False\n return 'lol'", "def collect(self, player: Player):\n player.set_invincible(True)", "def update(self):\r\n if not self.tr.game_over and self.tr.turn_tracker:\r\n self.computer_play()", "def force_hold(self):\n names = \"\"\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n player.hold()\n names += player.mention_user() + \",\"\n if names:\n return \"Forced {} to hold because they took too long to decide last round.\".format(names)", "def is_game_over(cls):\n cls.record_winner()\n cls.record_tie()", "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def on_flip(self):\r\n self.next_data_has_pump = not self.next_data_has_pump\r\n self.probe_only_data, self.pump_probe_data = (\r\n self.pump_probe_data, self.probe_only_data)", "def check_players_collision(self):\n # Check if bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player1, self.player2_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player1, self.mob_bullet, True))\n if bullet_hit_p and not self.player1.isInvincible():\n self.p2_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player1.is_destroyed():\n self.player1.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player1.get_x(), self.player1.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))\n\n # Check if Player 2 bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player2, self.player1_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player2, self.mob_bullet, True))\n if bullet_hit_p and not self.player2.isInvincible():\n self.p1_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player2.is_destroyed():\n self.player2.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player2.get_x(), self.player2.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))", "def stop2p():\n if timer.is_running():\n timer.stop()\n global result2pline, result2p\n 
result2pline = 60\n if p1score > p2score:\n result2p = \"Player 1 wins!\"\n elif p2score > p1score:\n result2p = \"Player 2 wins!\"\n else:\n result2p = \"Nobody wins!\"", "async def skip(self, ctx):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. [{ctx.message.author.mention}]'))\n\n # Skips track\n await player.skip()\n # Add reaction\n await ctx.message.add_reaction(\"⏩\")", "def set_first_player(self):\n if self.player2.won_previous:\n self.current_player = self.player2\n else: self.current_player = self.player1", "def next_player(self):\n if self.player1.turn_status == 1:\n self.player1.turn_status = 0\n self.turn(self.player2)\n else:\n self.player2.turn_status = 0\n self.turn(self.player1)", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def reset_players(self):\n self.dealer.reset()\n for player in self.players:\n player.reset()\n if player.bank <= 500:\n player.set_bank(1000)", "def notifyPlayers(self) -> None:\n # TODO: Used for external communication to a front-end module.\n pass", "def player_kick(self, player_ip):\r\n\t\tself._comm_server.force_disconnection(player_ip)", "def on_skip(self, event):\n self.pre_check(event)\n if (event.author.id not in self.cool_down[\"general\"] or\n time() - self.cool_down[\"general\"][event.author.id] >= 2):\n self.get_player(event.guild.id).skip()\n self.cool_down[\"general\"][event.author.id] = time()\n else:\n cool = round(\n Decimal(\n 2 - (time() - self.cool_down[\"general\"][event.author.id]),\n ),\n )\n return event.channel.send_message(\n \"Cool down: {} seconds left.\".format(cool),\n )", "def resign_game(self):\n # If entered, it will return winner status for the opposing player\n if self._current_player == 'W':\n self._game_status = 'BLACK_WON'\n if self._current_player == 'B':\n self._game_status = 'WHITE_WON'", "def set_bankrupt(self):\n if self.status == self.PLAYER_BANKRUPT:\n return\n self.status = self.PLAYER_BANKRUPT\n self.game.player_bankrupted(self)", "def fix_unpaired_events(self):\n\n if self.observationId:\n\n r, msg = project_functions.check_state_events_obs(self.observationId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][self.observationId])\n if \"not PAIRED\" not in msg:\n QMessageBox.information(self, programName, \"All state events are already paired\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n return\n\n '''\n if self.playerType == VIEWER:\n # max time\n time_ = max(x[0] for x in self.pj[OBSERVATIONS][self.observationId][EVENTS])\n else:\n time_ = self.getLaps()\n '''\n\n w = dialog.JumpTo(self.timeFormat)\n w.setWindowTitle(\"Fix UNPAIRED state events\")\n w.label.setText(\"Fix UNPAIRED events at time\")\n\n if w.exec_():\n if self.timeFormat == HHMMSS:\n fix_at_time = utilities.time2seconds(w.te.time().toString(HHMMSSZZZ))\n elif self.timeFormat == S:\n fix_at_time = Decimal(str(w.te.value()))\n print(\"fix_at_time\", fix_at_time)\n\n events_to_add = 
project_functions.fix_unpaired_state_events(self.observationId,\n self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][self.observationId],\n fix_at_time - Decimal(\"0.001\")\n )\n if events_to_add:\n self.pj[OBSERVATIONS][self.observationId][EVENTS].extend(events_to_add)\n self.projectChanged = True\n self.pj[OBSERVATIONS][self.observationId][EVENTS].sort()\n self.loadEventsInTW(self.observationId)\n item = self.twEvents.item(\n [i for i, t in enumerate(self.pj[OBSERVATIONS][self.observationId][EVENTS]) if\n t[0] == fix_at_time][0], 0)\n self.twEvents.scrollToItem(item)\n\n # selected observations\n else:\n result, selected_observations = self.selectObservations(MULTIPLE)\n if not selected_observations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obs_id in selected_observations:\n r, msg = project_functions.check_state_events_obs(obs_id, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id])\n print(\"msg\", msg)\n if \"NOT PAIRED\" in msg.upper():\n fix_at_time = max(x[0] for x in self.pj[OBSERVATIONS][obs_id][EVENTS])\n events_to_add = project_functions.fix_unpaired_state_events(obs_id,\n self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id],\n fix_at_time\n )\n if events_to_add:\n events_backup = self.pj[OBSERVATIONS][obs_id][EVENTS][:]\n self.pj[OBSERVATIONS][obs_id][EVENTS].extend(events_to_add)\n\n # check if modified obs if fixed\n r, msg = project_functions.check_state_events_obs(obs_id, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id])\n if \"NOT PAIRED\" in msg.upper():\n out += \"The observation <b>{}</b> can not be automatically fixed.<br><br>\".format(obs_id)\n self.pj[OBSERVATIONS][obs_id][EVENTS] = events_backup\n else:\n out += \"<b>{}</b><br>\".format(obs_id)\n self.projectChanged = True\n if out:\n out = \"The following observations were modified to fix the unpaired state events:<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Fixed observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n self.results.exec_()\n else:\n QMessageBox.information(self, programName, \"All state events are already paired\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def set_next_first_player(self):\n if self.current_player == self.player1:\n self.player1.won_previous = True\n self.player2.won_previous = False\n else:\n self.player2.won_previous = True\n self.player1.won_previous = False", "def testPlayer():\n\n print(\"\\n ---------- Test Player ---------\")\n\n failure = False\n hand = {'c':1, 'a':1, 'b':1 ,'d':1, 'o':1, 'e':1}\n\n p1 = Player(1, Hand(6, hand))\n p2 = Player(2, Hand(6, hand))\n\n if not p1.getHand() == hand and p2.getHand() == hand:\n failure = True\n print(\"FAILURE: Se esperaría la mano que se ingresó:\", hand,\"y se está regresando:\", p1.getHand())\n\n if not p1.getIdNum() == '1' and p2.getIdNum() == '2':\n failure = True\n print(\"FAILURE: Se espera que p1 sea el jugador 1 y p2 sea el jugador 2, y se está obteniendo:\", p1.getIdNum(),\n p2.getIdNum())\n\n print(\"Jugador 1\")\n print(\"\\t\", p1.addPoints(5))\n print(\"\\t\", p1.addPoints(12))\n if not p1.getScore() == 17:\n failure = True\n print(\"FAILURE: Se esperan 17 puntos, y se están obteniendo:\", p1.getScore())\n print(p1)\n\n print(\"Jugador 2\")\n print(\"\\t\", p2.addPoints(3))\n print(\"\\t\", p2.addPoints(10))\n if not p2.getScore() == 13:\n failure = True\n print(\"FAILURE: 
Se esperan 13 puntos, y se están obteniendo:\", p1.getScore())\n print(p2)\n\n if not (p1 > p2) == 1:\n failure = True\n print(\"FAILURE: Se esperaba un 1, indicando que el puntaje del P1 es mayor al del P2. Se está regresando:\",\n p1 > p2)\n if not (p1 < p2) == -1:\n failure = True\n print(\"FAILURE: Se esperaba un -1, indicando que el puntaje del P2 es menor al del P1. Se está regresando:\",\n p2 < p1)\n if (p1 == p2):\n failure = True\n print(\"FAILURE: Se esperaba un valor falso y se está obteniendo:\",\n p2 < p1)\n\n if not failure:\n print(\"SUCCESS: testPlayer()\")\n else:\n print(\"FAILURE: testPlayer()\")", "def reset(self) -> None:\n\n # Just to be safe, lets make sure no multi-kill timers are gonna go off\n # for no-longer-on-the-list players.\n for p_entry in list(self._player_records.values()):\n p_entry.cancel_multi_kill_timer()\n self._player_records = {}", "def check_tie(self, player1, player2):\n if self.check_win(player1) or self.check_win(player2):\n return False\n return self.check_grid_full()", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def do_nothing(self, player):\n return '%s spins \\'nun\\' and does nothing.' % (player,)", "def set_as_not_feedback(self):\n self.feedback = False", "def game_over(self):\n self.over = True", "def player_collision(self, player):\n events.trigger_event(\"print_message\", self.message)\n return self.let_player_through", "def _control_skip(self):\n self.player.skip()", "def reject(self):\n pass", "def pass_player(self):\n # pass control to next player by asking game who that is\n self.disable_all_buttons()\n self.game.pass_control_next(self)", "def track_off(self,numero):\n if numero in self.tiempos.actual().obtener_habilitados():\n self.tiempos.actual().deshabilitar_track(numero)", "def isGameOver(self):\n pass", "def skip(self):\n self.skip_votes.clear()\n if self.is_playing():\n self.player.stop()", "def ready_new_round_players(self):\n for player in self.players:\n if player.is_playing:\n player.has_played = False", "def take_control_over(self, other):\n a = self\n if a == other: return\n if util.onechancein(6): #make a master of b\n if other.master is not None:\n if other.master != a and a.master != other: #if b already had master, make a enemy of b.master\n a.history.append('In year %d %s tried to overtake the control over %s, but failed' % (world.year, a.name, other.name))\n other.master.conflict_with(a)\n else:\n if a.master == other: #if we overtook controll\n a.master = None\n try:\n other.minions.remove(a)\n except ValueError: pass\n try:\n other.master.minions.remove(other)\n except Exception : pass\n a.minions.append(other)\n other.master = a\n a.history.append('In year %d %s became boss over %s' %(world.year, a.name, other.name))", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def non_social_action_SB(self):\n self.pumpRate = 0.4\n if not self.agent.done:\n self.action_gating()", "def suppresses(self, other_describer):\n return False", "def still_betting(self):\n for player in self.players:\n if player.current_bet is 0:\n return True\n return False", "def notify_game_over(self):\n self.is_game_over = True", "def IgnorePersistedDecision(self) -> bool:", "def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not 
any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()", "async def skip(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if player.current.requester not in map(lambda c: c.id, player.connected_channel.members):\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n if player.current.requester == ctx.author.id:\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n minpeople = math.ceil(len(set(filter(lambda x: not x.bot, player.connected_channel.members)))/2)\n votes = player.fetch(\"votes\") if player.fetch(\"votes\") else 0\n player.store(\"votes\", votes + 1)\n if votes + 1 >= minpeople:\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n await ctx.send(\"Skip? {}/{}\".format(votes + 1, minpeople))", "def violated(self) -> bool:\n ...", "def is_summon(self):\n return False", "def delay_checks(self):\n return False", "def skip(self, player):\n if player not in self.players_skipped:\n self.players_skipped.append(player)\n self.skips += 1\n self.chat.update_chat(f\"Player has voted to skip ({self.skips}/{len(self.game.players) -2})\")\n if self.skips >= len(self.game.players) - 2:\n return True\n\n return False", "def user_disappears(self, user):\n pass", "def _prevent_dos(cls):\n if cls._last_query is not None:\n if time.time() - cls._last_query < cls._dos_timeout:\n raise RuntimeError(\"Too many scheduler requests within a short time!\")\n cls._last_query = time.time()", "def point_assigner_loss(self, group, player_sprites):\n # Grab playersprite\n if group != {}:\n for player in group:\n player.reset()\n player_sprites.add(player)", "async def test_preserved_other_overwrites_voice(self):\n prev_overwrite_dict = dict(self.voice_overwrite)\n await self.cog._set_silence_overwrites(self.voice_channel)\n new_overwrite_dict = dict(self.voice_overwrite)\n\n # Remove 'connect' & 'speak' keys because they were changed by the method.\n del prev_overwrite_dict[\"connect\"]\n del prev_overwrite_dict[\"speak\"]\n del new_overwrite_dict[\"connect\"]\n del new_overwrite_dict[\"speak\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)", "def consider_deactivation(self):\n pass", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def newLogoff(self, playerID):\n if playerID in self.idList:\n playerIndex = self.idList.index(playerID)\n self.removePlayer(playerIndex)", "def otherPlayer(cls, player):\n return 0 if player == 1 else 1", "def notFainted(self):\n messages = self.effect.attemptAfterTurn(self.pkmn)\n assert messages == [AfterTurnEffect.message], \"Should receive messages from afterTurn function\"", "def other_player(cls, player):\n return 0 if player == 1 else 1", "def disallow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = False\n update.message.reply_text(\"Temprarily allowed disabled!\")", "def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)", "def exclude(self):\n\n self.eod.value = 0\n self.public.value = 0", "def check_loss(self):\n return POKEMON in self.get_game()", "def checkForSideChangeRequest(self):\n inThirdRound = 
self.wonRounds[\"Team1\"] == 1 and self.wonRounds[\"Team2\"] == 1\n oneTeamAt11AndOtherTeamUnder11 = (self.counter[\"Team1\"] == 11 and self.counter[\"Team2\"] < 11) or\\\n (self.counter[\"Team2\"] == 11 and self.counter[\"Team1\"] < 11)\n if inThirdRound and oneTeamAt11AndOtherTeamUnder11:\n self.__notifySideChangeRequest()", "def only_one_influence_per_turn(sender, instance, **kwargs):\n\tif BuyInfluenceOrder.objects.filter(player=instance.player, turn=instance.player.game.current_turn).exists():\n\t\traise OrderNotAvailable(\"Impossible d'acheter de l'influence deux fois par tour.\")", "def interferes(self, other):\n if not isinstance(other, Transmitter):\n return self == other\n\n return other.flows_to(self.id)", "def stop_sending(self, _):\n self.sending = False\n print(f'[INFO] Done sending DEAUTH packets to {self.target_addr} on AP {self.access_point}')", "def __ne__(self, other: 'Monitor') -> bool:\n return not self == other", "def on_reject(self, update, _context):\n self.send_message(update.message.chat_id, c.MSG_THANKS_NOTHANKS)", "def enough_players():\n return True", "def decide_infect(self, other):\n if (self._is_infected and not other._is_infected):\n if random.random() < self._transmission_prob and random.random() < other._infection_prob:\n other._is_infected = True\n\n if other._is_infected and not self._is_infected:\n if random.random() < other._transmission_prob and random.random() < self._infection_prob:\n self._is_infected = True", "def testNonReporting(self):\n self.ps = ProductStock.objects.get(supply_point=self.sp, \n product=self.pr)\n self.ps.use_auto_consumption = True\n self.ps.save()\n \n self.ps = self._report(100, 100, Reports.SOH) # 100 days ago we had 10 in stock. \n self.assertEqual(None, self.ps.daily_consumption) # not enough data\n self.ps = self._report(10, 90, Reports.REC) # 90 days ago, we received 10\n self.assertEqual(None, self.ps.daily_consumption) # not enough data\n # 90 days ago, we have 10 in stock \n self.ps = self._report(100, 90, Reports.SOH) \n self.assertEqual(1, self.ps.daily_consumption) # 10 stock/10 days\n \n # even though we received no reports between day 90 and day 50\n # the consumption gets updated all the same\n self.ps = self._report(490, 50, Reports.REC) \n self.ps = self._report(100, 50, Reports.SOH)\n self.assertEqual(10, self.ps.daily_consumption)", "def disable(self):", "def disable(self):\n # Check for new results and cache a copy in Django model\n self.update(do_update_assignments=True)\n self.connection.dispose_hit(self.mturk_id)", "def _notify_killed_players(self, players_to_notify):\n for player in players_to_notify:\n player.notify(self._format_move_result_notification(None, Moveresult.EJECT, name=player.name))", "def keep_score(self, move1, move2):\r\n scores = self.scores # Player scores\r\n # Converts Win/Loss to 0/1 & Saves it to List:\r\n score1 = int(beats(move1, move2))\r\n score2 = int(beats(move2, move1))\r\n scores[\"p1\"] += score1\r\n scores[\"p2\"] += score2\r\n # Declares Which Player Won the Round, or a Tie:\r\n if score1 != score2:\r\n round_winner = (score1 > score2 and \"ONE\" or \"TWO\")\r\n print(f\"** PLAYER {round_winner} WINS THE ROUND **\")\r\n else:\r\n print(\"** THIS ROUND IS A TIE **\")\r\n # Prints Total of Each Player's Scores:\r\n print(f\"Player One Score: {scores['p1']}, \"\r\n f\"\\nPlayer Two Score: {scores['p2']}\\n\")", "def ignore(self):\n self._ignore_transids = True", "async def team_unignore(self, ctx: commands.Context):\n await 
self.config.user(ctx.author).do_not_message.set(False)\n await ctx.send('Okay, I\\'ll include you back in team-wide DMs.')", "def test_bad_turn(self):\n board = Board()\n player1 = LegitPlayer()\n player2 = BadTurnPlayer()\n player1.start_of_game()\n player2.start_of_game()\n player_guard1 = PlayerGuard(player1)\n player_guard2 = PlayerGuard(player2)\n\n # set ids\n p1id = uuid.uuid4() \n p2id = uuid.uuid4() \n player_guard1.set_id(p1id)\n player_guard2.set_id(p2id)\n\n board.place_worker(*player_guard1.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard1.place_worker(board))\n\n self.assertRaises(PlayerInvalidTurn, player_guard2.play_turn, board)", "def other_player(self, player):\n if player == self.__opponent:\n return self.__pid\n else:\n return self.__opponent", "def _wake_players(self):\n self.__current_player = None\n players = self.get_players(self.get_state_info(\"wake_all\"))\n self.__player_wakeup_iter = iter(players.copy())\n self.player_done(None)", "def run_out_of_time(self):\n self.out_of_time = True", "def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()", "def propagation(self, other):\n if self.infected_pop >= 1 and other.infected_pop == 0:\n if random.random() <= self.infected_ratio()/10:\n other.infected_pop = 1", "def game_off(self) -> None:\n if self.msg.sender != self.owner:\n revert(\"Only the score owner can turn it off\")\n if self._game_on.get():\n self._game_on.set(False)", "def track_players(self, boom_pos):\n self.update_players()\n for player in self.players:\n if self.if_near_boom(player.get_char().get_pos(), boom_pos):\n player.update_lives(-1)\n self.gui.set_doneBlinkingAnimation(False)", "def __ne__(self, other: 'OfferingSpeed') -> bool:\n return not self == other", "def disable_receiver(self):\n self.set_receiver(False)", "def disable_reporting(self):\n self.reporting = False\n msg = chr(REPORT_DIGITAL + self.port_number)\n msg += chr(0)\n self.board.sp.write(msg)" ]
[ "0.61262506", "0.60236615", "0.5634644", "0.56011015", "0.5578245", "0.5538049", "0.5537875", "0.55358166", "0.5534296", "0.5521882", "0.5503166", "0.55023617", "0.5500676", "0.5480155", "0.5468607", "0.54599667", "0.5449898", "0.54205763", "0.5392923", "0.5390785", "0.5378159", "0.5366228", "0.5356806", "0.53478146", "0.5337448", "0.5312937", "0.5306508", "0.53053296", "0.5301153", "0.5297382", "0.52869606", "0.52868974", "0.5275266", "0.5273238", "0.5272803", "0.5268048", "0.52591944", "0.5254323", "0.5250063", "0.5247947", "0.5238988", "0.5237869", "0.52359366", "0.5224803", "0.5223402", "0.5222937", "0.5218046", "0.52160263", "0.5213001", "0.5211109", "0.5208461", "0.5207627", "0.5205807", "0.51847315", "0.5169907", "0.51580894", "0.51573586", "0.51556575", "0.5152608", "0.51524895", "0.51411605", "0.5138192", "0.5134381", "0.51281995", "0.51271087", "0.51205355", "0.51152575", "0.51097345", "0.51090366", "0.5107956", "0.5107738", "0.5098339", "0.50941086", "0.5091278", "0.50839764", "0.5083252", "0.50737995", "0.5062822", "0.5061145", "0.50575334", "0.50518614", "0.505141", "0.5049937", "0.5047315", "0.50470746", "0.50341356", "0.5032097", "0.50314796", "0.50230396", "0.50182444", "0.50171804", "0.5013735", "0.5012291", "0.5012221", "0.50055933", "0.50046575", "0.50036055", "0.5000715", "0.5000453", "0.5000401" ]
0.5070516
77
Check the participants list for players not on the server.
async def missing_tags(self, owner) -> List[str]:
    dms = await utils.get_dms(owner)
    missing = [player for player in await self.gar.get_players()
               if not self.get_user(player)]
    if not missing:
        return []
    message = ['Missing Discord accounts for the following players:']
    for p in missing:
        message.append(f'- {p}')
    await utils.send_list(dms, message)
    return missing
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_participants(self):\n for i in range(self.num):\n def check(m):\n if m.content.lower().strip() == \"i\" and m.author not in self.participants:\n return True\n\n return False\n\n # Wait with a timeout of 2 minutes and check each message with check(m)\n reply = await client.wait_for_message(timeout=120, channel=self.channel, check=check)\n\n if reply: # A user replied with a valid check\n asyncio.ensure_future(\n client.say(self.message,\n \"{} has entered! `{}/{}`. Type `I` to join!\".format(\n reply.author.mention, i + 1, self.num))\n )\n self.participants.append(reply.author)\n\n # Remove the message if bot has permissions\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n else:\n # At this point we got no reply in time and thus, gathering participants failed\n await client.say(self.message, \"**The {} game failed to gather {} participants.**\".format(\n self.name, self.num))\n started.pop(started.index(self.channel.id))\n\n return False\n\n return True", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "async def players(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n\n await amor_manager.say(\"Current Players: {}\".format(\", \".join(tod_games[room]['participants'].keys())))", "def all_players_finish(self):\n return len(self.game_winners) == len(self.players)", "def get_players(self):\n return self.server.status().players.online", "def enough_players():\n return True", "def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False", "def ListIgnoredPlayers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "async def listplayers(self, ctx, *, server_name=None):\n if server_name:\n server_name = server_name.replace('_', ' ').title()\n msg = await ctx.send(f'**Getting Data for the {server_name} server**')\n await ctx.channel.trigger_typing()\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = (await resp.json()).get('details', 'There was a problem. Please try again')\n else:\n message = \"There was an error on my server. 
I have notified the maintainers.\"\n await ctx.send(message)\n else:\n futures = []\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/',\n headers=self.bot.auth_header\n )\n if resp.status != 200:\n await ctx.send('There was a problem getting the servers for this guild.')\n return\n guild_servers = await resp.json()\n for server in guild_servers:\n msg = await ctx.send(f'**Getting Data for the {server[\"name\"]} server**')\n\n # noinspection PyShadowingNames\n async def _listplayers(server_name: str, msg: discord.Message):\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = f'Error getting data for {server_name}' + \\\n (await resp.json()).get('details', 'Please try again')\n else:\n message = \"There was an error on my server. I have notified the maintainers.\"\n await ctx.send(message)\n\n futures.append(_listplayers(msg=msg, server_name=server['name']))\n if futures:\n asyncio.ensure_future(asyncio.gather(*futures))\n else:\n await ctx.send('There are no available servers for this guild.')", "def all_votesp(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.vote == None,\n ancestor=game_key).fetch()\n logging.info(\n \"participants who have not voted: %s\", \n [p.plus_id for p in participants])\n if participants:\n return False\n else:\n return True", "def get_accepted_players(self):\n return self.accepted_players", "def create_player_list(self, current_game):\n players = [Player(c['summonerId'], c['championId'], c['teamId']) for c in current_game['participants']]\n return players", "def ready_new_round_players(self):\n for player in self.players:\n if player.is_playing:\n player.has_played = False", "def list_unlisted_players(self, p_ids_and_prices):\n players_to_list = True\n try:\n while players_to_list:\n # Check if topmost player exists that should be listed\n topmost_player_location_unlisted = \"/html/body/main/section/section/div[2]/div/div/div/section[3]/ul/li[1]/div\"\n p_exists = self.check_exists_by_xpath(\n topmost_player_location_unlisted)\n if (p_exists == False):\n players_to_list = False\n else:\n # Click topmost player\n self.sleep_approx(2)\n self.clickButton(topmost_player_location_unlisted)\n self.sleep_approx(1)\n\n # Get bought price (to log profit)\n bought_player = self.check_exists_by_xpath(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[1]/div[2]/div/span[2]\")\n bought_price = 0\n if bought_player:\n playercardname = self.getText(\n \"/html/body/main/section/section/div[2]/div/div/div/section[3]/ul/li[1]/div/div[1]/div[2]\")\n\n bought_price = self.getText(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[1]/div[2]/div/span[2]\")\n\n # Detect if player was packed\n if (len(str(bought_price)) != 0):\n if \",\" in bought_price:\n bought_price = int(\n bought_price.replace(\",\", \"\"))\n else:\n bought_price = 0\n\n # Click list for transfer\n listfortransfer_location = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[1]/button\"\n self.clickButton(listfortransfer_location)\n self.sleep_approx(1)\n\n # Get player sell 
price\n playerrating = int(self.getText(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[7]/div[2]/div[1]\"))\n \n playercardname = self.getText(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[4]\")\n playerid = self.getPlayerID(playercardname, playerrating)\n sellprice = int(p_ids_and_prices[playerid])\n log_event(self.queue, \"Sell price to use for \" +\n str(playercardname) + \": \" + str(sellprice))\n\n # Log profit (only if player wasn't packed)\n bought_price = int(bought_price)\n if (bought_price != 0):\n # Sell price * .95 to account for EA tax\n potential_profit = (sellprice*0.95) - bought_price\n log_event(self.queue, \"Sell price \" + str(playercardname) +\n \": \" + str(sellprice) + \" Bought: \" + str(bought_price))\n log_event(self.queue, \"Sell price * .95 \" +\n str(playercardname) + \": \" + str(sellprice*.95))\n self.user_projected_profit += potential_profit\n self.update_autobidder_logs()\n\n startprice_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[2]/div[2]/input\"\n buynow_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/input\"\n listplayer_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/button\"\n\n # Make sure text boxes are visible\n self.scrollIntoView(listplayer_loc)\n self.send_keys_and_sleep(buynow_loc, sellprice)\n self.send_keys_and_sleep(startprice_loc, sellprice-100)\n\n # Final step - list player on market\n self.clickButton(listplayer_loc)\n except Exception as e:\n log_event(self.queue, \" err 203, should be ok tho \")\n log_event(self.queue, e)", "def GetAllIgnoredPlayers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def check_for_user_not_in_system(player_name: str) -> bool:\n\n for uid, user in self.connected_users.items():\n if user['authorized'] and user['main']['player_name'] == player_name:\n return False\n return True", "def _test_player_list_size(self):\n return len(self.player_list)", "def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()", "def get_players():\n return [Mpris_Player(item)\n for item in Mpris_Utils.get_session().list_names()\n if re.match(Mpris_Interfaces.MEDIA_PLAYER, item) > 0]", "def has_player(self, p: Player) -> bool:\n return p in self.players", "def test_can_see_participant(self):\n ThreadParticipant.objects.add_participants(self.thread, [self.user])\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 200)\n\n response_json = response.json()\n self.assertEqual(response_json['title'], self.thread.title)\n self.assertEqual(\n response_json['participants'], [\n {\n 'id': self.user.id,\n 'username': self.user.username,\n 'avatars': self.user.avatars,\n 'url': self.user.get_absolute_url(),\n 'is_owner': False,\n },\n ]\n )", "def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return 
JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)", "def notifyPlayers(self) -> None:\n # TODO: Used for external communication to a front-end module.\n pass", "def check_death(player_list):\n\n count = 0\n remaining_players = []\n\n for cycle in player_list:\n if not cycle.is_dead():\n count += 1\n remaining_players.append(cycle.get_name())\n\n if count == 0:\n pygame.time.wait(1000)\n return True, \"None\"\n elif count == 1:\n pygame.time.wait(1000)\n return True, remaining_players[0]\n\n return False, remaining_players", "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def getPlayers(self):\n players = []\n for pgp in self.sandboxplayergroupplayer_set.filter(quit=False):\n players.append(pgp.player)\n return players", "def players(self):\n return self._get(\"players\")", "def _update_players(self):\n if not self.game_state:\n raise RuntimeError(\"Cannot call update_players when the game has not started!\")\n for player in self.player_list:\n # Do not update a player that has exited or been expelled from the level\n if not self.game_state.is_character_expelled(player.entity) and not \\\n player.entity in self.game_state.get_completed_characters():\n self._update_player(player)", "def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id", "def listofParticipants():\n dirs1 = os.listdir(conf.participant_dir)\n for user in dirs1:\n direct=participant_dir + user + '/'\n previous={}\n print \"Checking for user %s\" % user\n for y in os.listdir(direct):\n if os.path.isdir(direct+'/'+y) and y[0] !='.':\n previous[y] = subprocess.check_output(['/usr/bin/git',\n 'log','-1',\n '--oneline',y],\n cwd=direct)\n subprocess.call(['/usr/bin/git', 'reset', '--hard', 'HEAD'], cwd=direct)\n subprocess.call(['/usr/bin/git', 'clean', '-d', '-fx', '\"\"'], cwd=direct)\n subprocess.call(['/usr/bin/git', 'pull', '-s', 'recursive', '-X', 'theirs'], cwd=direct)\n\n for y in os.listdir(direct):\n if os.path.isdir(direct+'/'+y) and y[0] !='.':\n after = subprocess.check_output(['/usr/bin/git',\n 'log','-1',\n '--oneline',y],\n cwd=direct)\n if y not in previous or previous[y] != after:\n yield user,y", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def check_participants_avaliability(emails, startTime, endTime, curr_schedule_id = None):\n\n unavailable, available_userIDs = [], []\n for email in emails:\n available, userID = Participants.is_available(email, startTime, endTime, curr_schedule_id)\n if available:\n available_userIDs.append(userID)\n else:\n unavailable.append(email)\n print(\"Unavailable: \", unavailable)\n return unavailable, available_userIDs", "def player_exists(self, player):\n res = self._db.Players.aggregate([{'$match': {'Name': player}},\n {'$project':\n {'br': {'$ifNull': ['$br', 0]},\n 'fg': {'$ifNull': ['$fg', 0]}}}\n ])\n return list(res)", "def has_player(self, id):\r\n if id in self.players:\r\n return True\r\n \r\n return False", "def look_for_players(self):\n log.debug(\"Start looking for players [\" + self.team_link 
+ \"]\")\n\n players_found = {}\n table = self.soup.find('table', {\"class\": \"table table-striped table-hover no-footer\"})\n for tr in table.find_all(\"tr\"):\n a = tr.find(\"a\")\n if a:\n # tag a container of the name player found\n player_name = str(a.string).strip()\n link = self.host_url + a[\"href\"]\n players_found[link] = player_name\n\n return players_found", "def checkForOnes(self, playersView: Player):\n # TODO checkForOnes not implemented\n raise NotImplementedError()", "def determine_winners(self, players=None):\n players_and_cards = [(holding.player.id, holding.codes) for holding in self.live_holdings]\n if players:\n player_ids = [p.id for p in players]\n players_and_cards = [d for d in players_and_cards if d[0] in player_ids]\n winners = determine_winners(players_and_cards, self.board.codes)\n return [Player.query.get(winner) for winner in winners]", "def still_playing_session(self):\n return len(self.players) != 0", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )", "def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False", "def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players", "def test_gridironfootballplayers_get(self):\n pass", "async def _players(self, ctx: Context):\n\n guild = ctx.guild\n\n player_role = await self.role_from_config(guild, \"player_id\")\n\n players = [\n user.mention for user in guild.members if player_role in user.roles\n ]\n\n title = _(\"Total Players: {}\").format(len(players))\n txt = \"\\n\".join(players)\n\n embed = discord.Embed(\n colour=player_role.color, title=title, 
description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"I need embed permissions for this command.\")", "def players(self):\n return Player.objects.filter(team=self)", "async def tod_list(self, ctx, *args):\n message = \"__Currently Playing__\\n\"\n if len(self.players) == 0:\n message = \"There are currently no users playing.\"\n for player in self.players:\n message += f\"> {str(player)[:-5]}\\n\"\n await ctx.send(message)", "def should_keep_running(self):\n return len(self.party.active_users())", "def tell_all_players(message):\n print(\"tell_all_players - disabled\")\n #player_obj_list = leetcoin_client.getPlayerObjList()\n #for player_obj in player_obj_list:\n # #print(\"player_obj key: %s\" player_obj.get_key())\n # print(player_obj.get_userid())\n # \n # playerinfo = playerinfo_from_userid(player_obj.get_userid())\n # \n # i = index_from_playerinfo(playerinfo)\n # m = HintText(index=i, chat=1, message=message)\n # m.send(i)", "def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def check_collisions(game_grid, player_list):\n\n for cycle in player_list:\n cycle.check_collision(game_grid)", "def still_playing_game(self):\n for player in self.players:\n if player.is_playing:\n return True\n return False", "def active_players(self):\n return self.players.join(players_active).all()", "def setup_players(self, players):\n\t\tself.players.clear()\n\t\tids = set([p.get_player_id() for p in players])\n\t\tfor p in self.state.get_players():\n\t\t\tif p not in ids:\n\t\t\t\traise PlayerException(p)\n\t\tfor p in players:\n\t\t\tself.players[p.get_player_id()] = p", "def players(self):\n return self.currents.player", "def getPlayers(self):\n\t\tself.server.playerMutex.lock()\n\t\tplayers = [ (player[0], player[1][3]) for player in self.server.players.items() ]\n\t\tself.server.playerMutex.unlock()\n\t\treturn players", "def numberOfPlayers(self):\r\n return len(self.playerPreparers)", "def getPlayers(self):\n return iter(self.players)", "def getPlayersForGame(self, board):\r\n players = []\r\n for preparer in self.playerPreparers:\r\n player = Player()\r\n preparer.prepare(player, board)\r\n players.append(player)\r\n \r\n return players", "def get_players(self, address):\n room = None\n\n if self.config.get('wc_room_workaround', True):\n room = self.room_manager.find_room_for_client_ip(address[0])\n\n if room is None:\n room = self.room\n\n return room.players", "def get_list_non_match_participants(client, project_id, validation_dataset_id,\n hpo_id):\n\n # get the the hpo specific <hpo_id>_identity_match\n identity_match_table = bq_utils.get_table_id(hpo_id, IDENTITY_MATCH)\n result = []\n fq_identity_match_table = f'{project_id}.{validation_dataset_id}.{identity_match_table}'\n if not exist_identity_match(client, 
fq_identity_match_table):\n return result\n\n non_match_participants_query = get_non_match_participant_query(\n project_id, validation_dataset_id, identity_match_table)\n\n try:\n LOGGER.info(\n 'Identifying non-match participants in {dataset_id}.{identity_match_table}'\n .format(dataset_id=validation_dataset_id,\n identity_match_table=identity_match_table))\n\n results = bq_utils.query(q=non_match_participants_query)\n\n except (oauth2client.client.HttpAccessTokenRefreshError,\n googleapiclient.errors.HttpError) as exp:\n\n LOGGER.exception('Could not execute the query \\n{query}'.format(\n query=non_match_participants_query))\n raise exp\n\n # wait for job to finish\n query_job_id = results['jobReference']['jobId']\n incomplete_jobs = bq_utils.wait_on_jobs([query_job_id])\n if incomplete_jobs:\n raise bq_utils.BigQueryJobWaitError(incomplete_jobs)\n\n # return the person_ids only\n result = [row[PERSON_ID_FIELD] for row in bq_utils.response2rows(results)]\n return result", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def load_fixture_player_stats(self):\n stats_list = []\n\n print(\"Getting fixture players..\")\n with Pool(self.pool) as p:\n fixture_info = list(tqdm(p.imap(self.fixture_info_singel, self.fixture_ids, chunksize=1), total=len(self.fixture_ids)))\n print('Getting data from workers..')\n i = 0\n for info in fixture_info:\n stats = {}\n if info:\n stats = {info['id']: []}\n if 'teamLists' in info:\n team_list = info['teamLists']\n for lineups in team_list:\n if lineups:\n team_id = lineups['teamId']\n lineup = lineups['lineup']\n substitutes = lineups['substitutes']\n for l in lineup:\n stats[info['id']].append(l['id'])\n for s in substitutes:\n stats[info['id']].append(s['id'])\n else:\n i += 1\n if stats:\n stats_list.append(stats)\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n return stats_list", "def initialize_players():\n while True:\n nb_of_players = input(\"\\nEntrez le nombre de joueurs : \")\n if not nb_of_players.isdigit():\n print(\"You have to enter a number!\")\n else:\n nb_of_players = int(nb_of_players)\n if nb_of_players < 2:\n print(\"You have to enter at least two!\")\n else:\n break\n nb_of_players = int(nb_of_players)\n list_of_players = [] #This list is going to be returned\n names_secure = [] #stores player's names in lower mode for security\n for index in range(1, nb_of_players+1):\n while True:\n player_name = input(\"Entrer le nom du joueur {} \".format(index))\n if (player_name.lower() == 'end' or player_name.lower() in names_secure):\n print(\"Incorrect Name\")\n else:\n names_secure.append(player_name.lower())\n new_player = Player(player_name)\n list_of_players.append(new_player)\n break\n return list_of_players", "def nflplayers(self, irc, msg, args, optplayer):\n \n db_filename = self.registryValue('nflPlayersDb')\n \n if not os.path.exists(db_filename):\n self.log.error(\"ERROR: I could not find: %s\" % db_filename)\n return\n \n db = sqlite3.connect(db_filename)\n cursor = db.cursor()\n \n optplayer = optplayer.lower().strip()\n\n #cursor.execute(\"select id from players where name='?'\", ([optplayer]))\n \n query = \"select id, name from players WHERE name LIKE '%%%s%%'\" % optplayer\n cursor.execute(query)\n \n rows = cursor.fetchall()\n \n if len(rows) < 1:\n irc.reply(\"I did not find anything matching: %s\" % optplayer)\n return\n else:\n results = string.join([str(item[1]) + \" (\" + str(item[0]) 
+ \")\" for item in rows], \" | \")\n output = \"I found {0} results for: {1} :: {2}\".format(len(rows), optplayer, results)\n irc.reply(output)", "def _wake_players(self):\n self.__current_player = None\n players = self.get_players(self.get_state_info(\"wake_all\"))\n self.__player_wakeup_iter = iter(players.copy())\n self.player_done(None)", "def test_list_respects_invite_only_bit(self) -> None:\n\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n\n self.common_subscribe_to_streams(user, [\"Saxony\"], invite_only=True)\n self.common_subscribe_to_streams(user, [\"Normandy\"], invite_only=False)\n result = self.api_get(user, \"/api/v1/users/me/subscriptions\")\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscriptions\", response_dict)\n for sub in response_dict[\"subscriptions\"]:\n if sub[\"name\"] == \"Normandy\":\n self.assertEqual(\n sub[\"invite_only\"], False, \"Normandy was mistakenly marked private\"\n )\n if sub[\"name\"] == \"Saxony\":\n self.assertEqual(sub[\"invite_only\"], True, \"Saxony was not properly marked private\")", "def RegisterPlayers(self):\n self.tcf.RegisterPlayers(self.player_handler.GetPlayers())", "def is_on_waiting_list(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.waiting_list:\n return True\n return False", "def list_games(self, user_id: UUID) -> Iterable[UUID]:\n return (game_id for game_id, game in self.games.items() if user_id in game.players)", "def test_private_rooms_do_not_have_profiles_collected(self) -> None:\n room_id = self.helper.create_room_as(\n self.alice, is_public=False, tok=self.alice_tok\n )\n self.get_success(\n event_injection.inject_member_event(\n self.hs,\n room_id,\n \"@bruce:remote\",\n \"join\",\n \"@bruce:remote\",\n extra_content={\n \"displayname\": \"super-duper bruce\",\n \"avatar_url\": \"mxc://remote/456\",\n },\n )\n )\n # Sending this event makes the streams move forward after the injection...\n self.helper.send(room_id, \"Test\", tok=self.alice_tok)\n self.pump(0.1)\n\n profiles = self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertNotIn(\"@bruce:remote\", profiles)", "def get_players(self, hero, data, verbose):\n\n if len(self.players) > 1:\n out = f\"\\n\\nplayers:\"\n for name, player in data[\"players\"].items():\n if name != hero:\n out += \"\\n \" + name\n if verbose:\n out += Game._verbose_print(player)\n else:\n out = f\"\\n\\nThere's nobody else here.\"\n\n return out", "def has_member(self, player):\n return player in self.members", "def is_participant(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.participants:\n return True\n return False", "def skip(self, player):\n if player not in self.players_skipped:\n self.players_skipped.append(player)\n self.skips += 1\n self.chat.update_chat(f\"Player has voted to skip ({self.skips}/{len(self.game.players) -2})\")\n if self.skips >= len(self.game.players) - 2:\n return True\n\n return False", "def _get_connected_player_list(self):\r\n if not zpgapi.is_zgp_api_enabled():\r\n # API is not configured, skip this.\r\n return []\r\n\r\n cache_key = 'api_connected_players'\r\n cache_val = cache.get(cache_key)\r\n\r\n if cache_val != None:\r\n return cache_val\r\n\r\n api = zpgapi.get_zpg_api_iface()\r\n try:\r\n api_response = api.cmd_list_connected()\r\n cache_val = api_response['player_list']\r\n except urllib2.URLError:\r\n # Error with zombiepygman.\r\n # This will get cached, but that's OK. 
It will prevent request\r\n # pileup on the gunicorn workers.\r\n cache_val = []\r\n\r\n cache.set(cache_key, cache_val, 60)\r\n return cache_val", "def participants(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/participants'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def get_player_list(tournament):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste indice Joueurs']\n player_list = []\n for player_id in id_list:\n # getting the players\n player = players_table.get(doc_id=player_id)\n player_list.append(player)\n return player_list", "async def skip(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if player.current.requester not in map(lambda c: c.id, player.connected_channel.members):\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n if player.current.requester == ctx.author.id:\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n minpeople = math.ceil(len(set(filter(lambda x: not x.bot, player.connected_channel.members)))/2)\n votes = player.fetch(\"votes\") if player.fetch(\"votes\") else 0\n player.store(\"votes\", votes + 1)\n if votes + 1 >= minpeople:\n await ctx.send(\"Skipped.\")\n await player.skip()\n else:\n await ctx.send(\"Skip? {}/{}\".format(votes + 1, minpeople))", "def check_players_collision(self):\n # Check if bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player1, self.player2_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player1, self.mob_bullet, True))\n if bullet_hit_p and not self.player1.isInvincible():\n self.p2_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player1.is_destroyed():\n self.player1.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player1.get_x(), self.player1.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))\n\n # Check if Player 2 bullet hit the player 1\n bullet_hit_p = len(pygame.sprite.spritecollide(self.player2, self.player1_bullet, True))\n bullet_hit_m = len(pygame.sprite.spritecollide(self.player2, self.mob_bullet, True))\n if bullet_hit_p and not self.player2.isInvincible():\n self.p1_score += 500\n if bullet_hit_p + bullet_hit_m > 0 and not self.player2.is_destroyed():\n self.player2.destroy()\n self.explosions.add(Explosion(self.fps // 4, self.player2.get_x(), self.player2.get_y(), self.screen_width,\n self.screen_height, 0, self.debug))", "def participants(self):\r\n return Participants(self)", "def collect_players_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for party in added:\n Notify(context, party)", "def create_players_list(self):\n for p in self.players_names:\n self._players_list.append(Player(p))", "def 
check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def get_names_users(self):\n user_1 = self.view.entry_player_1.get()\n user_2 = self.view.entry_player_2.get()\n if len(user_1) == 0 or len(user_2) == 0:\n\n tk.messagebox.showwarning(\"Warning\", \"Please enter players name\")\n self.logger.warning(\"Please enter players name\")\n return False\n self.update_players_name(user_1, user_2)\n return True", "def participants(self):\n return Participants(self)", "def find_users_missing_standup():\n token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']\n sc = SlackClient(token)\n channels = sc.api_call('channels.list')['channels']\n standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()\n members = standup['members']\n messages = sc.api_call('channels.history', channel=standup['id'])['messages']\n messages_within_last_10_hours = filter(check_in_date_range, messages) \n users_posted = (i['user'] for i in messages_within_last_10_hours if\n 'user' in i.keys())\n difference = set(members).difference(users_posted)\n return difference", "def get_players(self):\n\n # Append the current player to the list and return it\n players_list = list(self._players.queue)\n players_list.append(self._current_player)\n return players_list", "def load_member_ids(self):\n sql = \"\"\"\n INSERT INTO {schema}.participant_match\n (id, member_id, first_name, last_name, nickname,\n email, birth_date, is_birth_date_estimated)\n SELECT uuid_generate_v4(), id as member_id, first_name, last_name,\n nickname, email, birth_date, false\n FROM {schema}.members\n WHERE id NOT IN (SELECT member_id FROM {schema}.participant_match)\n \"\"\".format(schema=self.database.schema)\n self.database.run_query(sql)", "def test_get_player_upcoming_chests(self):\n pass", "def relist_expired_players(self, p_ids_and_prices):\n # TODO make this user configurable, like when I had Ronaldo on my transfer list\n players_to_list = True\n try:\n while players_to_list:\n # Check if topmost player exists that should be listed\n topmost_player_location_unlisted = \"/html/body/main/section/section/div[2]/div/div/div/section[2]/ul/li[1]/div\"\n p_exists = self.check_exists_by_xpath(\n topmost_player_location_unlisted)\n if (p_exists == False):\n players_to_list = False\n else:\n # Click topmost player\n self.sleep_approx(2)\n self.clickButton(topmost_player_location_unlisted)\n self.sleep_approx(1)\n\n # Click list for transfer\n listfortransfer_location = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[1]/button\"\n self.clickButton(listfortransfer_location)\n self.sleep_approx(1)\n\n # Get player sell price\n playerrating = int(self.getText(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[7]/div[2]/div[1]\"))\n playercardname = self.getText(\n \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[1]/div/div[2]/div/div/div[1]/div/div[4]\")\n playerid = self.getPlayerID(playercardname, playerrating)\n 
sellprice = p_ids_and_prices[playerid]\n\n startprice_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[2]/div[2]/input\"\n buynow_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/input\"\n listplayer_loc = \"/html/body/main/section/section/div[2]/div/div/section/div/div/div[2]/div[2]/div[2]/button\"\n\n # Make sure text boxes are visible\n self.scrollIntoView(listplayer_loc)\n self.send_keys_and_sleep(buynow_loc, sellprice)\n self.send_keys_and_sleep(startprice_loc, sellprice-100)\n\n # Final step - list player on market\n self.clickButton(listplayer_loc)\n except:\n log_event(self.queue, \" error 204, should be ok tho\")", "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)" ]
[ "0.6719684", "0.63900346", "0.6368742", "0.62854147", "0.6258194", "0.6222326", "0.6147451", "0.61474484", "0.6127953", "0.60543185", "0.598346", "0.59729105", "0.5960582", "0.5942845", "0.594048", "0.58446205", "0.5834904", "0.5820176", "0.5808948", "0.58085364", "0.5796258", "0.57725227", "0.57408744", "0.57369727", "0.57231367", "0.5709662", "0.56945777", "0.56907773", "0.56791586", "0.5677895", "0.56653017", "0.5639636", "0.5624959", "0.5617049", "0.5599757", "0.55986005", "0.5587752", "0.5585157", "0.5564278", "0.555871", "0.55511004", "0.5544612", "0.54911625", "0.54889715", "0.5487424", "0.5485945", "0.54851717", "0.54811054", "0.5480532", "0.54789394", "0.5477146", "0.54768133", "0.54767054", "0.5463002", "0.546021", "0.5453415", "0.5452074", "0.5448536", "0.5447634", "0.5447499", "0.5446554", "0.540297", "0.5392622", "0.53828245", "0.53810155", "0.53719306", "0.53706026", "0.5353701", "0.534693", "0.5343866", "0.5341681", "0.5341582", "0.5339297", "0.5335436", "0.53353584", "0.5333909", "0.53327256", "0.53319985", "0.53314346", "0.5329395", "0.53203905", "0.5317571", "0.5316051", "0.5308743", "0.5306102", "0.52871627", "0.5285416", "0.52848595", "0.5276", "0.5268224", "0.5266583", "0.5266583", "0.5263435", "0.52537894", "0.5249659", "0.52495974", "0.5237228", "0.5236309", "0.52234423", "0.5221516" ]
0.6249625
5
Gets our permissions on the server.
def permissions(self) -> discord.Permissions: return self.channel.permissions_for(self.guild.me)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_permissions(self):\n return self.settings[\"permissions\"]", "def octopus_permissions_get(self, msg, args):\r\n return self.permissions.get_permissions()", "def permissions(self):\n return self.get_permissions()", "def get_permissions():\n return config.get_cfg_storage(ID_PERMISSION)", "def permissions(self) -> 'outputs.PermissionsResponse':\n return pulumi.get(self, \"permissions\")", "def permissions(self):\n return self._permissions", "def get_permissions(self):\n if not hasattr(self, '_permissions'):\n self._permissions = self.permissions.all()\n return self._permissions", "async def fetch_permissions(self, condensed=False):\n\n logging.debug(\"Getting permissions (%scondensed)\" % (\n \"\" if condensed else \"not \"))\n\n if condensed:\n perms = await self.client.request.get(\n \"/auth/permissions\", params={\"condensed\": True})\n return perms[\"data\"]\n else:\n perms = await self.client.request.get(\"/auth/permissions\")\n return [BasePermission.build_permission(\n self.client, perm, self.loop) for perm in perms[\"data\"]]", "def get_permissions(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetPermissions', self.handle)", "def permissions(self):\n return self.proto.details.appDetails.permission", "def permissions(self):\n return None", "def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]", "def get_all_permissions(self):\n\t\turl = f'{self.root.url}/api/v1/sessions/permissions'\n\t\treturn self.root.r('GET', url, body=None, headers=None, verify=self.root.verify)", "def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]", "def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]", "def get_permissions(self):\n \n if self.action in ['signup', 'login', 'verify']:\n permissions =[AllowAny]\n # cualquiera que vaya a acceder a estas peticiones lo podra hacer\n # si la accion es de tipo retrieve se debe validar el permiso de acceso\n elif self.action in ['retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n # si no hay ninguna opcion debe tener una sesion autenticada \n return [p() for p in permissions]", "def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]", "def get_permissions(self):\n permissions = [IsAuthenticated]\n return [permission() for permission in permissions]", "def permissions(self):\n return list(self._permissions)", "def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]", "def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = 
[IsAdminUser]\n return [permission() for permission in permission_classes]", "def permissions(self):\n return [DSSWorkspacePermissionItem(permission) for permission in self.settings['permissions']]", "def get_permissions(self):\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]", "def get_permissions(self, principal_id):", "def get_permissions(self):\n if self.action in [\"update\", \"partial_update\", \"destroy\"]:\n permission_classes = [IsAdminOrOwner]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n # Condition to check the action level and set desired permission_class\n if self.action == 'create':\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated]\n \n # Finally return the all the permissions\n return [permission() for permission in permission_classes]", "def permissions(self) -> str:\n return pulumi.get(self, \"permissions\")", "def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]", "def permissions():\n pass", "def list_permissions(self):\n # type: () -> List[Permission]\n headers = Headers({\"accept\": \"application/json\"})\n return self.connection.api_call(\n \"GET\", [\"resources\", self.id, \"permissions\"], model=Permission, headers=headers,\n )", "def get_permissions(self):\n if self.action == 'destroy' or self.action == 'partial_update':\n permission_classes = [\n permissions.IsOwner,\n IsAuthenticated,\n ]\n else:\n permission_classes = [\n permissions.IsAdminOrReadOnly,\n IsAuthenticated,\n ]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action in ['list', 'create']:\n permission_classes = [IsStaffOrReadOnly]\n else:\n permission_classes = [IsAuthorOrReadOnly, IsStaffOrReadOnly]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n method = self.request.method.lower()\n\n try:\n use_permission_classes = getattr(self, '%s_permission_classes' % method)\n except AttributeError:\n use_permission_classes = self.permission_classes\n\n return [permission() for permission in use_permission_classes]", "async def get_permissions(self, requester: Requester, model: Model):\n raise NotImplementedError", "def get_permissions(self):\n\n permissions = [\n IsAuthenticated(),\n IsCircleActiveMember(),\n ]\n\n if self.action in ['update', 'partial_update', 'finish']:\n permissions.append(\n IsRideOwner()\n )\n\n if self.action in ['join', 'qualify']:\n permissions.append(\n IsNotRideOwner()\n )\n\n return permissions", "def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticatedOrReadOnly]\n if self.action == 'create':\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAdminUser | IsAuthenticated| IsAdminOrIsSelf]\n return [permission() for permission in permission_classes]", "def permissions(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"permissions\")", "def get_permissions(self):\n if self.action in ['retrieve', 'list']:\n self.permission_classes = [permissions.ViewUserPermission,]\n elif self.action in ['update', 
'partial_update']:\n self.permission_classes = [permissions.UpdateUserPermission]\n elif self.action in ['destroy']:\n self.permission_classes = [permissions.UpdateUserPermission]\n\n return [permission() for permission in self.permission_classes]", "def permissions(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"permissions\")", "def getPermissions(self, scope):\n\n return [permissions.api_enum_for_permission(p)\n for p in permissions.get_permissions(scope)]", "def get_all_permissions(self, obj=None):", "def permissions(self) -> pulumi.Output[Optional[Sequence['outputs.DataSetResourcePermission']]]:\n return pulumi.get(self, \"permissions\")", "def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated] \n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated] \n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == \"create\" or self.action == \"token\":\n permission_classes = [AllowAny]\n else:\n permission_classes = [IsAuthenticated] \n return [permission() for permission in permission_classes]", "def get_permissions(self):\n return [permission() for permission in self.permission_classes]", "def get_permissions(self):\n if self.action in []:\n permission_classes = [permissions.AllowAny]\n else:\n permission_classes = [permissions.IsAuthenticated]\n return [permission() for permission in permission_classes]", "def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataSetResourcePermissionArgs']]]]:\n return pulumi.get(self, \"permissions\")", "def get_permissions(self):\n if self.action in [\"list\"]:\n permission_classes = [permissions.UserOrPlaylistIsAuthenticated]\n elif self.action in [\"create\", \"set_display_name\", \"push_attendance\"]:\n permission_classes = [\n permissions.PlaylistIsAuthenticated\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\n \"partial_update\",\n \"retrieve\",\n ]:\n permission_classes = [\n 
permissions.IsTokenPlaylistRouteObjectRelatedVideo\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\"list_attendances\"]:\n permission_classes = [\n permissions.IsTokenInstructor\n | permissions.IsTokenAdmin\n # With standalone site, admin can access\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.IsParamsVideoAdminOrInstructorThroughPlaylist\n ]\n elif self.action is None:\n if self.request.method not in self.allowed_methods:\n raise MethodNotAllowed(self.request.method)\n permission_classes = self.permission_classes\n else:\n # When here it means we forgot to define a permission for a new action\n # We enforce the permission definition in this method to have a clearer view\n raise NotImplementedError(f\"Action '{self.action}' is not implemented.\")\n return [permission() for permission in permission_classes]", "def RequestedPermissions(self) -> _n_6_t_0:", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n permission_classes = {\"create\": [CanUploadReport]}.get(self.action)\n return [permission() for permission in permission_classes]", "def permissions(self) -> Optional[pulumi.Input['KeyVaultSpecAccessPoliciesPermissionsArgs']]:\n return pulumi.get(self, \"permissions\")", "def permission_resources(self):\n return self._permission_resources", "def permission_resources(self):\n return self._permission_resources", "def permissions(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"permissions\")", "def permissions(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"permissions\")", "def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"permissions\")", "def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"permissions\")", "def retrieve_permissions(service, file_id):\n try:\n permissions = service.permissions().list(fileId=file_id).execute()\n return permissions.get('items', [])\n except errors.HttpError, error:\n print 'An error occurred: %s' % error\n return None", "def permissions(self):\n return SegmentPermissions(self.segment_t)", "def get_permissions(self, user_id):\n response = self.request(\n \"{0}/{1}/permissions\".format(self.version, user_id), {}\n )[\"data\"]\n return {x[\"permission\"] for x in response if x[\"status\"] == \"granted\"}", "def get_cli_permissions():\n query = {\"type\": \"op\", \"cmd\": \"<show><cli><permissions></permissions></cli></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "def test_get_permissions(self):\n pass", "def get_permissions(self):\n return {key: value.permissions for key, value in self}", "def get_permissions(self):\n if self.action == \"destroy\":\n permission_classes = [IsAuthenticated, IsAuthor]\n elif self.action in 
[\"list\", \"create\"]:\n permission_classes = [IsAuthenticated, IsContributorOrAuthor]\n else:\n permission_classes = [NotAllowed]\n\n return [permission() for permission in permission_classes]", "def get_permissions(self):\n view = getattr(self, self.action)\n if hasattr(view, 'permission_classes'):\n return [permission_class() for permission_class in view.permission_classes]\n return super().get_permissions()", "def get_group_permissions(self):\n if not hasattr(self, '_group_permissions'):\n self._group_permissions = Permission.objects.filter(group__in=self.groups.all())\n return self._group_permissions", "def permissions(self):\n return int(self.options.get_str('octal_permissions'), 8)", "def permission_list(**kwargs):\n print(AppPermissionSchema(many=True).dumps(\n get_protected_routes(ignored_methods=[\"HEAD\", \"OPTIONS\"]), indent=4))", "def has_permissions(self):\n perms = set(r.name for r in Permission.query.join(Permission.groups, Group.users).filter(User.id == self.id).all())\n return perms", "def permissions(server_object, client, address, command_args):\n\n\tusr = command_args[1]\n\n\t#: Get the permission level of the user\n\tpermission_level = server_object.permissions[server_object.get_ip(usr)].permission\n\n\t#: Send that permission level to the caller.\n\tclient.send(permission_level.encode())", "def get_all_permissions(self) -> set[tuple[str, str]]:\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )", "def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]", "def get_permissions(self):\n try:\n # return permission_classes depending on `action`\n return [permission() for permission in self.permission_action\n [self.action]]\n except KeyError:\n # action is not set return default permission_classes\n return [permission() for permission in self.permission_classes]", "async def permissions(self, ctx):\n if len(ctx.message.mentions) == 0:\n for perm in ctx.message.author.server_permissions:\n print(perm)\n else:\n users = ctx.message.mentions\n message = discord.Embed(title='Permissions',type='rich', colour=discord.Color(0xffb6c1))\n for user in users:\n t_perm, f_perm = '', ''\n for perm, value in user.server_permissions:\n if value:\n t_perm += perm + '\\n'\n else:\n f_perm += perm + '\\n'\n perms = \"_**Allowed**_\\n\" +t_perm + '------\\n' + \"_**Not allowed**_\\n\" + f_perm \n message.add_field(name=user, value='{}'.format(perms))\n await self.bot.say(embed=message)", "def get_permissions(self) -> List[Type[BasePermission]]:\n return [UserCanTriageRequest]", "def get_permissions(self) -> List[Type[BasePermission]]:\n return [UserCanTriageRequest]", "def get_permissions(self) -> List[Type[BasePermission]]:\n return [UserCanTriageRequest]", "def get_permissions(self) -> List[Type[BasePermission]]:\n return [UserCanTriageRequest]", "def get_all_permissions(self, obj=None):\n return self.get_group_permissions(obj)", "def PermissionSet(self) -> _n_6_t_0:", "def resource_permissions(self, resource_type, params, username, group):\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query permitted resources\n permissions = 
self.resource_permission_handler.permissions(\n resource_type, params, username, group, session\n )\n\n # close session\n session.close()\n\n return {\n 'permissions': permissions\n }", "async def _p_list(self, ctx):\n result = self.database.get_perm_rules(ctx.guild.id)\n if len(result) == 0:\n await ctx.send(\"No permissions set for this guild.\")\n return\n guild_perms = {}\n for perm in result:\n if guild_perms.get(perm.command, None) is None:\n guild_perms[perm.command] = {}\n if guild_perms.get(perm.command).get(perm.perm_type, None) is None:\n guild_perms[perm.command][perm.perm_type] = []\n guild_perms[perm.command][perm.perm_type].append([perm.target, perm.priority, perm.allow])\n\n out = \"```\"\n for command in guild_perms:\n out += f\"Command: {command}\\n\"\n for level in sorted(guild_perms[command], key=lambda a: self.LEVELS[a]):\n out += f\" Level: {level}\\n\"\n if level == \"guild\":\n out += f\" {guild_perms[command][level]}\\n\"\n else:\n for detail in guild_perms[command][level]:\n out += f\" {detail[1]}-{detail[0]}: {bool(detail[2])}\\n\"\n out += \"```\"\n await ctx.send(out)", "def getAllPerms(self,request):\n request.needAuthType(request.ADMIN)\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n all_perms_dic=perm_loader.getLoader().getAllPerms()\n if request.has_key(\"category\"):\n category=request[\"category\"]\n else:\n category=\"all\"\n all_perms_list=self.__getPermsListFromPerms(all_perms_dic,category)\n sorted=SortedList(all_perms_list)\n sorted.sortByPostText('[\"name\"]',0)\n return sorted.getList()", "def get_required_permissions(self, method, view):\n perms_map = self.perms_map.copy()\n\n if hasattr(view, \"perms_map\"):\n perms_map.update(view.perms_map)\n\n if method not in perms_map:\n return []\n\n return perms_map[method]", "def QueryTestablePermissions(self, request, global_params=None):\n config = self.GetMethodConfig('QueryTestablePermissions')\n return self._RunMethod(\n config, request, global_params=global_params)", "async def get_manipulation_permissions(self, requester: Requester,\n model: Model) -> Tuple[\n ManipulationPermissions, Dict[str, Any]]:\n raise NotImplementedError", "def get_access_list(self):\n return self.manager.get_access_list(self)", "def getPermsOfAdmin(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\")\n admin_perms=admin_main.getLoader().getAdminByName(request[\"admin_username\"]).getPerms()\n perms_list=self.__getPermsListFromAdminPerms(admin_perms)\n sorted=SortedList(perms_list)\n sorted.sortByPostText('[\"name\"]',0)\n return sorted.getList()", "def list_permissions(self, catalog_id: str) -> List[Dict[str, Any]]:\n return self.grants[catalog_id]", "def getPermissionsForUser(self, scope, extra_params, perm_filter):\n\n if perm_filter is None or not any(perm_filter.__dict__.values()):\n # If no filtering is needed, this function behaves identically\n # to getPermissions().\n return self.getPermissions(scope)\n\n with DBSession(self.__config_db) as session:\n # The database connection must always be passed to the permission\n # handler.\n params = ThriftAuthHandler.__unpack_extra_params(extra_params,\n session)\n\n perms = []\n for perm in permissions.get_permissions(scope):\n should_return = True\n handler = make_handler(perm, params)\n\n if should_return and perm_filter.given:\n should_return = handler.has_permission(self.__auth_session)\n\n if should_return and perm_filter.canManage:\n # If the user has any of the permissions that are\n # authorised to manage the 
currently iterated permission,\n # the filter passes.\n should_return = require_manager(\n perm, params, self.__auth_session)\n\n if should_return:\n perms.append(perm)\n\n return [permissions.api_enum_for_permission(p)\n for p in perms]", "def _get_permission(self, obj_type, path, username):\n if obj_type == Collection:\n# XXX - in iRODS < 4.2, CollectionUser.name isn't supported.\n# query = self.session.query(Collection, CollectionAccess).filter(\n# CollectionUser.name == username, Collection.name == path)\n# result = [self.perm_str_mapping[row[CollectionAccess.name]] for row in query\n query = self.session.query(User.id).filter(User.name == username)\n for row in query:\n id = row[User.id]\n query = self.session.query(Collection, CollectionAccess).filter(\n CollectionAccess.user_id == id, Collection.name == path)\n result = [self.perm_str_mapping[row[CollectionAccess.name]] for row in query]\n### XXX - ^^^\n return result\n if obj_type == DataObject:\n conditions = [\n Collection.name == dirname(path),\n DataObject.name == basename(path),\n User.name == username\n ]\n query = self.session.query(DataObject.name, DataAccess.name) \\\n .filter(*conditions).all()\n result = [self.perm_str_mapping[row[DataAccess.name]] for row in query]\n return result\n self._fail(\"Unsupported Object type\")\n return None", "def getpermission(self, context=None, componentid=None, app=None):\n return jsoncall.do_call(\"getpermission\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context': context,\\\n 'componentid': componentid,\\\n 'app': app},\n self.connection)" ]
[ "0.7986771", "0.794256", "0.7887418", "0.78345644", "0.77169865", "0.76053184", "0.7577209", "0.75250983", "0.74911094", "0.7277559", "0.72693956", "0.72676855", "0.723835", "0.71908754", "0.7177519", "0.7157548", "0.7146727", "0.71430004", "0.7129717", "0.70936584", "0.7071286", "0.706043", "0.70268464", "0.6990812", "0.6970579", "0.6968402", "0.6952973", "0.69511414", "0.69497454", "0.69436383", "0.6938639", "0.69341975", "0.68840194", "0.68751705", "0.68534213", "0.6846339", "0.6842796", "0.6839459", "0.682163", "0.6815556", "0.6806116", "0.6799485", "0.67981905", "0.67981905", "0.67404485", "0.6710019", "0.6710019", "0.6710019", "0.66981035", "0.66867304", "0.6684178", "0.6682525", "0.6655024", "0.6644297", "0.6644297", "0.66133004", "0.66133004", "0.6609999", "0.6577344", "0.65654963", "0.65654963", "0.65147525", "0.65147525", "0.65110576", "0.65110576", "0.65090895", "0.6503565", "0.6503536", "0.646784", "0.6461788", "0.6456595", "0.6440999", "0.6402085", "0.6385934", "0.6347927", "0.63423574", "0.63362426", "0.6335927", "0.63040704", "0.6287558", "0.6287558", "0.627778", "0.62653476", "0.62653476", "0.62653476", "0.62653476", "0.62454695", "0.6231751", "0.62224346", "0.62157047", "0.6214024", "0.6191481", "0.6155192", "0.6140428", "0.6134987", "0.61015713", "0.60974157", "0.609249", "0.6016433", "0.60105646" ]
0.7516241
8
Gets the user mention string. If the user isn't found, just return the username.
def mention_user(self, username: str) -> str:
    member = self.get_user(username)
    if member:
        return member.mention
    return username
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_user_mention(self, name: str) -> str:\n try:\n return self.known_user_ids[name]\n except KeyError:\n pass\n\n mention = name\n users = get_value_from_redis('slack-members') or []\n user = self._get_user(name, users)\n if user:\n mention = f'<@{user[\"id\"]}>'\n\n self.known_user_ids[name] = mention\n\n return mention", "def mention_user(user: tg.types.User) -> str:\n\n if user.username:\n # Use username mention if possible\n name = f\"@{user.username}\"\n else:\n # Use the first and last name otherwise\n name = tg.utils.get_display_name(user)\n if not name:\n # Deleted accounts have no name; behave like the official clients\n name = \"Deleted Account\"\n\n return f\"[{name}](tg://user?id={user.id})\"", "def mention(self) -> str:", "def mention(self) -> str:", "def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_username(self, auth_token, user_id=None):\n self.headers['Authorization'] = f'Bearer {auth_token}'\n user = self.http_client.get(\n f'{self.api_endpoint}/users/@me', self.headers)\n return f'{user[\"username\"]}#{user[\"discriminator\"]}'", "def get_username(self, request):\r\n try:\r\n return request.user.username\r\n except AttributeError:\r\n return ''", "def get_user_name_safe(self, uid, *, channel_model: Optional[ChannelModel] = None) -> Optional[str]:\n prof = self.get_profile(uid, channel_model=channel_model)\n\n if prof:\n return prof.display_name\n\n return None", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def username(self) -> str:", "def username(self) -> str:", "def get_username(self, tg_user_id):\n\n data = {\n 'user_id': tg_user_id\n }\n result = self._send_data('getUser', data)\n if result.update:\n return result.update.get('username','')", "def mention(self) -> str:\n raise NotImplementedError", "def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()", "async def get_user_name(self, user_target: str) -> str:\n user = await self.get_user(user_target=user_target)\n if user is None:\n return user_target\n return user.display_name", "def get_username():\n\n if session.get(\"user_id\") is None:\n username = \"\"\n else:\n user_id = session.get(\"user_id\")\n user = User.query.filter(User.id==user_id).first()\n username = user.username\n\n return username", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def get_username(self):\n return self.browser.find_element(*locators.USER_NAME_TEXT).text", "def usernameFind(self):\r\n return self.username()", "async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(GetFullUserRequest(previous_message.from_id))\n else:\n user = event.pattern_match.group(1)\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n 
await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user", "def get_username(self, uid: str) -> str:\n\n cursor = self._db_connection.cursor()\n\n # Get username associated with the UID if this UID is in the database\n cursor.execute('''SELECT username FROM users WHERE uid = ?;''', (uid,))\n username = cursor.fetchone()\n\n # If the user can't be found\n if username is None:\n raise RuntimeError(f\"No username found for UID: {uid}\")\n\n # Get the user's username from the returned tuple\n username = username[0]\n\n return username", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(\n GetFullUserRequest(previous_message.sender_id)\n )\n else:\n user = event.pattern_match.group(1)\n\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user", "def get_username(self):\n return str(getattr(self, self.USERNAME_FIELD))", "def get_user_name(self):\n whoami = subprocess.Popen('whoami', stdin=None, stdout=subprocess.PIPE,\n shell=True, close_fds=True)\n whoami = whoami.communicate()[0]\n if '\\n' in whoami:\n newline_index = whoami.find('\\n')\n whoami = whoami[:newline_index]\n return whoami", "def username(self) -> undefined.UndefinedOr[str]:", "def get_username(self) -> str:\n return self._username", "def get_user_name(self, uid):\n uid = str(uid)\n name = self._username_cache.get(uid)\n if name is None:\n name = self.fbchat_client.fetchUserInfo(uid)[uid].name\n self._username_cache[uid] = name\n return name", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self):\n return self._stub.List(self._message).user_name", "def user_ref_to_username(value):\n # strip the '<@' and '>'\n user_id = reference_to_id(value.group())\n user_profile = get_user_profile(user_id)\n return '@' + user_profile['name'] or user_id", "def username(self):\n if self._username is not None:\n return self._username\n # Try to get a username from the userprofile\n try:\n self._username = self.userprofile.user.username\n except UserProfile.DoesNotExist:\n # User profile does not exist\n return None\n return self._username", "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def get_nickname_for_user(cls, user):\n return cls.get_account_for_user(user).nickname", "def get_meme_generator_username(self):\n key = self.bot_data_file[\"meme_generator\"][\"username\"]\n if self.check_empty_key(key):\n return key\n else:\n print(\"ERROR GETTING THE MEME USERNAME (register on https://api.imgflip.com/) - BOT ABORTING\")\n quit(1)", "def get_user_fullname(self):\n member = self.get_user()\n if member:\n return member.getProperty('fullname')", "def get_username(self):\n return self.username", "async 
def getuserid(ctx, user=None):\n if user == None:\n await ctx.send(f\"Your user ID is `{ctx.message.author.id}`.\")\n elif user[:3] != \"<@!\":\n member = ctx.message.guild.get_member_named(user)\n await ctx.send(f\"The user ID of {user} is: `{member.id}`\")\n else:\n user = user.replace(\"<@!\", \"\").replace(\">\", \"\")\n await ctx.send(f\"The user ID of <@{user}> is `{user}`.\")", "def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def steamUsername() -> Optional[str]:\n\ttry:\n\t\twith open( f'{steamDir()}/config/loginusers.vdf' ) as file:\n\t\t\tusers = Property.parse( file, 'loginusers.vdf' ).as_dict()[ 'users' ]\n\t\t# find the first user in the users dict and take is username\n\t\treturn users[ [ usr for usr in users.keys() ][ 0 ] ][ 'personaname' ]\n\texcept ( FileNotFoundError, TokenSyntaxError ):\n\t\treturn None", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def mention(result):\n return result.text.find('@') != -1", "def _get_username():\n username = request.args.get(\"username\")\n if not username:\n raise NoUserError()\n else:\n return username", "def get_user(self, username: str) -> Optional[discord.Member]:\n for m in self.guild.members:\n if utils.istrcmp(m.display_name, username):\n return m\n return None", "def _get_user_name(self):\n if self.runtime.get_real_user is None:\n return 'staff'\n else:\n return self.runtime.get_real_user(self.runtime.anonymous_student_id).username", "def get_username(self):\r\n return self.username", "def _get_username(self):\n name = self._get_username_from_cookies()\n if name:\n return name\n if self._oauth and self._login_info[0]:\n return self._login_info[0]\n return self._get_username_from_api()", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def get_username(self):\n if not self.is_valid():\n return None\n try:\n # NOTE: all emails stored in lower-case\n email = self.clean_email().lower()\n return User.objects.get(email=email).username\n except User.DoesNotExist:\n pass\n return None", "def get_username(self, obj):\n return obj.user.username", "def get_username(self, obj):\n return obj.user.username", "def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name", "def get_user_display_name():\n user_display_name = session.get(\"user_display_name\")\n return user_display_name if user_display_name else None", "def __full_name_for_user(self, username):\n\n # First check our cache of previous hits.\n if username in self.username_map:\n return self.username_map[username]\n\n # Fall back to p4gf_usermap, p4 users.\n user_3tuple = self.usermap.lookup_by_p4user(username)\n if user_3tuple:\n user = p4gf_usermap.tuple_to_P4User(user_3tuple)\n else:\n user = None\n fullname = ''\n if user:\n # remove extraneous whitespace for consistency with Git\n fullname = ' 
'.join(user.full_name.split())\n self.username_map[username] = fullname\n return fullname", "def get_username(self):\n raise NotImplementedError('get_username')", "def get_displayname(self):\n return self.full_name or self.user.username", "def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )", "def get_sender_username(self, mess):\n jid = mess.getFrom()\n typ = mess.getType()\n username = jid.getNode()\n domain = jid.getDomain()\n if typ == \"chat\":\n return \"%s@%s\" %(username, domain)\n else:\n return \"\"", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def username(self):\n return self.user.username", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def mention(cls, user, message, mentioned):\r\n pass", "def mention(self) -> str:\n return f\"<@{self.id}>\"", "def get_author_nickname(self, attribute_name, default=None):\n return getattr(self, '%s__author_nickname' % attribute_name, default)", "def get_short_name(self):\n # The user is identified by the email address\n return self.email", "def user_name(self):\n return self._user_name", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def _get_username_from_api(self):\n result = self.api_query(action=\"query\", meta=\"userinfo\")\n return result[\"query\"][\"userinfo\"][\"name\"]", "def _what_is_username(self):\n prompt = \"-?- Send to: \"\n sn = self._input(prompt)\n return sn", "def username(self) -> str:\n raise NotImplementedError", "def username_from_user_id_gql(self, user_id: int) -> str:\n return self.user_short_gql(user_id).username", "def get_user_name(_cache_user) -> str:\n try:\n return _cache_user[\"preferred_username\"]\n except KeyError:\n return \"Testing\"\n except TypeError:\n return \"Testing\"", "def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')", "def GetUsername(self):\n pass", "def mention(cls, user, message, mentioned):\n pass", "def compute_user_mentions_screen_name(row):\n entity_series = pd.read_json(json.dumps(row['entities']), typ='series')\n user_mentions_screen_name = list(map(lambda entry: 
entry['screen_name'], entity_series['user_mentions']))\n return ','.join(user_mentions_screen_name)", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def get_username(self):\r\n raise NotImplementedError", "def full_name(self):\n return self.user.get_full_name() or None", "def _get_username(user_id):\n username = select(u.username for u in UserInformationData if u.user_id == user_id).first()\n\n return username", "def get_username(email):\n username = [i['username'] for i in Data.users if email == i['email']]\n return \"\".join(username)" ]
[ "0.78680474", "0.75037277", "0.698189", "0.698189", "0.68996614", "0.6867837", "0.67106825", "0.6666238", "0.66000545", "0.6591795", "0.6591795", "0.65639687", "0.65635896", "0.65554214", "0.65437853", "0.64928955", "0.64731586", "0.6467594", "0.6467354", "0.6457515", "0.6457064", "0.6450335", "0.6450335", "0.6444754", "0.6414851", "0.6408973", "0.63948184", "0.6383352", "0.6379296", "0.63684314", "0.6363614", "0.63617796", "0.63557756", "0.63369423", "0.63313687", "0.63311887", "0.6330166", "0.63160133", "0.62991387", "0.62948626", "0.62847716", "0.6272396", "0.6270863", "0.62612915", "0.6259295", "0.62526083", "0.624904", "0.62460315", "0.6244343", "0.623333", "0.6231092", "0.6225646", "0.621873", "0.62042975", "0.62042975", "0.619386", "0.6189211", "0.61846733", "0.6174519", "0.61646116", "0.6162579", "0.6161847", "0.6151285", "0.6151285", "0.6151285", "0.61459595", "0.613788", "0.613788", "0.613788", "0.6136971", "0.6132371", "0.6131788", "0.6124434", "0.61076134", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.60962117", "0.6095663", "0.6095663", "0.6095663", "0.60947084", "0.60943997", "0.6077114", "0.6073783", "0.6072959", "0.60623276", "0.60567415", "0.60560626", "0.60528946", "0.60463226", "0.6045409", "0.60449725", "0.6033889", "0.60291284" ]
0.8424426
0
Get member by username.
def get_user(self, username: str) -> Optional[discord.Member]:
    for m in self.guild.members:
        if utils.istrcmp(m.display_name, username):
            return m
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user(self, username):\n\t\treturn self.users.get(username, None)", "def get_member(self, user):\n for player in self.members:\n if player.uuid == user.id:\n return player\n return None", "def Get(self, username):\n if self._users:\n return self._users.get(username, None)\n else:\n return None", "def get_member(self, *args, **kwargs):\n return self.bot.get_chat_member(self.id, *args, **kwargs)", "def get(self, username):\n return username", "def get_member_from_guild(guild_members, username):\n username = username.lower()\n if username == 'rand':\n return random.choice(guild_members)\n members = []\n for member in guild_members:\n lower_name = member.name.replace(' ', '').lower()\n if member.nick is not None:\n lower_nick = member.nick.replace(' ', '').lower()\n if username == lower_nick:\n return member\n if username in lower_nick:\n members.append(member)\n elif username == lower_name:\n return member\n elif username in lower_name:\n members.append(member)\n\n if not members:\n raise NameError(username)\n elif len(members) == 1:\n return members[0]\n else:\n raise AmbiguousInputError([member.name for member in members])", "def get(user):\n if user:\n return Member.get_by_key_name(user.user_id())", "def get(self, username):\n return User.find_by_username_or_email(username)", "def getMemberFromName(self, name):\n for member in self.playersAndRoles:\n if name in member.user.name:\n return member", "def get_by_username(self, username):\r\n return social_models.DjangoStorage.user.user_model().objects.get(username=username)", "def get_user(self, username):\n return self._cache.get(username)", "def get_by_username(cls, username):\n return cls.objects.get(username__iexact=username)", "def get_object(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise Http404", "def get(cls, username):\n doc = cls.collection().find_one({'username': username})\n if doc:\n return cls(**doc)\n else:\n return cls(username=None)", "def get_user(self, username):\n return self.s.query(User).filter(User.username == username).first()", "def get_by_username(self, username):\n user = User.query.filter_by(username=username).first()\n\n return user", "def member(self, user):\n return self.search(uid=user)[0][1]", "def get_user(username):\n users = get_users()\n for user in users:\n if user['username'] == username:\n return user\n\n raise UserNotFound", "def get_user_by_name(cls, username):\n a = db.GqlQuery(\"select * from Users where username=:1\", username)\n return a", "def parse_name(guild, username):\n if '@' in username:\n try:\n return guild.get_member(int(username[3:-1]))\n except:\n raise NameError(username)\n else:\n return get_member_from_guild(guild.members, username)", "def getMember(unique_name):", "def getMember(unique_name):", "def getUserByUsername(self, username):\n try:\n return User.objects.get(username=username)\n except:\n return None", "def find(cls, username: str) -> \"User\":\n return cls.retrieve(\n bind=User, params=dict(method=\"user.getInfo\", user=username)\n )", "def get_by_username(cls, username):\n return cls.query.filter_by(username=username).first()", "def find_user_by_username(username):\n if username == None:\n return None;\n \n logging.info(\"finding user %s in redis \" % username)\n key = ENVIRONMENT['REDIS_PREFIX'] + \"users:%s\" % (username)\n data = redis_server.get(key)\n\n if data != None:\n logging.info(\"found user by username (%s): %s\" % (key, data))\n return User(**json.loads(data))\n else:\n logging.info(\"unable 
to find user by username (%s): '%s'\" % (key, username))\n return None", "async def get_by_username(db: Session, username: str) -> User:\n return db.query(User).filter(User.username == username).first()", "def get_user(self, username) -> User:\n raise NotImplementedError", "def get(cls, username, server, bucket=None):\n\t\tusername = cls._clean_username(username)\n\t\tif not username:\n\t\t\traise IDMException(\"you must provide a username\")\n\t\t\n\t\tres = cls.find_on({'type': 'user', 'username': username}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given username\", 404)", "def find_by_username(cls, username):\n user = cls.query.filter_by(username=username).first()\n return user", "def get(self, username):\n session = DBSession()\n result = session.query(User).filter_by(username=username)\n if result.count() < 1:\n return None\n\n return result.one()", "def find_member(message, nickname):\n for member in message.guild.members:\n if nickname in member.display_name:\n return member", "def get_user(username):\n return Users.query.filter_by(username=username).first()", "def get_user_by_username(username): #hiking_object.user_id\n\n return User.query.filter_by(username=username).first()", "def get(self, username=None):\n ownprofile = False\n if username is None:\n # try to use the logged in user if existing\n user = self.user\n if user is None:\n raise werkzeug.exceptions.NotFound()\n else:\n user = self.settings.users.get_by_id(username)\n if user is None:\n raise werkzeug.exceptions.NotFound()\n \n if self.user is not None:\n ownprofile = self.user['_id'] == user['_id']\n\n return self.render(myuser = user, ownprofile = ownprofile)", "def get(self, username):\n q = \"SELECT * FROM profiles where username = ?\"\n r = self._query(q, (username,), fetch='one')\n try:\n return r\n except Exception as e:\n raise e", "def get_member(self, name):\n members = self.wls_board.get_members()\n for member in members:\n if name in member.full_name:\n return member\n return 'None'", "def find_user_by_username(self, username):\n user = None\n logging.info(\"channel finding %s in redis \" % username)\n key = \"%s:%s\" % (self.channel_id, username)\n # see if we have a timestamp in the room\n rank = self.redis_server.zrank(ENVIRONMENT['REDIS_PREFIX'] + \"users_timestamp\", key)\n logging.info(\"channel %s users_timestamp rank (%s): %s \" % (ENVIRONMENT['REDIS_PREFIX'], key, rank))\n if rank != None:\n # get our user from the chat server\n logging.info(\"found users_timestamp, fetching user\")\n user = find_user_by_username(username)\n\n if user != None:\n logging.info(\"found user by username (%s): %s\" % (key, username))\n return user\n else:\n logging.info(\"channel unable to find user by username (%s): '%s'\" % (key, username))\n return None", "def get_by_username(username):\n\n return User.query.filter_by(username=username).first()", "async def get_user_by_username(self, roblox_name: str) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/get-by-username?username={roblox_name}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])", "def member(self, uid):\n try:\n member = self.search(uid=uid)[0]\n except IndexError:\n return None\n\n if self.objects:\n return member\n\n return member[1]", "def member(self, member_id_or_username):\r\n return Member(self, 
member_id_or_username)", "def get_user_by_username(username):\r\n\r\n\t\ttry:\r\n\t\t\tuser = User.objects.filter(username=username, is_active=True)[0]\r\n\t\t\treturn user\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\n\t\treturn None", "def get(self, username):\n user = query_user_by_name(username)\n if user is None:\n return 'User does not exist', 404\n return user.serialize(), 200", "def get(self, username):\n user = UserModel.get_by_username(username)\n if user:\n return user.as_dict()\n return {\"message\": \"user not found\"}, 404", "def get_by_username_or_404(cls, username):\n\n user = cls.query.filter(cls.username == username).first()\n\n if user is None:\n abort(404, description='Resource not found.')\n\n return user", "def getUser(self, ind):\r\n if ind >= 0 and ind < len(self.users):\r\n return self.users[ind]\r\n return None", "def get_user(username):\n if custom_user_model:\n return get_user_model().objects.get_by_natural_key(username)\n else:\n return get_user_model().objects.get(username=username)", "def get_user(self, instance, name):\n return instance.get_user(name)", "def _get_user(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise ObjectDoesNotExist('User %s does not exist!' % username)", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def user_info_by_username_gql(self, username: str) -> User:\n return extract_user_gql(self.public_a1_request(f\"/{username!s}/\")[\"user\"])", "def get_user_by_username(username):\n return User.query.filter_by(username=username).first()", "def get_user(self, username):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT firstname,lastname,othername,email,phoneNumber,registered FROM users\\\n WHERE username = %s\", (username,))\n user_data = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if user_data is not None:\n return user_data\n return False", "def retrieve_user(self, username):\n if username is None:\n self.log_error(MongoDatabase.retrieve_user.__name__ + \"Unexpected empty object: username\")\n return None, None, None\n if len(username) == 0:\n self.log_error(MongoDatabase.retrieve_user.__name__ + \"username is empty\")\n return None, None, None\n\n try:\n user = self.users_collection.find_one({\"username\": username})\n if user is not None:\n return str(user['_id']), user['hash'], user['realname']\n return None, None, None\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return None, None, None", "def get_user(username):\n tx = cypher_transaction()\n query = \"\"\"MATCH (n:user) WHERE n.username={username} RETURN n\"\"\"\n tx.append(query, parameters={'username': username})\n result = tx.commit()\n\n # Returns a result of the form [[\n # Record(\n # columns=('n',),\n # values=(Node('http://localhost:7474/db/data/node/137'),)\n # )\n # ]]\n return _first(result)[0].values[0]", "async def get_chat_member(self, chat_id: typing.Union[base.Integer, base.String],\n user_id: base.Integer) -> types.ChatMember:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.GET_CHAT_MEMBER, payload)\n\n return types.ChatMember(**result)", "def find_by_id(cls, username):\n return cls.query.filter_by(username=username).first()", "def get_user_by_username(username):\n return User.query.filter_by(username=username).first()", "async def 
get_guild_member(guild_id, member_id):\n user_id = await token_check()\n await guild_check(user_id, guild_id)\n member = await app.storage.get_single_member(guild_id, member_id)\n return jsonify(member)", "def user_info_by_username_v1(self, username: str) -> User:\n try:\n result = self.private_request(f\"users/{username}/usernameinfo/\")\n except ClientNotFoundError as e:\n raise UserNotFound(e, username=username, **self.last_json)\n except ClientError as e:\n if \"User not found\" in str(e):\n raise UserNotFound(e, username=username, **self.last_json)\n raise e\n return extract_user_v1(result[\"user\"])", "def find_by_username(username):\n user = User.query.filter(User.username == username).first()\n\n return user", "def get_username_profile(db, username):\n return db['user'].find_one({'username': username})", "def get_single_user(username):\n user = mongo.db.users.find_one({\"username\": username})\n user[\"_id\"] = str(user[\"_id\"])\n return user", "def find_member(self, search_str: str) -> 'dt_member.Member':\n sp = search_str.rsplit(\"#\", 1)\n if len(sp) == 1:\n # Member name only :(\n predicate = lambda member: member.user.name == sp[0] or member.nickname == sp[0]\n else:\n # Discriminator too!\n # Don't check nicknames for this.\n predicate = lambda member: member.user.name == sp[0] \\\n and member.user.discriminator == sp[1]\n\n filtered = filter(predicate, self.members.values())\n return next(filtered, None)", "def search_for_member(self, *, name: str = None, discriminator: str = None,\n full_name: str = None):\n if full_name is not None:\n sp = full_name.split(\"#\", 1)\n return self.search_for_member(name=sp[0], discriminator=sp[1])\n\n # coerce into a proper string\n if isinstance(discriminator, int):\n discriminator = \"{:04d}\".format(discriminator)\n\n for member in self._members.values():\n # ensure discrim matches first\n if discriminator is not None and discriminator != member.user.discriminator:\n continue\n\n if member.user.username == name:\n return member\n\n if member.nickname == name:\n return member", "def retrieve_user(cls, username):\n\t\tuser_detail = dbop.get_user(username)\n\t\treturn cls(**user_detail)", "def usernameFind(self):\r\n return self.username()", "async def find_user_by_username(username):\n try:\n async with TelegramClient(api_username, api_id, api_hash) as client:\n await client.start()\n if not await client.is_user_authorized():\n client.send_code_request(api_id)\n user = await client.get_entity(username)\n return user\n except Exception as e:\n print(\"Failed to search user, details: \", e)", "def _search_member_by_name(self, fullname):\n if not fullname:\n return None\n\n membership = api.portal.get_tool(\"portal_membership\")\n members = membership.searchForMembers(name=fullname)\n if members:\n # in case there are more than one members with the\n # same fullname, we use the first one listed\n member = members[0].getUserId()\n return membership.getMemberInfo(member)", "async def get_profile_by_username(self, *, username: str) -> ProfileInDB:\n profile = await self.db.fetch_one(query=GET_PROFILE_BY_USERNAME_QUERY, values={\"username\": username})\n if profile:\n return ProfileInDB(**profile)", "def user_by_name(username):\n user = User.query.filter(User.username == username).one_or_none()\n return user", "def find_by_username(cls, username):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE username=?\".format(table=cls.TABLE_NAME)\n result = 
cursor.execute(query, (username,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user", "def show_member(self, member, **_params):\r\n return self.get(self.member_path % (member), params=_params)", "def get_user():\n username = request.args.get('username')\n token = request.headers.get('token')\n\n if not username:\n return jsonify({'message': 'Username not provided'}), 404\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin' and token_username != username:\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n if username not in Users.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n return jsonify(Users[username]), 200", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_user_named(self, name: str) -> Union[discord.User, None]:\n result = None\n users = self.users\n\n if len(name) > 5 and name[-5] == \"#\":\n # The 5 length is checking to see if #0000 is in the string,\n # as a#0000 has a length of 6, the minimum for a potential\n # discriminator lookup.\n potential_discriminator = name[-4:]\n\n # do the actual lookup and return if found\n # if it isn't found then we'll do a full name lookup below.\n result = discord.utils.get(users, name=name[:-5], discriminator=potential_discriminator)\n if result is not None:\n return result\n\n def pred(user):\n return user.nick == name or user.name == name\n\n return discord.utils.find(pred, users)", "def find_by_username(cls,username):\n\n for credential in cls.credential_list:\n if credential.username == username:\n return credential", "def get_member(did):\n conn = create_connection(db_location)\n c = conn.cursor()\n c.execute(\"SELECT * FROM members WHERE member_uid = \" + did)\n member = dict((c.description[i][0], value) for i, value in enumerate(c.fetchone()))\n if __debug__:\n print(member)\n conn.commit()\n conn.close()\n return member", "def lookup_friend(self,username):\n if self.isBlank(username) or self.isValidLen(username):\n return False\n safe_input = (username,)\n try:\n vals = self.cur.execute(\"SELECT Client_IP, Client_Port FROM Users WHERE Username=?\" ,safe_input).fetchone()\n if vals:\n return vals[0],str(vals[1])\n else:\n return False\n except LookupError as e:\n return False", "def getPublicUserInfo(self, username):\r\n self.send_getPublicUserInfo(username)\r\n return self.recv_getPublicUserInfo()", "def __call__(self, username):\n return flattrclient.user.User(session=self._session, username=username)", "def get_user(username: str) -> User:\n\n user = User.select(\n lambda db_user: db_user.username == username\n ).first()\n\n if not user:\n raise UserNotFound(username=username)\n\n return user", "def get_user(id):\n pass", "def find_user_by_username(db, username):\n users = db.tables.users\n return db.load_scalar(\n table=users, value={'username': username}, column='id')", "def getOkcupidUser(self, username):\n\t\tself.logger.info(\"Get Okcupid user: %s\", username)\n\t\tuser = self.session.query(Models.Okcupid).filter(Models.Okcupid.username==username).first()\n\t\treturn user", "def get_user(self):\n try:\n cursor = self.db.cursor()\n cursor.execute(\"SELECT username FROM 
users WHERE username=?\", (self.username,))\n return cursor.fetchall()\n except:\n print(\"Error obteniendo usuario\")", "def find_user_by_username(username: str) -> User:\n\n # Find user with this username, or None if there isn't any\n return User.query.filter_by(username=username).first()", "def getuser(gh, user):\n return gh.users(user).get()", "def get(self, username):\n\t\tdb = getattr(g, 'db', None)\n\n\t\tqry = \"SELECT username,email,active,steamid FROM\\\n\t\t\tprofiles WHERE username = %s;\"\n\t\twith db as cursor:\n\t\t\tcursor.execute(qry, (username,))\n\n\t\treturn {'profile':cursor.fetchone()}", "def find_by_user_name(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return user", "def harvest_by_username(client, username):\n try:\n entity = client.get_entity(username)\n except ValueError:\n return 'This username does not exist'\n\n if type(entity) == User:\n entity = client(users.GetFullUserRequest(id=username))\n return harvest_user(client, entity)\n elif type(entity) == Channel:\n return 'This username either belongs to a channel or a group'\n else:\n return 'This username does not belong to a user, bot, channel or a group'", "def user_info_by_username(self, username: str, use_cache: bool = True) -> User:\n if not use_cache or username not in self._usernames_cache:\n try:\n try:\n user = self.user_info_by_username_gql(username)\n except ClientLoginRequired as e:\n if not self.inject_sessionid_to_public():\n raise e\n user = self.user_info_by_username_gql(username) # retry\n except Exception as e:\n if not isinstance(e, ClientError):\n self.logger.exception(e) # Register unknown error\n user = self.user_info_by_username_v1(username)\n self._users_cache[user.pk] = user\n self._usernames_cache[user.username] = user.pk\n return self.user_info(self._usernames_cache[username])", "def getPlayer(self, userid):\r\n return self.__getitem__(userid)", "def get_user_info_by_name(self, username: str) -> dict:", "def test_user_by_username(self):\n username = make_user(self.client)['username']\n resp = self.client.get('/user/'+username,\n headers=api_headers())\n json_resp = json.loads(resp.data.decode('utf-8'))\n self.assertEqual(json_resp['status'], 'user found')\n self.assertEqual(json_resp['user']['username'], username)", "def get_membership(self, username, team):\n try:\n return CourseTeamMembership.objects.get(user__username=username, team=team)\n except CourseTeamMembership.DoesNotExist:\n raise Http404 # lint-amnesty, pylint: disable=raise-missing-from", "def getUserFromKey(key):\n\t\t#get(Key(key))\n\t\t#return None if no user found", "def get_member(self, public_key, private_key=\"\"):\n assert isinstance(public_key, str)\n assert isinstance(private_key, str)\n return Member(public_key, private_key)", "def _find_existing_user(self, username):\n users = User.objects.filter(username=username)\n if users.count() <= 0:\n return None\n else:\n return users[0]" ]
[ "0.75246143", "0.7495561", "0.74683446", "0.74049187", "0.73253155", "0.7299027", "0.72175676", "0.71334416", "0.7068116", "0.7049825", "0.7039728", "0.6946112", "0.69360876", "0.69183743", "0.69003195", "0.689256", "0.68825346", "0.6881717", "0.68705404", "0.68620247", "0.6858043", "0.6858043", "0.6856815", "0.6849666", "0.67962736", "0.67673314", "0.67569596", "0.67307097", "0.66713244", "0.66517437", "0.6642403", "0.66212016", "0.6605253", "0.6563093", "0.6552138", "0.6547935", "0.6543066", "0.651338", "0.65043133", "0.6496409", "0.64615685", "0.6450448", "0.64402556", "0.6429026", "0.64239043", "0.6411869", "0.63941866", "0.63935155", "0.63759124", "0.63723207", "0.63704103", "0.63585305", "0.63570297", "0.63511074", "0.6341653", "0.63256276", "0.6324112", "0.63209856", "0.6306986", "0.63026893", "0.6296965", "0.6291204", "0.6290539", "0.6288476", "0.6287377", "0.62823", "0.6266474", "0.6266221", "0.6236382", "0.6236007", "0.62245363", "0.6221759", "0.62192667", "0.6207298", "0.62071466", "0.6197754", "0.6194857", "0.61935097", "0.6166685", "0.6135438", "0.61269486", "0.6113198", "0.61042154", "0.60808116", "0.6079329", "0.6072895", "0.606873", "0.6058825", "0.60529536", "0.605249", "0.6042838", "0.6028546", "0.60222024", "0.6020514", "0.601554", "0.60138196", "0.6009145", "0.60034055", "0.597988", "0.59675145" ]
0.8309965
0
Catch any method/attribute lookups that are not defined in this class and try to find them on the provided bridge object.
def __getattr__(self, name: str):
    return getattr(self._client, name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _lookup_method(self, call):\n raise Exception(\"_lookup_method must be implemented by subclasses.\")", "def lookup(self, *args, **kwargs): # real signature unknown\n pass", "def lookup(self, *args, **kwargs): # real signature unknown\n pass", "def test_lookup_parameter_handler_object(self, force_field):\n bonds = force_field[\"Bonds\"]\n with pytest.raises(NotImplementedError):\n force_field[bonds]\n with pytest.raises(NotImplementedError):\n force_field[type(bonds)]", "def lookup(self, **kwargs):\n raise NotImplementedError()", "def find(self, objectclass, **kwargs):\n raise NotImplementedError", "def lookup():", "def lookup_filter(self, pbc, name=None, flags={}):\n d = []\n uplookup = None\n updesc = None\n for desc in pbc.descriptions:\n # pick methods but ignore already-bound methods, which can come\n # from an instance attribute\n if (isinstance(desc, description.MethodDesc)\n and desc.selfclassdef is None):\n methclassdef = desc.originclassdef\n if methclassdef is not self and methclassdef.issubclass(self):\n pass # subclasses methods are always candidates\n elif self.issubclass(methclassdef):\n # upward consider only the best match\n if uplookup is None or methclassdef.issubclass(uplookup):\n uplookup = methclassdef\n updesc = desc\n continue\n # for clsdef1 >= clsdef2, we guarantee that\n # clsdef1.lookup_filter(pbc) includes\n # clsdef2.lookup_filter(pbc) (see formal proof...)\n else:\n continue # not matching\n # bind the method by giving it a selfclassdef. Use the\n # more precise subclass that it's coming from.\n desc = desc.bind_self(methclassdef, flags)\n d.append(desc)\n if uplookup is not None:\n d.append(updesc.bind_self(self, flags))\n\n if d:\n return SomePBC(d, can_be_None=pbc.can_be_None)\n elif pbc.can_be_None:\n return s_None\n else:\n return s_ImpossibleValue", "def handle(self, o, params):\n\n if not o:\n return\n\n # Get the class of the object\n clazz = type(o)\n\n for var in [clazz, o]:\n # Check if a static method or variable 'dependencies' exists\n dependencies = getattr(var, \"dependencies\", None)\n\n if not dependencies:\n continue\n\n # Check if we have a method\n if callable(dependencies):\n # Execute that method to get list of dependencies and try to handle them\n self._handle_dependencies(o.dependencies(params))\n return", "def lookup(obj):\n objList = [method_name for method_name in dir(obj)\n if callable(getattr(obj, method_name))]\n return objList", "def __getattr__(self, name):\n\n # Search for named instance method in the class object and if it\n # exists, return callable object with self as hidden argument.\n # Note: you should give self and not self.ptr as a parameter to\n # ObjCBoundMethod, so that it will be able to keep the ObjCInstance\n # alive for chained calls like MyClass.alloc().init() where the\n # object created by alloc() is not assigned to a variable.\n\n # If there's a property with this name; return the value directly.\n # If the name ends with _, we can shortcut this step, because it's\n # clear that we're dealing with a method call.\n if not name.endswith(\"_\"):\n method = self.objc_class._cache_property_accessor(name)\n if method:\n return ObjCBoundMethod(method, self)()\n\n # See if there's a partial method starting with the given name,\n # either on self's class or any of the superclasses.\n cls = self.objc_class\n while cls is not None:\n # Load the class's methods if we haven't done so yet.\n if cls.methods_ptr is None:\n cls._load_methods()\n\n try:\n method = cls.partial_methods[name]\n break\n except KeyError:\n 
cls = cls.superclass\n else:\n method = None\n\n if method is None or set(method.methods) == {frozenset()}:\n # Find a method whose full name matches the given name if no partial\n # method was found, or the partial method can only resolve to a\n # single method that takes no arguments. The latter case avoids\n # returning partial methods in cases where a regular method works\n # just as well.\n method = self.objc_class._cache_method(name.replace(\"_\", \":\"))\n\n if method:\n return ObjCBoundMethod(method, self)\n\n # Check if the attribute name corresponds to an instance attribute defined at\n # runtime from Python. Return it if yes, raise an AttributeError otherwise.\n key = self._associated_attr_key_for_name(name)\n pyo_wrapper = libobjc.objc_getAssociatedObject(self, key)\n\n if pyo_wrapper.value is None:\n raise AttributeError(\n f\"{type(self).__module__}.{type(self).__qualname__} {self.objc_class.name} has no attribute {name}\"\n )\n address = get_ivar(pyo_wrapper, \"wrapped_pointer\")\n pyo = cast(address.value, py_object)\n\n return pyo.value", "def test_lookupChecksClass(self):\n badClass = Record_A('10.0.0.1')\n badClass.CLASS = HS\n servers = {\n ('1.1.2.3', 53): {\n ('foo.example.com', A): {\n 'answers': [('foo.example.com', badClass)],\n 'authority': [('foo.example.com', Record_NS('ns1.example.com'))],\n 'additional': [('ns1.example.com', Record_A('10.0.0.2'))],\n },\n },\n ('10.0.0.2', 53): {\n ('foo.example.com', A): {\n 'answers': [('foo.example.com', Record_A('10.0.0.3'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('foo.example.com')\n d.addCallback(getOnePayload)\n d.addCallback(self.assertEqual, Record_A('10.0.0.3'))\n return d", "def test_lookup_method(self):\n\n # Run lookup method\n\n self.urlsclass.lookup()\n\n assert len(self.urlsclass.urldicts) == len(self.urlsclass.dedupurls)\n\n # TODO: test a redirect url", "def __getattr__(self, name: str) -> Any:\n # We don't want to return anything for python copy / pickle methods.\n if name in _UNDEFINED_COPY_PICKLE_METHODS:\n raise AttributeError()\n self._try_setup()\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n raise AttributeError(\n f'\"{self.__class__.__name__}\" object has no attribute \"{name}\"')", "def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class", "def search(instance, owner, descriptor):\n '''Search class dictionary first'''\n if instance is not None:\n for name, value in instance.__class__.__dict__.items():\n if value is descriptor:\n return name\n '''Then search all the ancestors dictionary''' \n for cls in type(instance).__bases__:\n for name, value in cls.__dict__.items():\n if value is descriptor:\n return name\n elif owner is not None:\n for name, value in owner.__dict__.items():\n if value is descriptor:\n return name\n '''Then search all the ancestors dictionary''' \n for cls in owner.__bases__:\n for name, value in cls.__dict__.items():\n if value is descriptor:\n return name\n return None", "def FindObject(self, tagged_address):\n raise NotImplementedError", "def __resolve_attribute(self, attr_dict):\n result = dict()\n for key in attr_dict:\n attr = attr_dict[key]\n if type(attr) is dict:\n result[key] = self.__resolve_attribute(attr)\n else:\n result[key] = self._resolve_curly_braces(attr)\n # resolve null key in curly brace values, such as \"${__NULL__,Online}\"\n if key in 
NULL_VALUE_KEY_FIELDS and result[key] == NULL_VALUE_KEY:\n result[key] = None\n\n for key in [GET_METHOD, SET_METHOD, GET_MBEAN_TYPE, SET_MBEAN_TYPE]:\n if key in result and len(result[key]) == 0:\n del result[key]\n return result", "def __call__(self, proxy):\n def _lookup():\n try:\n return getattr(self, proxy)\n except AttributeError:\n raise UnboundProxyError(\"object '%s' unbound\" % proxy)\n return Proxy(_lookup)", "def test_all_finder_methods(self):\n methods = ['all', 'first', 'find']\n\n for method in methods:\n result = getattr(self.Test, method)\n self.assertEqual(type(result).__name__, 'instancemethod')\n self.assertEqual(result.im_class.__name__, 'Relation')", "def __getattr__(self, name):\n return self.lookup(name)", "def __getattr__(self, name: str) -> Any:\n blocked_class = self.getattr_depth_check(name, already_found=False)\n if blocked_class is not None:\n own_class = f\"{type(self).__module__}.{type(self).__name__}\"\n error_str = (\n f\"Error: Recursive attribute lookup for {name} from {own_class} is \"\n f\"ambiguous and hides attribute from {blocked_class}\"\n )\n raise AttributeError(error_str)\n\n return self.getattr_recursive(name)", "def do_poortego_find(self, arg):\n poortego_find(self.my_interface, arg)", "def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name\n filtered_methods = [f for f in protocol.methods if f.is_abstract]\n return self.match_Functions_against_Class(\n filtered_methods, unknown, subst)", "def lookup(self, cls, name, mode):\n mro = [el.__name__ for el in cls.mro()]\n registry = self.method_registry if mode=='method' else self.type_registry\n\n for class_name in mro:\n entries = registry[class_name]\n if name in entries:\n return entries[name]\n raise KeyError(\"Could not find method named %r.\"\n \" Please ensure classes using component decorators\"\n \" are decorated with the Model.definition\"\n \" class decorator.\" % name)", "def _resolve(self):\n pass", "def findPlug(node, attr):\n\n pass", "def set_element_class_lookup(self, lookup=None): # real signature unknown; restored from __doc__\n pass", "def post_lookup_hook(self):\n pass", "def object_hook(dct):\n return next((type.object_hook(dct)\n for ((key, cls), type)\n in STONE.items()\n if key in dct), dct)", "def get_object(value, all_dicts, existing_objects, ignore_names, depth):\n\n\n if not isinstance(value, str) and isinstance(value, collections.Iterable):\n return [get_object(val, all_dicts, existing_objects, ignore_names, depth + 1)\n for val in value]\n if value in ignore_names:\n existing_objects[value] = None\n return None\n if value in existing_objects:\n return existing_objects[value]\n if not isinstance(value, str) or not value.startswith(\"object:\"):\n return value\n\n name = value[7:]\n if name not in all_dicts:\n raise Exception(\"Object '{}' was not defined in the configuration.\"\n .format(name))\n this_dict = all_dicts[name]\n\n if depth > 20:\n raise Exception(\"Configuration does also object depth more thatn 20.\")\n if 'class' not in this_dict:\n raise Exception(\"Class is not defined for object: {}\".format(name))\n\n clazz = this_dict['class']\n\n if not isclass(clazz) and not isfunction(clazz):\n raise Exception((\"The 'class' field with value '{}' in object '{}'\"\n \" should be a type or function, was '{}'\")\n .format(clazz, name, type(clazz)))\n\n def process_arg(arg):\n \"\"\" Resolves potential references to other objects \"\"\"\n return get_object(arg, all_dicts, existing_objects, ignore_names, depth + 1)\n\n 
args = {k: process_arg(arg)\n for k, arg in this_dict.items() if k != 'class'}\n\n func_to_call = clazz.__init__ if isclass(clazz) else clazz\n arg_spec = getargspec(func_to_call)\n\n # if the parameters are not passed via keywords, check whether they match\n if not arg_spec.keywords:\n defaults = arg_spec.defaults if arg_spec.defaults else ()\n if arg_spec.args[0] == 'self':\n required_args = set(arg_spec.args[1:-len(defaults)])\n else:\n required_args = set(arg_spec.args[:-len(defaults)])\n all_args = set(arg_spec.args)\n additional_args = set()\n\n for key in list(args.keys()):\n if key in required_args:\n required_args.remove(key)\n if key not in all_args:\n additional_args.add(key)\n\n if required_args:\n raise Exception(\"Object '{}' is missing required args: {}\"\n .format(name, \", \".join(required_args)))\n if additional_args:\n raise Exception(\"Object '{}' got unexpected argument: {}\"\n .format(name, \", \".join(additional_args)))\n\n try:\n result = clazz(**args)\n except Exception as exc:\n raise ConfigBuildException(name, exc) from None\n existing_objects[value] = result\n return result", "def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None", "def test_register_lookup_handler_not_subclass() -> None:\n\n class FakeLookup:\n \"\"\"Fake lookup.\"\"\"\n\n with pytest.raises(TypeError):\n register_lookup_handler(\"test\", FakeLookup) # type: ignore", "def _resolve_lookup((model, lookup, arg_name), view_kwargs):\n value = view_kwargs.get(arg_name)\n if value is None:\n raise ValueError(\"Expected kwarg '%s' not found.\" % arg_name)\n if isinstance(model, basestring):\n model_class = get_model(*model.split('.'))\n else:\n model_class = model\n if model_class is None:\n raise ValueError(\"The given argument '%s' is not a valid model.\" %\n model)\n if inspect.isclass(model_class) and not issubclass(model_class, Model):\n raise ValueError(\"The argument '%s' needs to be a model.\" % model)\n return get_object_or_404(model_class, **{lookup: value})", "def object_search(self, ostring, caller=None,\n global_search=False, \n attribute_name=None, location=None):\n #ostring = str(ostring).strip()\n\n if not ostring:\n return [] \n\n # Easiest case - dbref matching (always exact) \n dbref = self.dbref(ostring)\n if dbref:\n dbref_match = self.dbref_search(dbref)\n if dbref_match:\n return [dbref_match]\n\n if not location and caller and hasattr(caller, \"location\"):\n location = caller.location\n\n # Test some common self-references\n\n if location and ostring == 'here':\n return [location] \n if caller and ostring in ['me', 'self']:\n return [caller]\n if caller and ostring in ['*me', '*self']: \n return [caller] \n \n # Test if we are looking for an object controlled by a\n # specific player\n\n if utils.to_unicode(ostring).startswith(\"*\"):\n # Player search - try to find obj by its player's name\n player_match = self.get_object_with_player(ostring)\n if player_match is not None:\n return [player_match]\n \n # Search for keys, aliases or other attributes\n \n search_locations = [None] # this means a global search\n if not global_search and location:\n # Test if we are 
referring to the current room\n if location and (ostring.lower() == location.key.lower() \n or ostring.lower() in [alias.lower() for alias in location.aliases]):\n return [location]\n # otherwise, setup the locations to search in \n search_locations = [location]\n if caller:\n search_locations.append(caller)\n \n def local_and_global_search(ostring, exact=False):\n \"Helper method for searching objects\" \n matches = [] \n for location in search_locations: \n if attribute_name:\n # Attribute/property search. First, search for db_<attrname> matches on the model\n matches.extend(self.get_objs_with_db_property_match(attribute_name, ostring, location, exact))\n if not matches:\n # Next, try Attribute matches\n matches.extend(self.get_objs_with_attr_match(attribute_name, ostring, location, exact))\n else:\n # No attribute/property named. Do a normal key/alias-search \n matches.extend(self.get_objs_with_key_or_alias(ostring, location, exact))\n return matches\n\n # Search through all possibilities.\n\n match_number = None\n matches = local_and_global_search(ostring, exact=True) \n if not matches:\n # if we have no match, check if we are dealing with an \"N-keyword\" query - if so, strip it.\n match_number, ostring = AT_MULTIMATCH_INPUT(ostring)\n if match_number != None and ostring:\n # Run search again, without match number:\n matches = local_and_global_search(ostring, exact=True)\n if ostring and (len(matches) > 1 or not matches):\n # Already multimatch or no matches. Run a fuzzy matching.\n matches = local_and_global_search(ostring, exact=False)\n elif len(matches) > 1:\n # multiple matches already. Run a fuzzy search. This catches partial matches (suggestions)\n matches = local_and_global_search(ostring, exact=False)\n \n # deal with the result\n if len(matches) > 1 and match_number != None:\n # We have multiple matches, but a N-type match number is available to separate them.\n try:\n matches = [matches[match_number]]\n except IndexError:\n pass\n # This is always a list.\n return matches", "def __getattr__(name):\n if name == \"GLOBAL_PDB\":\n return local.GLOBAL_PDB\n raise AttributeError(\"module '{}' has no attribute '{}'\".format(__name__, name))", "def __init__(self):\n self.lookup = {}", "def __getattr__(self, item):\n if item in self.unknown_args:\n return self.unknown_args.get(item)\n return super(SuperResolution, self).__getattr__(item)", "def getBinding(o, name):\n raise RuntimeError()", "def lookup(obj):\n return(dir(obj))", "def lookup(obj):\n a = list(dir(obj))\n return a", "def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):", "def get_injection_points(obj):\n\n return scan_methods(obj, lambda attr: attr.check(Tags.INJECTION_POINT))", "def resolve_attributes_lookup(self, current_objects, attributes):\n values = []\n\n for current_object in current_objects:\n if not hasattr(current_object, attributes[0]):\n raise SearchFieldError(\n \"The model '%r' does not have a model_attr '%s'.\"\n % (repr(current_object), attributes[0])\n )\n\n if len(attributes) > 1:\n current_objects_in_attr = self.get_iterable_objects(\n getattr(current_object, attributes[0])\n )\n values.extend(\n self.resolve_attributes_lookup(\n current_objects_in_attr, attributes[1:]\n )\n )\n continue\n\n current_object = getattr(current_object, attributes[0])\n\n if current_object is None:\n if self.has_default():\n current_object = self._default\n elif self.null:\n current_object = None\n else:\n raise SearchFieldError(\n \"The model '%s' combined with model_attr '%s' returned None, but doesn't 
allow \"\n \"a default or null value.\"\n % (repr(current_object), self.model_attr)\n )\n\n if callable(current_object):\n values.append(current_object())\n else:\n values.append(current_object)\n\n return values", "def _scan_for_mapped_bases(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n) -> None:\n\n info = util.info_for_cls(cls, api)\n\n if info is None:\n return\n\n for base_info in info.mro[1:-1]:\n if base_info.fullname.startswith(\"builtins\"):\n continue\n\n # scan each base for mapped attributes. if they are not already\n # scanned (but have all their type info), that means they are unmapped\n # mixins\n scan_declarative_assignments_and_apply_types(\n base_info.defn, api, is_mixin_scan=True\n )", "def _find_labelled_objects_functions():\n\n def _num_args_without_default_value(fn_sig):\n return len(\n [\n param\n for param in fn_sig.parameters.values()\n if param.default is inspect._empty\n ]\n )\n\n def _takes_object_labels_kwarg(fn):\n fn_sig = inspect.signature(fn)\n return (\n \"object_labels\" in fn_sig.parameters\n and _num_args_without_default_value(fn_sig) == 1\n )\n\n fns = [\n (fn_name, fn)\n for (fn_name, fn) in inspect.getmembers(\n sys.modules[__name__], inspect.isfunction\n )\n if not fn_name.startswith(\"_\") and _takes_object_labels_kwarg(fn)\n ]\n\n return dict(fns)", "def _find_handler(self, exc, exc_setup):\n catch_sites = [findop(block, 'exc_catch') for block in exc_setup.args]\n for exc_catch in catch_sites:\n for exc_type in exc_catch.args:\n with self.if_(self.exc_matches(types.Bool, [exc, exc_type])):\n self.jump(exc_catch.block)\n block = self._curblock\n self.position_at_end(block)", "def autoBind(myClass, bindIfnoDocumented):\n\n # dir(myClass) is a list of the names of\n # everything in class\n myClass.setModuleDescription(myClass.__doc__)\n\n for thing in dir(myClass):\n # getattr(x, \"y\") is exactly: x.y\n function = getattr(myClass, thing)\n if callable(function):\n if (type(function) == type(myClass.function)):\n\t if (bindIfnoDocumented or function.__doc__ != \"\"):\n\t if (thing[0] != \"_\"): # private method\n \t if (function.__doc__):\n \t myClass.functionName(thing, myClass.getName(), function.__doc__)\n\t else:\n\t myClass.functionName(thing, myClass.getName(), \"\")\n\n\t for param in function.func_code.co_varnames:\n\t if (param != \"self\"):\n\t myClass.addParam(param)\n\n\t myClass.bindWithParam(myClass.getName(),thing,len(function.func_code.co_varnames)-1)", "def findMixedMethods(self):\n \n ### Now resolve the methods. 
Don't bother defensive\n ### programming here, if the priors are inconsistent then this\n ### shouldn't work at all.\n self.mixedMeths = []\n for iMeth in range(len(self.mixedNames)):\n thisMeth = getattr(self, self.mixedNames[iMeth])\n self.mixedMeths.append(thisMeth)", "def test_lookupByRemoteName(self):\n typeClass = annotation.getTypeClassByTypeName(\n 'test.Change'\n )\n self.assertTrue(typeClass is not None)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def edge_lookup(self, keylookup_obj, id_strct, debug=False):\n # pylint: disable=E1102, R0201, W0613\n yield NotImplemented(\"This method must be overridden by the base class.\")", "def get_lookup(channel):\n try:\n lookup_label = settings.AJAX_LOOKUP_CHANNELS[channel]\n except (KeyError, AttributeError):\n raise ImproperlyConfigured(\"settings.AJAX_LOOKUP_CHANNELS not configured correctly for %r\" % channel)\n\n if isinstance(lookup_label,dict):\n # 'channel' : dict(model='app.model', search_field='title' )\n # generate a simple channel dynamically\n return make_channel( lookup_label['model'], lookup_label['search_field'] )\n else:\n # 'channel' : ('app.module','LookupClass')\n # from app.module load LookupClass and instantiate\n lookup_module = __import__( lookup_label[0],{},{},[''])\n lookup_class = getattr(lookup_module,lookup_label[1] )\n return lookup_class()", "def test_create_forcefield_custom_handler_classes(self):\n from openff.toolkit.typing.engines.smirnoff import BondHandler\n\n forcefield = ForceField(parameter_handler_classes=[BondHandler])\n\n # Should find BondHandler, since we registered it\n forcefield.get_parameter_handler(\"Bonds\")\n\n # Shouldn't find AngleHandler, since we didn't allow that to be registered\n with pytest.raises(KeyError):\n forcefield.get_parameter_handler(\"Angles\")", "def __init__(self, obj, adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods)", "def __call__(self, obj):\n return getattr(self, 'handle_' + type(obj).__name__, self.__unknown)(obj)", "def __getattr__(self, name):\n ...", "def find(self):\n for method in self.search_locations:\n out = method()\n if out is not None:\n return out\n return None", "def check_compliance(objects):\n for name, obj in objects.items():\n if isinstance(obj, SaveableInterface):\n continue\n # explicitly check for required methods\n for attr_to_check in {\"state_dict\", \"load_state_dict\"}:\n if not hasattr(obj, attr_to_check):\n raise TypeError(\"{} of {} needs to implement the {} fn\".format(\n obj, type(obj), attr_to_check))", "def hasUnoInterface( oObject, cInterfaceName ):\n\n # Get the Introspection service.\n oIntrospection = createUnoService( \"com.sun.star.beans.Introspection\" )\n\n # Now inspect the object to learn about it. 
\n oObjInfo = oIntrospection.inspect( oObject )\n \n # Obtain an array describing all methods of the object.\n oMethods = oObjInfo.getMethods( uno.getConstantByName( \"com.sun.star.beans.MethodConcept.ALL\" ) )\n # Now look at every method.\n for oMethod in oMethods:\n # Check the method's interface to see if\n # these aren't the droids you're looking for.\n cMethodInterfaceName = oMethod.getDeclaringClass().getName()\n if cMethodInterfaceName == cInterfaceName:\n return True\n return False", "def find_handler(url):\n for handler in __all__:\n # Get the symbol for handler\n mod = globals()[handler]\n # Ask handler if it can handle the url\n if getattr(mod, \"can_handle\")(url):\n return mod\n return None", "def test_method_not_found(self):\n\n pkt = {'type': 'event',\n 'name': 'foo',\n 'endpoint': '/chat',\n 'args': []\n }\n\n self.ns.process_packet(pkt)\n\n kwargs = dict(\n msg_id=None,\n endpoint='/woot',\n quiet=False\n )\n\n self.environ['socketio'].error.assert_called_with(\n 'no_such_method',\n 'The method \"%s\" was not found' % 'on_foo',\n **kwargs\n )", "def lookup(name):", "def lookup(name):", "def _get_discoverable_attribute(_class):\n return \"__{}_is_discoverable\".format(_class.__name__)", "def __getattr__(self,name):\r\n w=self.mapping.get(name,None)\r\n if w is not None:\r\n w.set_name(name)\r\n w.bind(self)\r\n return w\r\n else:\r\n raise AttributeError('{} not found in {}'.format(name,self.name))", "def lookupWithMapper(mapper, fqn):\n try:\n return mapper.lookup(fqn)\n except ImportError, e:\n raise e\n except:\n print \"Error raised by Exocet mapper while loading %r\" % (fqn)\n traceback.print_exc()\n raise ImportError(fqn)", "async def discover(self):\n raise NotImplementedError(\"this is a base class\")", "def __getattr__(self, item):\n # Early-exit for properties other than check_-methods.\n if not item.startswith(\"check_\"):\n return super().__getattribute__(self, item)\n\n # Lookup the appropriate method. 
If not found, complain.\n name = item.replace(\"check_\", \"\")\n try:\n func, on_true, on_false = self.checks[name]\n except KeyError:\n raise NotImplementedError(\"No check for '{}'.\".format(name))\n else:\n return partial(self._base_check, func, on_true, on_false)", "def test_lookup_missing(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n ret = env.lookup('foo')\n self.assertIsNone(ret)", "def test_setup_object_without__all__name__(self):\n with self.assertRaises(AttributeError):\n pluggable_package.setup(self._test_object)", "def queryAdapterOnClass(klass, interface=None, name=u''):\n sm = component.getGlobalSiteManager()\n required = implementedBy(klass)\n factory = sm.adapters.lookup((required,), interface, name)\n if factory is not None:\n result = factory(klass)\n if result is not None:\n return result\n return None", "def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class", "def search_lxc_bridges():\n bridges = __context__.get(\"lxc.bridges\", None)\n # either match not yet called or no bridges were found\n # to handle the case where lxc was not installed on the first\n # call\n if not bridges:\n bridges = set()\n running_bridges = set()\n bridges.add(DEFAULT_BR)\n try:\n output = __salt__[\"cmd.run_all\"](\"brctl show\")\n for line in output[\"stdout\"].splitlines()[1:]:\n if not line.startswith(\" \"):\n running_bridges.add(line.split()[0].strip())\n except (SaltInvocationError, CommandExecutionError):\n pass\n for ifc, ip in __grains__.get(\"ip_interfaces\", {}).items():\n if ifc in running_bridges:\n bridges.add(ifc)\n elif os.path.exists(f\"/sys/devices/virtual/net/{ifc}/bridge\"):\n bridges.add(ifc)\n bridges = list(bridges)\n # if we found interfaces that have lxc in their names\n # we filter them as being the potential lxc bridges\n # we also try to default on br0 on other cases\n\n def sort_bridges(a):\n pref = \"z\"\n if \"lxc\" in a:\n pref = \"a\"\n elif \"br0\" == a:\n pref = \"c\"\n return f\"{pref}_{a}\"\n\n bridges.sort(key=sort_bridges)\n __context__[\"lxc.bridges\"] = bridges\n return bridges", "def autoBind(myClass, bindIfnoDocumented):\n\n # dir(myClass) is a list of the names of\n # everything in class\n myClass.setModuleDescription(myClass.__doc__)\n\n for thing in dir(myClass):\n # getattr(x, \"y\") is exactly: x.y\n function = getattr(myClass, thing)\n if callable(function):\n if (type(function) == type(myClass.__init__)):\n if (bindIfnoDocumented or function.__doc__ != \"\"):\n if (thing[0] != \"_\"): # private method\n if (function.__doc__):\n myClass.functionName(thing, myClass.getName(), function.__doc__)\n else:\n myClass.functionName(thing, myClass.getName(), \"\")\n\n for param in function.func_code.co_varnames:\n if (param != \"self\"):\n myClass.addParam(param)\n myClass._bindWithParam(myClass.getName(),thing,len(function.func_code.co_varnames)-1)", "def hasUnoInterface( oObject, cInterfaceName ):\n\n # Get the Introspection service.\n oIntrospection = createUnoService( \"com.sun.star.beans.Introspection\" )\n\n # Now inspect the object to learn about it.\n oObjInfo = oIntrospection.inspect( oObject )\n\n # Obtain an array describing all methods of the object.\n oMethods = oObjInfo.getMethods( uno.getConstantByName( \"com.sun.star.beans.MethodConcept.ALL\" ) )\n # Now look at every method.\n for oMethod in oMethods:\n # 
Check the method's interface to see if\n # these aren't the droids you're looking for.\n cMethodInterfaceName = oMethod.getDeclaringClass().getName()\n if cMethodInterfaceName == cInterfaceName:\n return True\n return False", "def __getattr__(self, name):\n blocked_class = self.getattr_depth_check(name, already_found=False)\n if blocked_class is not None:\n own_class = \"{0}.{1}\".format(type(self).__module__, type(self).__name__)\n format_str = (\"Error: Recursive attribute lookup for {0} from {1} is \"\n \"ambiguous and hides attribute from {2}\")\n raise AttributeError(format_str.format(name, own_class, blocked_class))\n\n return self.getattr_recursive(name)", "def AddressOf(obj):\n raise NotImplementedError('AddressOf does not work in Python and code will likely need refactoring')", "def _getattr_path(obj: Any, path: str) -> Any:\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj", "def lookup(obj):\n\n return (dir(obj))", "def process(self, obj):\r\n attrs = None\r\n\r\n if self._attribute is not None:\r\n attrs = self._attribute\r\n elif self._field_name is not None:\r\n attrs = self._field_name\r\n\r\n if attrs is not None:\r\n current_object = obj\r\n\r\n # Check for `__` in the field for looking through the relation.\r\n attrs = attrs.split('__')\r\n\r\n if isinstance(obj, dict):\r\n for attr in attrs:\r\n current_object = current_object.get(attr, None)\r\n\r\n if callable(current_object):\r\n current_object = current_object()\r\n else:\r\n for attr in attrs:\r\n current_object = getattr(current_object, attr, None)\r\n\r\n if callable(current_object):\r\n current_object = current_object()\r\n\r\n return current_object", "def _load_objects(self):\n self._get_package()\n\n object_names = [name for name in dir(self._sdk) if name != \"GATDLSession\" and name != \"SDKInfo\" and name.startswith(\"GA\") and not name.endswith(\"Fetcher\")]\n\n for object_name in object_names:\n obj = getattr(self._sdk, object_name)\n self._objects_mapping[obj.rest_name] = object_name", "def test_calls_finder_options(self):\n class Test(pyperry.base.Base):\n foo = Scope(where='foo')\n\n rel = Test.foo()\n\n self.assertEqual(type(rel), pyperry.relation.Relation)\n self.assertEqual(rel.query(), { 'where': ['foo'] })", "def __getattr__(self, name):\n c = self.context\n # Direct type or subclass of type\n if type(c).__name__.lower() == name or name in [x.__name__.lower() for x in type(c).__bases__]:\n return c\n\n # If the attached object is the wrong type then see if *it* has a property of that name\n return getattr(c, name, None)", "def identify(self, visitor, **kwds):\n # attempt to\n try:\n # ask the {visitor} for a handler for my type\n handler = visitor.library\n # if it doesn't exist\n except AttributeError:\n # chain up\n return super().identify(visitor=visitor, **kwds)\n # if it does, invoke it\n return handler(library=self, **kwds)", "def _resolve_attr(self, obj, attrspec):\n attrssplit = attrspec.split(\".\")\n attr = attrssplit[0]\n try:\n obj = obj[int(attr)] # In case list element\n except ValueError:\n try:\n obj = obj[attr]\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n if len(attrssplit) > 1:\n attrspec = attrspec.partition(\".\")[2] # right part of the string.\n return self._resolve_attr(obj, attrspec) # Recurse\n return obj", "def lookup_by_class(dict_,class_):\n v = None\n for c in classlist(class_)[::-1]:\n if c in dict_:\n v = 
dict_[c]\n break\n return v", "def _build_lookup(tree: dict, stdlib_lookup: bool = False) -> None:\n def _apply(item: dict, python_stdlib: set) -> None:\n if item[\"type\"] == \"module\" and item[\"imports\"]:\n package = item[\"fullname\"].partition(\".\")[0]\n for import_module in item[\"imports\"].values():\n import_module[\"lookup\"] = None\n name, level, relative = _get_name_level_relative_import_module(import_module)\n # So we first try to find a module with the expected name in the same directory\n # We look the parent item of the current module\n target = _look_in_package(tree, item[\"path\"], name, level=level)\n if target:\n import_module[\"lookup\"] = target\n else:\n # We now look if a package or module has the same name (within the same package)\n target = find_tree(\n tree,\n lambda x, n, p: (x[\"fullname\"] == n) and (x[\"fullname\"].partition(\".\")[0] == p),\n args=(name, package)\n )\n if target:\n import_module[\"lookup\"] = target[\"path\"]\n elif relative:\n # We haven't found so it might be a symbol imported by a package in __init__.py\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n elif name.partition(\".\")[0] == item[\"fullname\"].partition(\".\")[0]:\n # This is in case a module from within the same package has not been found\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n else:\n # In last resort, we look for the package in the standard library\n if name in python_stdlib:\n import_module[\"lookup\"] = \"@stdlib\"\n apply_tree(tree, _apply, args=(_build_python_stdlib(stdlib_lookup),))", "def lookup(obj):\n return list(dir(obj))", "def lookup(obj):\n return list(dir(obj))", "def inspect(self, object, hooked_only: bool=True):\n for typename, check in self:\n # Skip checks if it is not required for this type.\n if hooked_only and len(self.__hooks) and typename not in self.__hooks:\n continue\n\n # Keep going if object is not matching.\n if not check(object):\n continue\n\n if typename in self.__hooks:\n return self.__hooks[typename](object)\n elif self.__default_hook:\n return self.__default_hook(object)\n else:\n return typename\n\n # Situation that occurs when receiving a type checks are not covering.\n if self.__default_hook:\n return self.__default_hook(object)\n return None", "def search_import(\n method: str, modules: List[Union[str, ModuleType]]\n) -> Optional[object]:\n for module in modules:\n try:\n\n if isinstance(module, ModuleType):\n mod = module\n elif isinstance(module, str):\n # get module by string name\n mod = importlib.import_module(module)\n else:\n raise TypeError('Must be list of strings or ModuleType')\n\n # get method from module by string name\n met = getattr(mod, method, None)\n\n if met:\n return met\n\n except ImportError: # import_module can fail\n continue\n\n return None", "def find_bridge(i, j, chain) :\n B = chain.bridges_dict\n br = None\n for b in B.keys() :\n if (B[b].lumen1 == i and B[b].lumen2 == j) or (B[b].lumen1 == j and B[b].lumen2 == i) :\n br = b\n if br == None :\n print('No bridge found to connect these lumens ('+str(i)+', '+str(j)+') !')\n return br" ]
[ "0.5799882", "0.56170505", "0.56170505", "0.5608421", "0.52733755", "0.51485467", "0.5021419", "0.49733004", "0.48889658", "0.48796782", "0.4817488", "0.47882497", "0.478772", "0.47650564", "0.4761213", "0.47604015", "0.475525", "0.47418138", "0.47340906", "0.47281703", "0.46887425", "0.4688649", "0.4687102", "0.46848592", "0.46846187", "0.4678835", "0.46641296", "0.46361285", "0.46282768", "0.4612363", "0.45881236", "0.4580914", "0.45676392", "0.4566791", "0.45638832", "0.45555782", "0.4551496", "0.45355067", "0.45247778", "0.4523759", "0.4521698", "0.45165387", "0.4501169", "0.4496944", "0.44887584", "0.44840115", "0.44834557", "0.4481831", "0.4480058", "0.44788384", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44787514", "0.44780248", "0.4474848", "0.44739947", "0.44666344", "0.4464314", "0.44636053", "0.4454747", "0.44401056", "0.4431933", "0.44305566", "0.44222352", "0.4421823", "0.4421823", "0.44147435", "0.44119576", "0.44041353", "0.4394222", "0.43925193", "0.43886355", "0.43815333", "0.43799907", "0.43783084", "0.43681732", "0.4362293", "0.43540755", "0.43494388", "0.43485662", "0.4344443", "0.43379125", "0.43377954", "0.43220574", "0.43174788", "0.43161988", "0.43145478", "0.430596", "0.43009642", "0.4296322", "0.42960638", "0.42960638", "0.4284913", "0.4284612", "0.42613724" ]
0.0
-1
Test names like Goat Cat
def test_first_last(self):
        full_name = get_full_name("pony", "cat")
        self.assertEqual(full_name, "Pony Cat")

        full_name = get_full_name("goat", "cat")
        self.assertEqual(full_name, "Goat Cat")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_title(names):", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"", "def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')", "def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)", "def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)", "def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")", "def test_sanitize_name(self):\n assert scrape_category.sanitize_name(\" foo \") == \"foo\"\n assert scrape_category.sanitize_name(' foo\" ') == \"foo\"\n assert scrape_category.sanitize_name(' foo...bar ') == \"foo.bar\"", "def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)", "def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')", "def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'", "def tester(name):\n return len(name)", "def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)", "def test_legal_names(self):\n 
names = [i[0] for i in generate_products()]\n\n for n in names:\n name = str(n).split()\n name1 = name[0]\n name2 = name[1]\n self.assertIn(name1, ADJECTIVES)\n self.assertIn(name2, NOUNS)", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def test_three(self):\n name = get_formatted_name('david', 'malan', 'j')\n self.assertEqual(name, 'David J Malan')", "def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)", "def test_is_valid_kubernetes_resource_name_valid_input():\n # test valid names\n assert is_valid_kubernetes_resource_name(name=\"l0l\")\n assert is_valid_kubernetes_resource_name(name=\"l-l\")\n assert is_valid_kubernetes_resource_name(name=\"l.l\")\n assert is_valid_kubernetes_resource_name(name=\"4-you\")\n assert is_valid_kubernetes_resource_name(name=\"you.2\")", "def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()", "def test_name(self, data, firstname, secondname):\n layer = Points(data)\n assert layer.name == \"Points\"\n\n layer = Points(data, name=firstname)\n assert layer.name == firstname\n\n layer.name = secondname\n assert layer.name == secondname", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_short_full_name(self):\n self.assertEqual(self.john.full_name_short, \"J. Doe\")\n self.assertEqual(self.solar.full_name_short, \"N. 
Graule\")", "def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))", "def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)", "def test_legal_names(self):\n gen_prods_split = [p.name.split(\" \")\n for p in generate_products()]\n should_be_adjs = [n[0] for n in gen_prods_split]\n should_be_nouns = [n[1] for n in gen_prods_split]\n\n for a in should_be_adjs:\n self.assertIn(a, ADJECTIVES)\n\n for n in should_be_nouns:\n self.assertIn(n, NOUNS)", "def test_animals_can_speak(self):\n self.assertEqual(self.lion, 'roar')\n self.assertEqual(self.cat, 'meow')", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']", "def test_full_name(self):\n self.assertEqual(self.john.full_name, \"John Doe\")\n self.assertEqual(self.solar.full_name, \"Nathan Graule\")", "def test_casing(self):\n char = Character(type=['Fish', 'Great Ape'])\n assert char.type_key == 'fish'", "def test_name(name):\n # To work with the name, we remove the address and then\n # split it by its blanks\n name = name.split(\",\")[0]\n name = name.split()\n # First, we check whether the fictional person is a doctor or not\n doctor = 0\n if \"Dr.\" in name:\n doctor = 1\n\n # We save the results in a list\n result = [doctor]\n # Next we look at whether the person has a double first name\n if \"-\" in name[-2]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check if the person hat a double last name.\n if \"-\" in name[-1]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check whether the person is male or female.\n first_name = name[-2]\n if result[1] == 1:\n first_name = (first_name.split(\"-\"))[-2]\n if (first_name in names.woman and \"Herr\" not in name) or \"Frau\" in name:\n result.append(\"female\")\n elif (first_name in names.man and \"Frau\" not in name) or \"Herr\" in name:\n result.append(\"male\")\n return result", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def test_name(self):\n\n for name in TEST_NAMES:\n self.colorspace.setName(name)\n self.assertEqual(name, self.colorspace.getName())", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)", "def test__check_invalid_nickname__valid_name(self):\n self.assertEqual(\n first=trainercode.format_trainer_code(\"111122223333\"),\n second=\"1111 2222 3333\"\n )", "def test_strings_with_foo(self):\n write this test!", "def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)", "def test_get_by_name1(self):\n 
pass", "def test_function_name_had_cap_letter():\n functions = inspect.getmembers(s7, inspect.isfunction)\n for function in functions:\n assert len(re.findall('([A-Z])', function[0])) == 0, \"You have used Capital letter(s) in your function names\"", "def PytestNameToLabel(pytest_name):\n def _GuessIsAcronym(word):\n return not word.isalpha() or all(c not in 'aeiouy' for c in word)\n\n pytest_name = pytest_name.replace('.', ' ').replace('_', ' ')\n parts = []\n seen = set()\n for part in pytest_name.split():\n if part in seen:\n continue\n seen.add(part)\n parts.append(\n FactoryTest._PYTEST_LABEL_MAP.get(\n part, part.upper() if _GuessIsAcronym(part) else part.title()))\n return ' '.join(parts)", "def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"", "def _check_name(self):\n\t\tpass", "def GetModernizedTestName(self, arg):\n return arg", "def test_names(self):\n obj = dotnet.DotNetNamespace({\"id\": \"Foo.Bar\"}, jinja_env=None, app=None)\n self.assertEqual(obj.name, \"Foo.Bar\")\n self.assertEqual(obj.short_name, \"Bar\")\n\n obj = dotnet.DotNetNamespace(\n {\"id\": \"Foo.Bar.Something`1\"}, jinja_env=None, app=None\n )\n self.assertEqual(obj.name, \"Foo.Bar.Something`1\")\n self.assertEqual(obj.short_name, \"Something`1\")", "def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)", "def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')", "def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def tesName(self):\n place = Place()\n self.assertTrue(hasattr(place, \"name\"))\n self.assertEqual(place.name, \"\")", "def test_name(self):\n dtt = self.TDTT(when=self.txt_when)\n expected_name = self.txt_when\n self.assertEquals(expected_name, dtt.name)\n self.assertEquals(expected_name, '{}'.format(dtt))\n expected_logged = '{}({})'.format(dtt.typename(), self.txt_when)\n self.assertEquals(expected_logged, dtt.logged)", "def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)", "def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"", "def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):\n if strip_tags:\n tags_start = name.find('[')\n tags_end = name.find(']')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n if strip_scenarios:\n tags_start = name.find('(')\n tags_end = name.find(')')\n if tags_start > 0 and tags_end > tags_start:\n newname = name[:tags_start]\n newname += name[tags_end + 1:]\n name = newname\n\n return name", "def test_cleanup_name_for_homekit():\n\n assert cleanup_name_for_homekit(\"abc\") == \"abc\"\n assert cleanup_name_for_homekit(\"a b c\") == \"a b c\"\n assert cleanup_name_for_homekit(\"ab_c\") == \"ab c\"\n assert (\n cleanup_name_for_homekit('ab!@#$%^&*()-=\":.,><?//\\\\ frog')\n == \"ab--#---&----- -.,------ frog\"\n )\n assert cleanup_name_for_homekit(\"の日本_語文字セット\") == \"の日本 語文字セット\"", "def is_cool(name):\n if (name == \"Joe\") or (name == \"John\") or (name == \"Stephen\"):\n return 
True\n else:\n return False", "def test_get_by_name2(self):\n pass", "def nice_name():\n\n pass", "def testNames(self):\n self.assertEquals(\n set(['BLUE', 'GREEN', 'INDIGO', 'ORANGE', 'RED',\n 'VIOLET', 'YELLOW']),\n set(Color.names()))", "def test_organization_valid_name(self):\n hufflepuffs = models.Organization(name='hufflepuffs', title='Huffle Puffs')\n self.assertFalse(hufflepuffs.is_valid_name('#$%#%___2836273untitled'))\n self.assertTrue(hufflepuffs.is_valid_name('hufflepuffs'))", "def test_guess_nutrition_by_dish_name(self):\n pass", "def test_CategoryNames(self):\r\n exp = [\"BarcodeSequence\", \"DOB\", \"Description\", \"Treatment\"]\r\n obs = self.overview_map.CategoryNames\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.no_metadata.CategoryNames\r\n self.assertEqual(obs, [])\r\n\r\n obs = self.empty_map.CategoryNames\r\n self.assertEqual(obs, [])", "def test_TEB_shortname(self):\n teb = TEBpage()\n names = teb.short_name()\n assert len(names) == 1\n assert names[0] == 'Twin Elephant'", "def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)", "def test_check_name_is_3_parts():\n check_name_length()", "def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")", "def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def fixture_other_case() -> str:\n return \"angrybird\"", "def test_greeting(self):\r\n self.assertEqual(greet_by_name('Dani'), 'Hello, Mark!')", "def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"", "def is_an_oak(name):\n if 'quercus' in name.lower():\n return True\n else:\n return False", "def test_name_only(self):\n mock_call = Call(\"some_name\")\n assert_that(str(mock_call), equal_to(\"Call(some_name)\"))", "def test_name(self):\n place = Place()\n self.assertTrue(hasattr(place, \"name\"))\n self.assertEqual(type(place.name), str)\n self.assertEqual(place.name, \"\")", "def test_CategoryNames(self):\n exp = [\"BarcodeSequence\", \"DOB\", \"Description\", \"Treatment\"]\n obs = self.overview_map.CategoryNames\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.CategoryNames\n self.assertEqual(obs, [])\n\n obs = self.empty_map.CategoryNames\n self.assertEqual(obs, [])", "def my_name(firstname, secondname):", "def FindTestName(self, test_constructors, args):\n test_name = None\n for arg in [self.GetModernizedTestName(a) for a in args]:\n if arg in test_constructors:\n test_name = arg\n\n return test_name", "def test_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.name == \"testname\"", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_fasta_naming(self):\n aln2fasta = hhsuite.AlignmentToFasta(q_name=\"foo\", t_name=\"{name}\")\n self.assertEqual(\n aln2fasta.fasta(self.hit, \"A-E----\"),\n \">foo\\nJKLMNOP\\n>templatename\\nA-E----\\n\")", "def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"", "def test_strings_without_foo(self):\n write this test!", "def test_name_of_cube(self):\n result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)\n name = \"probability_of_sleet\"\n 
self.assertEqual(result.long_name, name)", "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def test_drudge_has_names(free_alg):\n\n p = free_alg.names\n\n # Range and dummy related.\n assert p.R == Range('R')\n assert len(p.R_dumms) == 6\n assert p.R_dumms[0] == p.i\n assert p.R_dumms[-1] == p.n\n\n # Vector bases.\n assert p.v == Vec('v')\n\n # Scalar bases.\n assert p.m == IndexedBase('m')", "def _generate_test_name(source):\n out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()\n return \"test_%s\" % out", "def testUseAltNamingOne(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269', '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 1)\n self.assertEqual(expected, actual)", "def exact_name_check(name_to_test, parts):\n\n if parts is None:\n return 0\n\n if type(name_to_test) is not str:\n return 0\n\n\n for p in sorted(parts, key=len, reverse=True):\n name_to_test = name_to_test.replace(p, '', 1)\n if name_to_test == '':\n return 1\n return 0", "def test_name_must_be_capitalized(self):\n form = self.make_validator_form(name='PATRICK alves')\n # O cleaned_data é um dicionário que armazena todos os valores do\n # formulários já validados\n self.assertEqual('Patrick Alves', form.cleaned_data['name'])", "def test_first_last_name(self):\n formatted_name = get_formatted_name('david', 'Malan')\n self.assertEqual(formatted_name, 'David Malan')", "def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")", "def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")", "def name():\n\n pass", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set" ]
[ "0.7272773", "0.72304636", "0.72284096", "0.69994545", "0.697421", "0.69290376", "0.689268", "0.68515533", "0.6850236", "0.6830433", "0.6828345", "0.6685354", "0.6678936", "0.6674695", "0.66445744", "0.6638108", "0.6600845", "0.65993506", "0.6574424", "0.657173", "0.65344596", "0.6524607", "0.6521379", "0.650778", "0.64914954", "0.6479959", "0.6474985", "0.6448267", "0.6441509", "0.6440577", "0.64053196", "0.6400186", "0.63921833", "0.6389178", "0.63800806", "0.634529", "0.6318313", "0.63168186", "0.63100535", "0.6309176", "0.62898433", "0.6281844", "0.6266103", "0.6259719", "0.62527204", "0.62385905", "0.62069297", "0.6206797", "0.62022257", "0.62001973", "0.6195865", "0.6183966", "0.61820793", "0.61803573", "0.61757165", "0.6157915", "0.6137699", "0.613532", "0.6102939", "0.61000407", "0.60957474", "0.60935026", "0.6089931", "0.6085586", "0.6084609", "0.6083877", "0.6082089", "0.60765165", "0.6072638", "0.6070107", "0.6070099", "0.6069668", "0.6065768", "0.6055329", "0.60517097", "0.6046917", "0.60319257", "0.60281247", "0.60228914", "0.60143995", "0.6013918", "0.6009939", "0.6009753", "0.6004259", "0.59928334", "0.5991686", "0.598617", "0.5986101", "0.59836245", "0.5982015", "0.5981573", "0.59776676", "0.5970481", "0.5964945", "0.5958562", "0.59553915", "0.595523", "0.595523", "0.595381", "0.595381" ]
0.6196084
50
Test names like Goat Studman Cat
def test_first_last_middle(self):
        full_name = get_full_name("pony", "cat", "sweetie")
        self.assertEqual(full_name, "Pony Sweetie Cat")

        full_name = get_full_name("goat", "cat", "studman")
        self.assertEqual(full_name, "Goat Studman Cat")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)", "def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"", "def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)", "def test_sanitize_name(self):\n assert scrape_category.sanitize_name(\" foo \") == \"foo\"\n assert scrape_category.sanitize_name(' foo\" ') == \"foo\"\n assert scrape_category.sanitize_name(' foo...bar ') == \"foo.bar\"", "def test_legal_names(self):\n prods = generate_products()\n for obj in prods:\n self.assertRegexpMatches(\n '(\\w{2,10} \\w{0,12}|\\?{0,3}){1}', obj.name)", "def test_name(name):\n # To work with the name, we remove the address and then\n # split it by its blanks\n name = name.split(\",\")[0]\n name = name.split()\n # First, we check whether the fictional person is a doctor or not\n doctor = 0\n if \"Dr.\" in name:\n doctor = 1\n\n # We save the results in a list\n result = [doctor]\n # Next we look at whether the person has a double first name\n if \"-\" in name[-2]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check if the person hat a double last name.\n if \"-\" in name[-1]:\n result.append(1)\n else:\n result.append(0)\n\n # Next we check whether the person is male or female.\n first_name = name[-2]\n if result[1] == 1:\n first_name = (first_name.split(\"-\"))[-2]\n if (first_name in names.woman and \"Herr\" not in name) or \"Frau\" in name:\n result.append(\"female\")\n elif (first_name in names.man and \"Frau\" not in name) or \"Herr\" in name:\n result.append(\"male\")\n return result", "def test_legal_names(self):\n product_list = generate_products()\n for prod in 
product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)", "def test_title(names):", "def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def test_legal_names(self):\n names = [i[0] for i in generate_products()]\n\n for n in names:\n name = str(n).split()\n name1 = name[0]\n name2 = name[1]\n self.assertIn(name1, ADJECTIVES)\n self.assertIn(name2, NOUNS)", "def test_three(self):\n name = get_formatted_name('david', 'malan', 'j')\n self.assertEqual(name, 'David J Malan')", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)", "def tester(name):\n return len(name)", "def test_legal_names(self):\n gen_prods_split = [p.name.split(\" \")\n for p in generate_products()]\n should_be_adjs = [n[0] for n in gen_prods_split]\n should_be_nouns = [n[1] for n in gen_prods_split]\n\n for a in should_be_adjs:\n self.assertIn(a, ADJECTIVES)\n\n for n in should_be_nouns:\n self.assertIn(n, NOUNS)", "def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')", "def test__check_invalid_nickname__valid_name(self):\n self.assertEqual(\n first=trainercode.format_trainer_code(\"111122223333\"),\n second=\"1111 2222 3333\"\n )", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own 
Death')[0]['episodename'], 'Faked His Own Death')", "def is_valid_string_name(src):\n\n return src[0].isupper()", "def check_name(name):\n name = sanitize_name(name)\n for letter in name:\n if letter not in all_letters:\n # print(f\"Bad letter = {letter}\")\n return False\n role = extract_role(name)\n # remove group\n name = name.replace(f' - {role}', '')\n try:\n parts = name.split(' ')\n firstname = parts[0].title()\n if firstname[0] not in letters:\n return False\n for letter in firstname[1:]:\n if letter not in LETTERS:\n return False\n familynames = parts[1:]\n for familyname in familynames:\n if familyname[0] not in letters:\n return False\n for letter in familyname[1:]:\n if letter not in LETTERS:\n return False\n return True\n except:\n return False", "def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('janos', 'jk')\n\t\tself.assertEqual(formatted_name, 'Janos Jk')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_is_valid_kubernetes_resource_name_valid_input():\n # test valid names\n assert is_valid_kubernetes_resource_name(name=\"l0l\")\n assert is_valid_kubernetes_resource_name(name=\"l-l\")\n assert is_valid_kubernetes_resource_name(name=\"l.l\")\n assert is_valid_kubernetes_resource_name(name=\"4-you\")\n assert is_valid_kubernetes_resource_name(name=\"you.2\")", "def test_first_last(self):\n\n full_name = get_full_name(\"pony\", \"cat\")\n self.assertEqual(full_name, \"Pony Cat\")\n\n full_name = get_full_name(\"goat\", \"cat\")\n self.assertEqual(full_name, \"Goat Cat\")", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('john', 'smith', 'billy')\n self.assertEqual(formatted_name, 'John Billy Smith')", "def test_name_must_be_capitalized(self):\n form = self.make_validator_form(name='PATRICK alves')\n # O cleaned_data é um dicionário que armazena todos os valores do\n # formulários já validados\n self.assertEqual('Patrick Alves', form.cleaned_data['name'])", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n # Asserting that formatted_name equals 'Janis Joplin'\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_check_name_is_3_parts():\n check_name_length()", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def test_first_last_name(self):\n formatted_name = get_formatted_name('david', 'Malan')\n self.assertEqual(formatted_name, 'David Malan')", "def test_organization_valid_name(self):\n hufflepuffs = models.Organization(name='hufflepuffs', title='Huffle Puffs')\n self.assertFalse(hufflepuffs.is_valid_name('#$%#%___2836273untitled'))\n self.assertTrue(hufflepuffs.is_valid_name('hufflepuffs'))", "def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')", "def test_short_full_name(self):\n self.assertEqual(self.john.full_name_short, \"J. Doe\")\n self.assertEqual(self.solar.full_name_short, \"N. 
Graule\")", "def test_reformatted_full_name():\n assert reformatted_full_name(\"\") == \"\"\n assert reformatted_full_name(\"George\") == \"george\"\n assert reformatted_full_name(\"X Y Z A B\") == \"x b\"", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")", "def isValidName(theString, minimum, maximum) :\n\n return theString.isalpha() == True \\\n and len(theString) >= minimum \\\n and len(theString) <= maximum", "def exact_name_check(name_to_test, parts):\n\n if parts is None:\n return 0\n\n if type(name_to_test) is not str:\n return 0\n\n\n for p in sorted(parts, key=len, reverse=True):\n name_to_test = name_to_test.replace(p, '', 1)\n if name_to_test == '':\n return 1\n return 0", "def name_valid(name):\n return name.isalpha()", "def name_check(f_name):\r\n if len(f_name) == 0:\r\n print('The first name must be filled in.')\r\n if len(f_name) < 2:\r\n print(f_name + ' is not a valid name. Itis too short.')", "def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def split_name(fullname):", "def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))", "def test_full_name(self):\n self.assertEqual(self.john.full_name, \"John Doe\")\n self.assertEqual(self.solar.full_name, \"Nathan Graule\")", "def check_funny_chars_in_names(names, is_full_qualified_name=True):\n if names and len(names) > 0:\n for name in names:\n if ('\\t' in name or '\\n' in name or '!' in name or ',' in name or\n (is_full_qualified_name and name.count('.') > 1) or (not is_full_qualified_name and name.count('.') > 0)):\n raise Exception('Name has an invalid character \"\\\\t\" \"\\\\n\" \"!\" \",\" \".\": \"%s\"' % name)", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def test_function_name_had_cap_letter():\n functions = inspect.getmembers(s7, inspect.isfunction)\n for function in functions:\n assert len(re.findall('([A-Z])', function[0])) == 0, \"You have used Capital letter(s) in your function names\"", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def check_name(name, is_name_ok):\n try:\n name = unicode(name, 'utf-8')\n except:\n pass\n name = name[max(string.rfind(name,'/'),\n string.rfind(name,'\\\\'),\n string.rfind(name,':')\n )+1:]\n name = string.replace(name, u\"'\", u'_')\n name = string.replace(name, u'ä', u'ae')\n name = string.replace(name, u'ö', u'oe')\n name = string.replace(name, u'ü', u'ue')\n name = string.replace(name, u'Ä', u'Ae')\n name = string.replace(name, u'Ö', u'Oe')\n name = string.replace(name, u'Ü', u'Ue')\n name = string.replace(name, u'ß', u'ss')\n bad_chars = ' ,;()[]{}*\"#%+~!'\n good_chars = '________________'\n TRANSMAP = string.maketrans(bad_chars, good_chars)\n name = name.encode('iso-8859-1')\n name = string.translate(name, TRANSMAP)\n if 
is_name_ok:\n return name\n html = '.html'\n if name[-5:] != html :\n name += html\n return name", "def verif_similar_names(sv):\r\n ok=True\r\n names=[os.path.normcase(n) for n in sv.Object_list] # list names without case\r\n names.sort() # facilitate compare one to the next\r\n for i, n in enumerate(names[:-1]): # scan whole list\r\n a,b=n[:-1], names[i+1][:-1] # names minus last char\r\n c=names[i+1][-1] # last char in full name\r\n d=n[-1] # last char in full name\r\n if len(a)>1 and (c <\"0\" or c>\"9\") and (d <\"0\" or d>\"9\") and a[-1]!=Underscore and b in [a, n]:\r\n if ok:\r\n print(\"\")\r\n ok=False\r\n warn(\"\\n\"+Warn_typing_risk+\"\\n'\"+n+\"' / '\"+names[i+1]+\"'\") # *** Warning: risk of typing error in '\"+n+\"' or '\"+names[i+1]+\"' *** \r\n \r\n if not ok: print(\"\")", "def test_cleanup_name_for_homekit():\n\n assert cleanup_name_for_homekit(\"abc\") == \"abc\"\n assert cleanup_name_for_homekit(\"a b c\") == \"a b c\"\n assert cleanup_name_for_homekit(\"ab_c\") == \"ab c\"\n assert (\n cleanup_name_for_homekit('ab!@#$%^&*()-=\":.,><?//\\\\ frog')\n == \"ab--#---&----- -.,------ frog\"\n )\n assert cleanup_name_for_homekit(\"の日本_語文字セット\") == \"の日本 語文字セット\"", "def my_name(firstname, secondname):", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def is_cool(name):\n if (name == \"Joe\") or (name == \"John\") or (name == \"Stephen\"):\n return True\n else:\n return False", "def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)", "def asName(self, name):\r\n\t\tnewName = \"\"\r\n\t\ttoHigher = False\r\n\t\tfor char in name:\r\n\t\t\tif char in \"_-\":\r\n\t\t\t\ttoHigher = True\r\n\t\t\telse:\r\n\t\t\t\tif toHigher:\r\n\t\t\t\t\tnewName = newName + char.upper()\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewName = newName + char\r\n\t\t\t\ttoHigher = False\r\n\t\treturn newName", "def acronym(name):\n return tuple(map(first, filter(capitalized, name.split())))", "def validate_team_name(name):\n if not re.match('^[A-Za-z0-9_]*$', name):\n print('INVALID NAME. LETTERS, NUMBERS AND UNDERSCORES ONLY')\n return False\n elif len(name) > 10:\n print('INVALID NAME. 10 CHARACTERS MAX')\n return False\n elif len(name) == 0:\n print('INVALID NAME. NOT LONG ENOUGH')\n else:\n return True", "def test_name_must_be_capitalized(self):\r\n form = self.make_validated_form(name='DieGo MaraNhao')\r\n self. 
assertEqual('Diego Maranhao', form.cleaned_data['name'])", "def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)", "def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True", "def test_city_country_population(self):\n formatted_name = make_formatted_name('santiago', 'chile', 5000000)\n self.assertEqual(formatted_name, 'Santiago, Chile - population 5000000')", "def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def is_an_oak(name):\n if 'quercus' in name.lower():\n return True\n else:\n return False", "def _check_name(self):\n\t\tpass", "def test_capitalize(self):\n self.assertEqual(\n minerals_extras.capitalize('mohs scale hardness'),\n 'Mohs Scale Hardness')", "def make_systematic_name(name):\n return \" \".join(re.findall(r\"([A-Z]+[a-z]*)\", name)).capitalize()", "def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()", "def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_normalize_star_name(self):\n self.assertEqual(normalize_star_name(\"RR LYR\"), \"RR LYR\")\n self.assertEqual(normalize_star_name(\"V1339 CYG\"), \"V1339 CYG\")\n self.assertEqual(normalize_star_name(\"V838 MON\"), \"V0838 MON\")", "def test_name(self, data, firstname, secondname):\n layer = Points(data)\n assert layer.name == \"Points\"\n\n layer = Points(data, name=firstname)\n assert layer.name == firstname\n\n layer.name = secondname\n assert layer.name == secondname", "def test_casing(self):\n char = Character(type=['Fish', 'Great Ape'])\n assert char.type_key == 'fish'", "def invalid_name(name):\n if any(not item.isalpha() for item in str(name)):\n return True\n return False", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def format_name_string(x: str) -> str:\n # get rid of [#] when present\n if \"{\" in x:\n x = x[:x.find(\"{\")-1]\n if \"var.\" in x.lower():\n p = x.lower().find(\"var.\")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+4] + \" <em class=\\\"species\\\">\" + x[p+4:] + \"</em>\"\n elif \" var \" in x.lower(): # need the spaces around var, because some names have the letters var in them\n p = x.lower().find(\" var \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+4] + \" <em class=\\\"species\\\">\" + x[p+4:] + \"</em>\"\n elif \"subsp.\" in x.lower():\n p = x.lower().find(\"subsp.\")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+6] + \" 
<em class=\\\"species\\\">\" + x[p+6:] + \"</em>\"\n elif \" forme \" in x.lower():\n p = x.lower().find(\" forme \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+6] + \" <em class=\\\"species\\\">\" + x[p+6:] + \"</em>\"\n elif \" f. \" in x.lower():\n p = x.lower().find(\" f. \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+3] + \" <em class=\\\"species\\\">\" + x[p+3:] + \"</em>\"\n else:\n return \"<em class=\\\"species\\\">\" + x + \"</em>\"", "def validate_name(self, name):\n import re\n\n if not re.findall(\"^[\\w',]+$\", name):\n self.msg(\"That category name contains invalid characters.\")\n return False\n return True", "def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)", "def validName(varname):\r\n if (len(varname[0])>32):\r\n return False\r\n if not(varname[0][0].isalpha()):\r\n return False \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False\r\n \r\n return True", "def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name", "def test_size_topping(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def standardize_name_for_look_up(name: Any) -> str:\n if not isinstance(name, str):\n return name\n\n name = name.lower().strip()\n name = \" \".join(name.split(\"_\"))\n name = name.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) # remove punctuation\n name = \" \".join(\n [part for part in name.split(\" \") if part]\n ) # ensure there is only a single space between words\n return name", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())" ]
[ "0.7357881", "0.7340329", "0.732584", "0.72818434", "0.72236216", "0.720758", "0.7120223", "0.69911456", "0.69444036", "0.69189924", "0.69121563", "0.69082373", "0.6899404", "0.6823745", "0.6816478", "0.68021417", "0.6771601", "0.6714057", "0.67046154", "0.67022836", "0.6653335", "0.6653335", "0.6644428", "0.66098", "0.65944374", "0.6593488", "0.6562523", "0.6549178", "0.6547015", "0.65320706", "0.65172577", "0.65056825", "0.64924777", "0.64924777", "0.64858896", "0.6450341", "0.6446954", "0.64447176", "0.6443172", "0.6438941", "0.643679", "0.64364195", "0.6435864", "0.64280766", "0.6427032", "0.64224803", "0.64168257", "0.64168257", "0.64108956", "0.6383221", "0.63774663", "0.63574463", "0.6353255", "0.63489425", "0.6344468", "0.63296187", "0.63132095", "0.63068", "0.63043535", "0.62963504", "0.62828124", "0.6276274", "0.6271454", "0.6267531", "0.625494", "0.6235305", "0.6233437", "0.62299615", "0.6227205", "0.62250894", "0.62118024", "0.62091225", "0.62065244", "0.6204691", "0.62013704", "0.6199973", "0.6196729", "0.6182744", "0.6179954", "0.61720127", "0.6170672", "0.6169946", "0.61617815", "0.6157215", "0.6155469", "0.6151002", "0.6150943", "0.6147432", "0.614435", "0.614435", "0.6143177", "0.6131161", "0.6123316", "0.6106792", "0.61065155", "0.6104393", "0.6082848", "0.6082773", "0.6082773", "0.6082773" ]
0.64093536
49
Calculate the bake time remaining.
def bake_time_remaining(elapsed_bake_time=EXPECTED_BAKE_TIME):
    time_remaining = EXPECTED_BAKE_TIME - elapsed_bake_time
    return time_remaining
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bake_time_remaining(elapsed_bake_time: int) -> int:\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def time_remaining(self) -> float:\n\n return self.event.time - time.time()", "def remaining(self):\n return self.value - time.time()", "def remaining_ms():", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n\n preparation_time = preparation_time_in_minutes(number_of_layers)\n total_bake_time = preparation_time + elapsed_bake_time\n return total_bake_time", "def time_remaining(progress, elapsed):\n total = elapsed / progress\n return total - elapsed", "def elapsed_time_in_minutes(no_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(no_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers: int, elapsed_bake_time: int) -> int:\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def seconds_remaining(self):\n pass", "def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def remaining(self):\n if self.goal:\n return self.goal - self.total_donated()\n else:\n return 0", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def time_remaining(self, steps_done):\n if steps_done:\n seconds_per_step = (datetime.datetime.now() - self.__start_time).total_seconds() / steps_done\n seconds_remaining = (self.__total_steps - steps_done) * seconds_per_step\n return str(datetime.timedelta(seconds=round(seconds_remaining)))\n return 'not enough data to estimate'", "def getRemainingRunTime(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SYST:BATT:ARTT?\")\n ret = self.myFieldFox.read()\n else:\n ret = 60\n return ret", "def time_remaining(self):\n elapsed_time = time.time() - self.start_time\n self.progressbar['value'] = progressbar.current\n time_remaining = round((1 - progressbar.current) * elapsed_time)\n # Disabled for Demo due to confusion\n # if time_remaining < 60:\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining} seconds')\n # elif 3600 > time_remaining > 60:\n # time_remaining = round(time_remaining / 60)\n # self.progress_label.config(text=f'Estimated TIme Remaining: {time_remaining} minutes')\n # elif time_remaining > 3600:\n # time_remaining = dt.timedelta(seconds=time_remaining)\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining}')", "async def getDelayTimeRemaining(self):\n delay_time_remaining = await self.director.getItemVariableValue(\n self.item_id, \"DELAY_TIME_REMAINING\"\n )\n return delay_time_remaining", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return elapsed_bake_time + (number_of_layers * 2)", "def receive_and_probing_time(self):\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n\t\t \t if probe.complete():\r\n\t\t\t \t\t latest_completion = max(latest_completion, probe.completion_time)\r\n 
return latest_completion - self.__arrival_time", "def decreases_remaining(self):\n return 2 - self.decreases_today", "def important_time(self):\n\t\twork_s = self.work_time().seconds\n\t\tbreak_s = self.break_time().seconds\n\t\tif self.status():\n\t\t\tremaining_time_s = tomato(work_s, break_s)\n\t\telse:\n\t\t\tremaining_time_s = potato(work_s, break_s)\n\n\t\timp_time = datetime.now() + timedelta(0, remaining_time_s)\n\t\treturn imp_time", "def duration(self) -> float:\n return self.endTime()-self.startTime()", "def calculate_time_percentage_left(self):\n time_left = self.calculate_time_left()\n return time_left / self.attributes[AT.TIME_TO_EXPIRE]", "def calculate_time_left(self):\n time_left = self.attributes[AT.TIME_CREATED] \\\n + self.attributes[AT.TIME_TO_EXPIRE] \\\n - get_ticks()\n if time_left < 0:\n time_left = 0\n return time_left", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def get_remaining(self, curtime=None):\n if curtime is None:\n curtime = time.time()\n return self.time - curtime", "def Remaining(self):\n if self._timeout is None:\n return None\n\n # Get start time on first calculation\n if self._start_time is None:\n self._start_time = self._time_fn()\n\n # Calculate remaining time\n remaining_timeout = self._start_time + self._timeout - self._time_fn()\n\n if not self._allow_negative:\n # Ensure timeout is always >= 0\n return max(0.0, remaining_timeout)\n\n return remaining_timeout", "def remaining(self):\n return self._timeout - (time.time() - self._start_time)", "def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def calc_total_wait(self, current_time_step):\n self.total_wait = current_time_step - self.time_entered\n return self.total_wait", "def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()", "def estimated_time(self):\n self._update()\n if not self.running_mode:\n return 0 if self._is_finished() else float(\"nan\")\n elif self.running_mode == \"local\":\n start = self.processes[0].create_time()\n elif self.running_mode == \"grid\":\n start = self.job[\"start_time\"]\n if start == 0:\n # Queued, but not started\n return float(\"nan\")\n else:\n logger.warning(\"Invalid running_mode attribute\")\n return float(\"nan\")\n current = self.current_step()\n if current <= 0: # If not dumped yet or error\n return float('nan')\n else:\n elapsed = time() - start\n return elapsed * (self.total_steps / current - 1)", "def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start", "def timesLeft(self)->int:\n return self.maxTimes - self.timesUsed", "def wait_time(self, current_time):\n return current_time - self.timestamp", "def get_total_wait_time(self):\n s = sum([r['wait'] for r in self.required_signals.itervalues()])\n if self.current_signal:\n s += self.current_signal[1]['wait']\n\n return s", "def calc_recharge_time(self, upper_battery_capacity = 4):\n # coasting_velocity = self.coast_speed(lap_length, angle) # KWh, point when to swap back to battery power\n time = ((upper_battery_capacity - self.current_capacity) / \n (self.recharge_rate))\n return time", "def remaining(self):\n if not self.enabled:\n return None\n duration = self.timeout - self.elapsed\n if self.timed_out: # check timed_out after duration for real-time correctness\n return 0\n return duration", "def duration(self):\r\n return 
self.t2 - self.t1", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def TimeToRefill(self):\n # Get current timestamp in miliseconds from unix epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n timetorefil = timeatrefile - now + 1000 # plus one second fudge factor\n if timetorefil < 0:\n timetorefil = 0\n\n # Return value in seconds\n return timetorefil / 1000.0", "def duration(self):\n return self._backup_finished - self._backup_started", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def update_stay_time(self):\n # It would not be better to simply self.stay_time = self.get_length() ??\n self.stay_time = self.get_length()", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def duration(self):\n return int(\n (self.finish_checkpoint - self.start_checkpoint) * 1000000\n )", "def remaining_space_in_hold(self):\n balls = self.config['balls_to_hold'] - self.balls_held\n if balls < 0:\n balls = 0\n return balls", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def secondsLeft(self)->int:\n t = datetime.utcnow()\n if self._scenario == LM_HardDate.Scenario.ValidSince:\n return 0 if t >= self.timeBegin else int((self.timeBegin - t).total_seconds())\n else:\n return 0 if t >= self.timeEnd else int((self.timeEnd - t).total_seconds())", "async def getDelayTimeTotal(self):\n delay_time_total = await self.director.getItemVariableValue(\n self.item_id, \"DELAY_TIME_TOTAL\"\n )\n return delay_time_total", "def calculateWaitingTime(self, inputs):\n CollisionCounter.CollisionCounter.getInstance().waitingTimeCalculated(self.time)\n timeUntilDepature = self.getAtt('departure_time', inputs) - self.time\n remainingLoadingTime = self.calculateLoadingTime(inputs)\n # calculates first maximum possible waiting time\n sampleTime = int((timeUntilDepature - remainingLoadingTime) / self.participants)\n\n if sampleTime >= 1:\n # result is big enough for a standard treatment\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(sampleTime + 1)\n elif sampleTime < 1:\n # reslut is too small, special treatment necessary\n upperLimit = (10 * (1 - (math.exp(sampleTime - 1)))) + 1\n self.waitingTime = MyRandom.RandomNumber.getInstance().getRandomNumber(max((min(upperLimit,\n timeUntilDepature)) + 1, 1))\n # decides whether charging is allowed during waiting time\n if not self.stayedConnected:\n self.stayConnected = True\n self.stayedConnected = True\n else:\n self.stayConnected = False\n self.stayedConnected = False", "def duration(self):\n\n ended = time.time() if self.ended is None else self.ended\n return ended - self.started", "def get_waiting_in_line(self):\n return self.time_step_to_dequeue - self.time_step_to_enqueue", "def time_left(self):\n return self.timeout - self.current_milli_time()", "def get_remaining_playback_time(self):\n\n return self._current_playback[\"item\"][\"duration_ms\"] - self._current_playback[\"progress_ms\"]", "def time_consumed(self) -> int:\n if not self.actions:\n return 0\n else:\n return self.actions[-1].time_end", "def duration(self):\n started = self.started_at\n finished = self.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def 
compute_go_duration(self, units='seconds'):\n go_duration = 0\n for trial in self.trials:\n max_time = 0\n for event in trial.events:\n if self.stop > max_time:\n max_time = self.stop\n\n go_duration += max_time\n\n self.go_duration = (go_duration, units)", "def duration(self):\r\n return self.stop - self.start", "def duration( self ):\n return (self.start and time.process_time()-self.start) or 0", "def time_left(self) -> float:\n return self._alarm_silence - time.monotonic()", "def bottle_duration(consumption, pressure, empty_pressure=0):\n available = pressure - empty_pressure\n return math.floor(available / consumption)", "def remain():\r\n global total\r\n global user_pick\r\n total = int(total - user_pick)\r\n print(\"Remaining \" + str(total))", "def duration(self):\n return self.end_time - self.start_time", "def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def _its_remaining_time(cls, prefix, timeout, from_start_time):\n already_passed = time.time() - from_start_time\n remain_time = timeout - already_passed\n if remain_time < 0.0:\n remain_time = 0.0\n msg = \"{} {:.3f} [sec], already passed {:.3f} [sec]\".format(prefix, remain_time, already_passed)\n return remain_time, msg", "def _get_remain_hd_time(self):\n return self.__remain_hd_time", "def get_seconds(self):\n return self.seconds_remaining", "def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()", "def duration(self) -> int:\n return 0", "def elapsed(self):\n return self.__last_time() - self.__start", "def duration(self):\n if not self.started:\n return None\n start = self.started\n end = self.completed\n if not end:\n end = datetime.utcnow()\n return end - start", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(self.name, \"took\", str(hours), \"hours and\", \"{0:.2f}\".format(minutes), unit, \"to complete\")\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(self.name, \"took\", str(minutes), \"minutes and\", \"{0:.2f}\".format(seconds), unit, \"to complete\")\n else:\n print(self.name, \"took\", \"{0:.2f}\".format(elapsed), unit, \"to complete\")", "def pc_work_time_total(self):\n return _spacegrant_swig.general_burster_2_sptr_pc_work_time_total(self)", "def get_duration(self, obj):\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def get_remaining(self) -> str:\n hex_remaining_time = hexlify(self.message)[294:302]\n int_remaining_time_seconds = int(\n hex_remaining_time[6:8]\n + hex_remaining_time[4:6]\n + hex_remaining_time[2:4]\n + hex_remaining_time[0:2],\n 16,\n )\n return seconds_to_iso_time(int_remaining_time_seconds)", "def get_duration(self):\n\n return self.endtime - self.starttime", "def elapsed(self):\n return str(datetime.datetime.now() - self.start).split('.')[0]", "def time_left(self) -> str:\n return self._time_to_auto_off", 
"def report_remaining_seconds(self):\n LOGGER.info('Lambda remaining seconds: %.2f', self.remaining_ms() / 1000.0)", "def lockout_duration(self, obj):\n delta = relativedelta(obj.modified, obj.created)\n return humanize_delta(delta)", "def length(self):\n if self.running:\n return ZERO_TIME\n else:\n return self.end - self.start", "def runtime(self):\n return self.stop_time - self.start_time", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def time_until(self, cookies):\n cost = cookies - self._current_cookies\n time = 0.0\n if cost > 0.0:\n time = math.ceil(cost/self._cps)\n else:\n time = 0.0\n return time", "def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.secondsTotal else self.secondsTotal - self.secondsPassed", "def get_duration(self, current_time):\n return current_time - self.slam.get_data(node_name=self.last_point_name)['time']", "def duration(self):\n\t\tif self.status():\n\t\t\t# Currently on, return time since session was started\n\t\t\treturn self.length()\n\t\telse:\n\t\t\t# Otherwise return time until last bit of work\n\t\t\t# Check that this isn't an empty session\n\t\t\tif not self.toggles: return timedelta()\n\t\t\treturn self.toggles[-1] - self.toggles[0]", "def compute_run_duration(flasher_data, initial_delay):\n if initial_delay is None:\n tot = 0\n else:\n tot = initial_delay\n\n for pair in flasher_data:\n tot += pair[1] + 10\n\n return tot", "def get_remaining_count(self):\n return self.total_count - self.count", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time" ]
[ "0.8687785", "0.85834104", "0.85834104", "0.85834104", "0.7155303", "0.6970577", "0.6957602", "0.688031", "0.67487776", "0.6720861", "0.67003495", "0.66638684", "0.6635681", "0.6626661", "0.6574791", "0.65689373", "0.6551115", "0.65494686", "0.653096", "0.6519621", "0.65069056", "0.64870733", "0.6471145", "0.6464806", "0.645559", "0.6432379", "0.64001113", "0.6381405", "0.63672304", "0.63554794", "0.633987", "0.63103175", "0.6308583", "0.6302533", "0.6288278", "0.62594384", "0.6256708", "0.6242021", "0.62344074", "0.6231879", "0.62296873", "0.62108094", "0.6208802", "0.6197729", "0.6187996", "0.61611634", "0.6150217", "0.6133896", "0.6133352", "0.6131605", "0.6115419", "0.61112064", "0.61101705", "0.6105935", "0.60994685", "0.609597", "0.60659385", "0.6045319", "0.6042151", "0.6039696", "0.60368705", "0.6033386", "0.6026908", "0.59950125", "0.5994044", "0.59922236", "0.5975477", "0.5966979", "0.59543633", "0.59419423", "0.5940826", "0.5934441", "0.59334046", "0.59289443", "0.5921532", "0.5920523", "0.59199", "0.59190226", "0.5918405", "0.59161484", "0.59156805", "0.59048957", "0.5903511", "0.5893916", "0.5892662", "0.58703655", "0.5869248", "0.58685255", "0.58685", "0.58683383", "0.58636886", "0.5859261", "0.5858777", "0.5856705", "0.5848425", "0.58350253", "0.582839", "0.58273536", "0.5824862", "0.58195364" ]
0.84728587
4
Calculate Time To Prepare All Layers. This function takes the number of lasagna layers as a parameter and multiplies that value by the designated preparation time.
def preparation_time_in_minutes(number_of_layers):
    layers_preparation_time = number_of_layers * PREPARATION_TIME
    return layers_preparation_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preparation_time_in_minutes(number_of_layers: int) -> int:\n return PREPARATION_TIME_PER_LAYER_IN_MINUTES * number_of_layers", "def preparation_time_in_minutes(number_of_layers):\n return number_of_layers * 2", "def preparation_time_in_minutes(number_of_layers):\n return PREPARATION_TIME * number_of_layers", "def preparation_time_in_minutes(no_of_layers):\n return PREPARATION_TIME * no_of_layers", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers: int, elapsed_bake_time: int) -> int:\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(no_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(no_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n\n preparation_time = preparation_time_in_minutes(number_of_layers)\n total_bake_time = preparation_time + elapsed_bake_time\n return total_bake_time", "def adjust_layer_temps(self):\n\n if self.layer_count == 1:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s = self.t_s_0\n\n elif self.layer_count == 2:\n if self.isothermal:\n self.t_s = FREEZE\n self.t_s_l = FREEZE\n self.t_s_0 = FREEZE\n else:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s_l = self.new_tsno(\n self.m_s_l,\n self.t_s_l,\n self.cc_s_l)\n self.t_s = self.new_tsno(\n self.m_s,\n self.t_s,\n self.cc_s)", "def make_layers(self):\r\n #assuming temporal field is always the first column!\r\n timeCol = self.data.columns[0]\r\n times = self.data[timeCol].unique() \r\n lat = self.data.lat.unique()\r\n lon = self.data.lon.unique()\r\n shape = (len(lat), len(lon))\r\n depths, hours = [None], [None]\r\n if 'depth' in self.data.columns:\r\n depths = self.data.depth.unique()\r\n if 'hour' in self.data.columns:\r\n hours = self.data.hour.unique()\r\n layers, titles = [], []\r\n for t in times:\r\n for h in hours:\r\n for z in depths:\r\n frame = self.data[self.data[timeCol] == t]\r\n\r\n if timeCol == 'time':\r\n sub = self.variable + self.unit + ', ' + str(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').date())\r\n else:\r\n sub = self.variable + self.unit + ', ' + timeCol + ': ' + str(t) \r\n\r\n if h != None:\r\n frame = frame[frame['hour'] == h]\r\n sub = sub + ', hour: ' + str(h) + 'hr'\r\n if z != None:\r\n frame = frame[frame['depth'] == z] \r\n sub = sub + ', depth: %2.2f' % z + ' [m]' \r\n try: \r\n layers.append(frame[self.variable].values.reshape(shape))\r\n titles.append(sub)\r\n except Exception as e:\r\n continue \r\n return layers, titles, lat, lon", "def init_timeparams(options):\n params = OrderedDict()\n # for time prediction\n '''\n W_t = np.zeros(options['dim_proj'])\n params['W_t'] = W_t.astype(config.floatX)\n b_t = np.zeros(1)\n params['b_t'] = b_t.astype(config.floatX)\n '''\n W_t = init_params_weight(options['dim_proj'], 1)\n params['W_t'] = W_t.astype(config.floatX)\n b_t = init_params_weight(1, 1)\n params['b_t'] = b_t.astype(config.floatX)\n # w_g = np.zeros(1)\n # params['w_g'] = w_g.astype(config.floatX)\n\n return params", "def __initAvailableLayerTypes(self):\n from backend.caffe.path_loader import PathLoader\n caffe = PathLoader().importCaffe()\n layerNameMainParts = list(caffe.layer_type_list())\n\n res = {}\n paramsPerLayerType = {}\n\n # calculate common parameters of all layer types\n # by removing all which will be used for one 
specific layer type only\n # also keep in mind which ones have been removed to readd them to specific layers\n commonParams = self._availableParameterGroupDescriptors[\"LayerParameter\"].parameter() #use .parameter() on purpose\n layerSpecificParameters = set()\n for nameMainPart in layerNameMainParts:\n specificParamsName = [nameMainPart + \"Parameter\"]\n if moreLayerNameParameter.has_key(nameMainPart):\n specificParamsName.append( moreLayerNameParameter[nameMainPart])\n paramsPerLayerType[nameMainPart] = {}\n for key, value in commonParams.items():\n if value.isParameterGroup() and value.parameterName() in specificParamsName:\n paramsPerLayerType[nameMainPart][key] = value\n layerSpecificParameters.add(key)\n\n\n # special case: shared params for loss layers\n key = \"loss_param\"\n value = commonParams[key]\n del commonParams[key]\n for nameMainPart in layerNameMainParts:\n if LayerType.getCategoryByName(nameMainPart) == LayerType.CATEGORY_LOSS:\n paramsPerLayerType[nameMainPart][key] = value\n\n # TODO is there a special case for the TransformationParameter?\n\n # create each layer type after one another\n for nameMainPart in layerNameMainParts:\n\n # add common params to the specific ones\n layerTypeParam = paramsPerLayerType[nameMainPart].keys()\n paramsPerLayerType[nameMainPart].update(commonParams)\n\n irrelevant = layerSpecificParameters.difference(layerTypeParam)\n res[nameMainPart] = LayerType(nameMainPart, paramsPerLayerType[nameMainPart], layerTypeParam, irrelevant)\n\n self._commonParams = commonParams\n self._availableLayerTypes = res", "def prepare_iteration_parameters(prepared_parameters):\n \n (sess, graph, bottleneck_tensor, jpeg_data_tensor,\n resized_image_tensor, image_lists) = prepared_parameters\n # See if the command-line flags mean we're applying any distortions.\n (do_distort_images, distorted_jpeg_data_tensor,\n distorted_image_tensor) = distort.distort_images(prepared_parameters)\n # Add the new layer that we'll be training.\n num_classes = len(image_lists.keys()) # Calculates number of output classes\n (train_step, cross_entropy, total_loss, bottleneck_input,\n ground_truth_input, final_tensor, keep_prob) = config.add_final_training_ops(num_classes,\n flags.final_tensor_name,\n bottleneck_tensor)\n # Set up all our weights to their initial default values.\n prepare_session(sess)\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step = config.add_evaluation_step(final_tensor, ground_truth_input)\n prediction_step = config.add_prediction_step(final_tensor)\n\n return (sess, graph, (do_distort_images, image_lists,\n distorted_jpeg_data_tensor, distorted_image_tensor,\n resized_image_tensor, bottleneck_tensor, jpeg_data_tensor,\n train_step, bottleneck_input, ground_truth_input, keep_prob,\n evaluation_step, prediction_step, cross_entropy, total_loss))", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return elapsed_bake_time + (number_of_layers * 2)", "def profile(self, layer, num_iter=50, num_warmup=10, direction='forward'):\n return TimeMeasure()", "def optimize_layers(self) -> None:\n delays = [frame.delay for frame in self.sequence]\n self.coalesce()\n super().optimize_layers()\n for frame, d in zip(self.sequence, delays):\n frame.delay = d", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = 
p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n p_ii.tr_fflayer_tpl.hidden_dim = p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def _init_time(self, zmin, zmax, nzbin):\n # lgz is actually lg(1+z)\n # this actually very ill-defined as we really want to pack the early time with more \n # sampling points...\n# self.lgz = np.linspace(log10(zmin+1), log10(zmax+1), num=nzbin)\n# self.lgzmax = self.lgz[-1]\n# self.lgzmin = self.lgz[0]\n# self.z = np.power(10., self.lgz) - 1.\n\n _lgzrev = np.linspace(log10(zmin+1), log10(zmax+1), num=nzbin)\n _zrev = zmin + zmax - (np.power(10.0, _lgzrev) - 1)\n self.z = _zrev[::-1] \n self.lgz = np.log10(self.z+1)\n self.lgzmax = self.lgz[-1]\n self.lgzmin = self.lgz[0]", "def layers(self, layers):\n self._layers = layers\n self.thetas = []\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer\n self.thetas.extend(layer.thetas())", "def initialize_parameters(X, Y, nb_units_per_hidden_layer):\n # Your code here\n np.random.seed(1)\n params = {}\n L = len(nb_units_per_hidden_layer)\n params['W' + str(1)] = np.random.randn(nb_units_per_hidden_layer[0],X.shape[0] ) * 0.05\n params['b' + str(1)] = np.zeros((nb_units_per_hidden_layer[0], 1))\n\n for i in range(1, L):\n params['W' + str(i+1)] = np.random.randn(nb_units_per_hidden_layer[i], nb_units_per_hidden_layer[i - 1]) * 0.01\n params['b' + str(i+1)] = np.zeros((nb_units_per_hidden_layer[i], 1))\n params['W' + str(L+1)]= np.random.randn(1, nb_units_per_hidden_layer[L-1]) * 0.05\n params['b' + str(L+1)]= np.zeros((1,1))\n return params\n # raise NotImplementedError", "def __define_variable_time(self, initial_guess, minimum, maximum):\n i = 0\n for nlp in self.nlp:\n if isinstance(nlp[\"tf\"], self.CX):\n time_bounds = Bounds(minimum[i], maximum[i], interpolation=InterpolationType.CONSTANT)\n time_init = InitialConditions(initial_guess[i])\n Parameters._add_to_v(self, \"time\", 1, None, time_bounds, time_init, nlp[\"tf\"])\n i += 1", "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def test_fit_prep():\n args = get_layer('fit', 'manual', 'temporal', False, True, window=2, step_size=3)\n run_layer(*args)", "def _get_time_params(self, cfg):\n for step_name in PipelineConfig.steps:\n params = dict()\n step_cfg = cfg.get(step_name)\n pipe_type = step_cfg.pipe_step.type\n 
params['pipe_type'] = pipe_type\n if not cfg[step_name].get('trainer', None):\n continue\n params['epochs'] = cfg[step_name].trainer.epochs\n self.params_dict[step_name] = params", "def time_gan(self, dataset, niterations=101):\n\n if not dataset:\n raise ValueError(\"Provide correct dataset for timing\")\n\n min_iterations = self.model.get_k(self.model.gen_iter) + 1\n if niterations < min_iterations:\n raise ValueError(\"Invalid iterations number. Run at least \" +\n str(min_iterations) + \" iterations.\")\n\n # iterate through minibatches of the dataset\n times = OrderedDict()\n time_keys = ['fprop', 'bprop', 'iteration']\n if self.time_layers:\n time_keys.append('data_loading')\n for ky in time_keys:\n times[ky] = np.full(niterations, -1.0)\n count = 0\n\n epoch = self.model.epoch_index\n z, y_temp = self.model.zbuf, self.model.ybuf\n\n data_loading_mark = self.model.be.init_mark()\n fprop_start = self.model.be.init_mark()\n fprop_end = self.model.be.init_mark()\n bprop_start = self.model.be.init_mark()\n bprop_end = self.model.be.init_mark()\n\n while count < niterations:\n dataset.reset()\n self.model.be.record_mark(data_loading_mark)\n for mb_idx, (x, t) in enumerate(dataset):\n # clip all discriminator parameters to a cube in case of WGAN\n if self.model.wgan_param_clamp:\n self.model.clip_param_in_layers(\n self.model.layers.discriminator.layers_to_optimize,\n self.model.wgan_param_clamp)\n # benchmark discriminator on noise\n self.model.fill_noise(z, normal=(self.model.noise_type == 'normal'))\n self.model.be.record_mark(fprop_start) # mark start of fprop\n Gz = self.model.fprop_gen(z)\n y_noise = self.model.fprop_dis(Gz)\n self.model.be.record_mark(fprop_end) # mark end of fprop\n data_loading_time = self.model.be.get_time(data_loading_mark, fprop_start)\n if self.time_layers:\n times['data_loading'][count] = data_loading_time\n times['fprop'][count] += self.model.be.get_time(fprop_start,\n fprop_end) + data_loading_time\n y_temp[:] = y_noise\n self.model.be.record_mark(bprop_start) # mark start of bprop\n delta_noise = self.model.cost.costfunc.bprop_noise(y_noise)\n self.model.bprop_dis(delta_noise)\n self.model.be.record_mark(bprop_end) # mark end of bprop\n times['bprop'][count] += self.model.be.get_time(bprop_start, bprop_end)\n self.model.layers.discriminator.set_acc_on(True)\n\n # benchmark discriminator on data\n self.model.be.record_mark(fprop_start)\n y_data = self.model.fprop_dis(x)\n self.model.be.record_mark(fprop_end)\n times['fprop'][count] += self.model.be.get_time(fprop_start, fprop_end)\n self.model.be.record_mark(bprop_start)\n delta_data = self.model.cost.costfunc.bprop_data(y_data)\n self.model.bprop_dis(delta_data)\n self.model.optimizer.optimize(self.model.layers.discriminator.layers_to_optimize,\n epoch=epoch)\n self.model.be.record_mark(bprop_end)\n times['bprop'][count] += self.model.be.get_time(bprop_start, bprop_end)\n self.model.layers.discriminator.set_acc_on(False)\n\n # benchmark generator\n if self.model.current_batch == self.model.last_gen_batch + \\\n self.model.get_k(self.model.gen_iter):\n self.model.fill_noise(z, normal=(self.model.noise_type == 'normal'))\n self.model.be.record_mark(fprop_start)\n Gz = self.model.fprop_gen(z)\n y_temp[:] = y_data\n y_noise = self.model.fprop_dis(Gz)\n self.model.be.record_mark(fprop_end)\n times['fprop'][count] += self.model.be.get_time(fprop_start, fprop_end)\n self.model.be.record_mark(bprop_start)\n delta_noise = self.model.cost.costfunc.bprop_generator(y_noise)\n delta_dis = 
self.model.bprop_dis(delta_noise)\n self.model.bprop_gen(delta_dis)\n self.model.optimizer.optimize(self.model.layers.generator.layers_to_optimize,\n epoch=epoch)\n self.model.be.record_mark(bprop_end)\n times['bprop'][count] += self.model.be.get_time(bprop_start, bprop_end)\n self.model.last_gen_batch = self.model.current_batch\n self.model.gen_iter += 1\n\n self.model.current_batch += 1\n times['iteration'][count] = times['data_loading'][count] + times['fprop'][\n count] + times['bprop'][count]\n self.model.be.record_mark(data_loading_mark)\n count += 1\n if count >= niterations:\n break\n\n return times", "def update_all_layers(self,t1=0.0,t2=0.0,phi=0.0,m=0.0,t31=0.0, t32=0.0, randomly=False,sigma=0.03):\n if randomly:\n for i in range(self.NL):\n self.layers[i].update_values( t1*(1.+np.random.randn(1)*sigma) ,t2*(1.+np.random.randn(1)*sigma) ,phi*(1.+np.random.randn(1)*sigma) ,m*(1.+np.random.randn(1)*sigma) ,t31*(1.+np.random.randn(1)*sigma), t32*(1.+np.random.randn(1)*sigma) )\n else:\n for i in range(self.NL):\n self.layers[i].update_values( t1 ,t2 ,phi ,m ,t31 ,t32 )", "def time_cnn():\n\n data_dir = \"/home/liyanzeng/git/Var-CNN--DynaFlow/preprocess\"\n\n # read in data from numpy files\n train_metadata = np.load(r\"%s/train_metadata.npy\" % data_dir)\n test_metadata = np.load(r\"%s/test_metadata.npy\" % data_dir)\n train_seq = np.load(r\"%s/train_seq.npy\" % data_dir)\n train_labels = np.load(r\"%s/train_labels.npy\" % data_dir)\n test_seq = np.load(r\"%s/test_seq.npy\" % data_dir)\n test_labels = np.load(r\"%s/test_labels.npy\" % data_dir)\n\n # apply normalization to metadata\n metadata_scaler = StandardScaler()\n train_metadata = metadata_scaler.fit_transform(train_metadata)\n test_metadata = metadata_scaler.transform(test_metadata)\n\n # extract sequences\n train_time, train_time_dleft, train_time_dright, train_dir = np.split(train_seq, 4, axis=2)\n test_time, test_time_dleft, test_time_dright, test_dir = np.split(test_seq, 4, axis=2)\n\n # reshape to be able to normalize\n train_time = np.reshape(train_time, (train_time.shape[0], train_time.shape[1]))\n test_time = np.reshape(test_time, (test_time.shape[0], test_time.shape[1]))\n train_time_dleft = np.reshape(train_time_dleft, (train_time_dleft.shape[0], train_time_dleft.shape[1]))\n test_time_dleft = np.reshape(test_time_dleft, (test_time_dleft.shape[0], test_time_dleft.shape[1]))\n train_time_dright = np.reshape(train_time_dright, (train_time_dright.shape[0], train_time_dright.shape[1]))\n test_time_dright = np.reshape(test_time_dright, (test_time_dright.shape[0], test_time_dright.shape[1]))\n\n # apply normalization to packet time data according to scaling computed on train timestamp data\n time_scaler = StandardScaler()\n train_time = time_scaler.fit_transform(train_time)\n test_time = time_scaler.transform(test_time)\n train_time_dleft = time_scaler.transform(train_time_dleft)\n test_time_dleft = time_scaler.transform(test_time_dleft)\n train_time_dright = time_scaler.transform(train_time_dright)\n test_time_dright = time_scaler.transform(test_time_dright)\n\n train_seq = np.stack((train_time, train_time_dleft, train_time_dright), axis=-1)\n test_seq = np.stack((test_time, test_time_dleft, test_time_dright), axis=-1)\n\n # construct CNN\n cnn_input = Input(shape=(seq_length, 3,), name='cnn_input')\n cnn_model = time_conv_block(cnn_input, 2, 4)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_model = 
time_conv_block(cnn_model, 3, 16)\n cnn_output = Flatten()(cnn_model)\n cnn_output = dense_layer(cnn_output, 1024, 0.4)\n\n # construct MLP for metadata\n metadata_input = Input(shape=(7,), name='metadata_input')\n metadata_output = dense_layer(metadata_input, 32, 0.) # consider this the embedding of all the metadata\n\n # concatenate before second dense layer\n combined = Concatenate()([cnn_output, metadata_output])\n combined = dense_layer(combined, 1024, 0.5)\n\n # add final softmax layer\n if NUM_UNMON_SITES == 0: # closed-world\n combined_output = Dense(units=NUM_MON_SITES, activation='softmax', name='combined_output')(combined)\n else:\n # add extra class for unmonitored sites\n combined_output = Dense(units=NUM_MON_SITES + 1, activation='softmax', name='combined_output')(combined)\n\n model = Model(inputs=[cnn_input, metadata_input], outputs=[combined_output])\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy'])\n\n training_data = ({'cnn_input': train_seq,\n 'metadata_input': train_metadata},\n {'combined_output': train_labels})\n\n test_data = ({'cnn_input': test_seq,\n 'metadata_input': test_metadata},\n {'combined_output': test_labels})\n\n lr_modifier = LearningRateScheduler(schedule=lr_scheduler)\n\n # train model\n train_time_start = time.time()\n model.fit(x=training_data[0],\n y=training_data[1],\n batch_size=50,\n epochs=200,\n verbose=0,\n callbacks=[lr_modifier])\n train_time_end = time.time()\n\n # compute final softmax predictions on test set and save predictions\n test_time_start = time.time()\n predictions = model.predict(test_data[0], batch_size=50, verbose=0)\n test_time_end = time.time()\n \n save_dir = \"predictions\"\n np.save(file=r\"%s/time_model\" % save_dir, arr=predictions)\n \n return (train_time_end - train_time_start), (test_time_end - test_time_start)", "def calc_time(directions_result):\n\n # there is only one leg\n legs = directions_result[\"legs\"][0][\"steps\"]\n\n steps = map(lambda x: (x[\"travel_mode\"], x[\"start_location\"], x[\"end_location\"]), legs)\n\n walking = filter(lambda x: x[0] == \"WALKING\", steps)\n transit = filter(lambda x: x[0] == \"TRANSIT\", steps)\n\n\n walking_to_biking = map(lambda x: gmaps.directions(\n x[1], x[2],\n mode=\"bicycling\"), walking)\n\n transit_final = map(lambda x: gmaps.directions(\n x[1], x[2], mode=\"transit\"), transit)\n\n\n walking_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), walking_to_biking)\n transit_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), transit_final)\n\n all_legs = map(lambda x:\n sum(map(lambda y: y[\"duration\"][\"value\"], x[0][\"legs\"]))\n ,walking_to_biking+transit_final)\n\n final = zip(all_legs, walking+transit, walking_addrs+transit_addrs)\n\n\n def reconstruct():\n w,t = 0,len(walking)\n arr = []\n for i in xrange(len(all_legs)):\n if steps[i][0] == \"TRANSIT\":\n arr.append(final[t])\n t += 1\n else:\n arr.append(final[w])\n w += 1\n return arr\n\n\n total_time = sum(all_legs) \n\n return total_time, reconstruct()", "def _build_parameters(self):\n # Compute total duration based on Pool temperature\n run_hours_total = self._pool_controler.duration(\n float(\n self._hass.states.get(\n self._hass.data[DOMAIN][ATTR_POOL_TEMPERATURE_ENTITY_ID]\n ).state\n )\n )\n _LOGGER.debug(\n \"Daily filtering total duration: {} hours\".format(run_hours_total)\n )\n\n # Update state with total duration\n self._hass.states.async_set(\n 
\"{}.{}\".format(DOMAIN, ATTR_TOTAL_DAILY_FILTERING_DURATION),\n format(run_hours_total, \".2f\"),\n )\n\n # Return total duration in hours\n return run_hours_total", "def __set_time_elements(*args):\n args[0].TimeState.delay_elements = args[1]\n args[0].TimeState.set_delay_elements()", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = 
np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def initialize_parameters(layer_dims, initialization_factor = '0.01'):\n\n\tparameters = {}\n\tL = len(layer_dims)\n\n\tfor l in xrange(1,L):\n\n\t\tif initialization_factor == '0.01':\n\t\t\tparameters['W' + str(l)] = np.random.randn(layer_dims[l] , layer_dims[l-1])*0.01\n\t\tif initialization_factor == 'he':\n\t\t\tparameters[\"W\" + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1])*np.sqrt(2/layer_dims[l-1])\n\t\tif initialization_factor == 'xavier':\n\t\t\tparameters[\"W\" + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1])*np.sqrt(1/layer_dims[l-1])\n\n\t\tparameters['b' + str(l)] = np.zeros( (layer_dims[l], 1) )*0.01\n\n\n\treturn parameters", "def _set_runtimes(self):\n self._run_times =np.zeros(self.n_runs, dtype = np.float)", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def 
initialize_output_times_from_tunables():\n duration = (\n h_to_s(SimulationDuration.value)\n if SimulationDuration.value > 0\n else float('inf')\n )\n\n output_interval = h_to_s(SimulationOutputInterval.value)\n\n if not SimulationOutputFirstState.value:\n last_output = 0.0\n else:\n last_output = -(h_to_s(SimulationOutputInterval.value) + sys.float_info.epsilon)\n\n return duration, output_interval, last_output", "def _prep_times(self):\n self.test_times = 'diagonal'\n if hasattr(self, 'times'):\n self.train_times = self.times\n if hasattr(self, 'times_'):\n self.train_times_ = self.times_\n self.test_times_ = _DecodingTime()\n self.test_times_['slices'] = [[slic] for slic in\n self.train_times_['slices']]\n self.test_times_['times'] = [[tim] for tim in\n self.train_times_['times']]\n if hasattr(self, 'scores_'):\n self.scores_ = [[score] for score in self.scores_]\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]", "def batchWallTime(cls, time, parsedCmd, numCpus):\n numTargets = 0\n for refList in parsedCmd.id.refList:\n numTargets += len(refList)\n return time*numTargets/float(numCpus)", "def __init__(self, total_time, *args, **kwargs):\n super(AdjustingFSNN, self).__init__(*args, **kwargs)\n\n self._adjustment_distribution = [\n (.05, 32, .6),\n (.15, 64, .75),\n (.70, 1024, .98),\n (.10, 4096, .996)\n ]\n self._start_time = None\n self._next_adjustment_index = 0\n\n self._adjustments = [\n (0,\n self._adjustment_distribution[0][1],\n self._adjustment_distribution[0][2])]\n\n time_alloted_so_far = total_time * self._adjustment_distribution[0][0]\n idx = 1\n while idx < len(self._adjustment_distribution):\n self._adjustments.append(\n (time_alloted_so_far,\n self._adjustment_distribution[idx][1],\n self._adjustment_distribution[idx][2]))\n this_allotment = self._adjustment_distribution[idx][0]\n time_alloted_so_far += total_time * this_allotment\n idx += 1", "def test_fit_prep_proba():\n args = get_layer('fit', 'manual', 'temporal', True, True, window=2, step_size=3)\n run_layer(*args)", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]", "def initParams(self):\n sizes = [self.inputDim]+self.layerSizes+[self.outputDim]\n scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]\n self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \\\n for n,m,s in zip(sizes[:-1],sizes[1:],scales)]\n self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]\n\n if self.train:\n # Now assuming that all layers are the same size\n self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]\n self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))\n self.deltasOut_M = cm.empty((sizes[1],self.maxBatch)) \n self.deltasIn_M = cm.empty((sizes[1],self.maxBatch)) \n self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))\n \n # Allocate memory once here and reuse\n # Store probs\n self.probs_M = cm.empty((self.outputDim,self.maxBatch))\n # Store col max\n self.rowVec_M = cm.empty((1,self.maxBatch))\n \n self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]\n for w,b in self.stack]\n\n if self.temporalLayer > 0:\n # dummy bias used for temporal layer\n dummy = cm.empty((1,1))\n dummy.assign(0.0)\n\n scale = np.sqrt(6)/np.sqrt(self.layerSize*2)\n wtf = 
cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n wtb = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n self.stack.append([wtf,dummy])\n self.stack.append([wtb,dummy])\n\n # forward and backward activations for temporal layer\n self.hActsFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.hActsBack_M = cm.empty((self.layerSize,self.maxBatch))\n\n if self.train:\n dwtf = cm.empty(wtf.shape)\n self.grad.append([dwtf,dummy])\n dwtb = cm.empty(wtb.shape)\n self.grad.append([dwtb,dummy])\n\n self.tmpGradBack_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasBack_M = cm.empty((self.layerSize,self.maxBatch))", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def preparation_time(self, preparation_time):\n\n self._preparation_time = preparation_time", "def main(layers=None, modules=None):\n test_runs = create_test_runs(layers=layers, modules=modules)\n discovered_layers = set(param.get(\"layer\", \"Unknown layer\") for param in test_runs)\n for layer in discovered_layers:\n logger.debug(\"Discovered: %s\", layer)\n logger.debug(\"Discovered %d layers in total.\", len(discovered_layers))\n\n discovered_modules = set(\n param.get(\"module\", \"Unknown module\") for param in test_runs\n )\n for module in discovered_modules:\n logger.debug(\"Discovered: %s\", module)\n logger.debug(\"Discovered %d modules in total.\", len(discovered_modules))\n\n logger.debug(\"Running %d test runs.\", len(test_runs))\n\n # Counter system congestion and hyperthreading, FWIW\n concurrency = max(1, cpu_count() // 2 - 1)\n logger.debug(\"Timing tests in up to %d processes in parallel.\", concurrency)\n pool = Pool(concurrency)\n\n logger.debug(\"Timing layers - this can take a while!\")\n start_time = time()\n results = sorted(\n pool.imap_unordered(run_tests, test_runs),\n key=lambda result: result.get(\"runtime\", 0.0),\n )\n\n pool.terminate()\n pool.join()\n\n wallclock = humanize_time(time() - start_time)\n logger.debug(\"Done timing layers in %s.\", wallclock)\n\n total_runtime = sum(result.get(\"runtime\", 0.0) for result in results)\n total_count = sum(result.get(\"count\", 0) for result in results)\n\n classname_width = max(len(result[\"classname\"]) for result in results)\n count_width = max(len(str(result.get(\"count\", 0))) + 4 for result in results)\n speed_width = max(\n len(\"{:.3f}\".format(result.get(\"speed\", 0))) + 4 for result in results\n )\n runtime_width = max(\n len(humanize_time(result.get(\"runtime\", 0.0))) + 4 for result in results\n )\n\n header = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}}\"\n \"{speed:>{speed_width}}\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:>10}\" # 9.2f\n \"{count_percentage:>10}\" # 9.2f\n \"{relative_weight:>11}\".format( # 10.2f\n classname=\"classname\",\n count=\"cnt\",\n speed=\"spd\",\n runtime=\"rt\",\n runtime_percentage=\"rt%\",\n count_percentage=\"cnt%\",\n relative_weight=\"wt%\",\n 
classname_width=classname_width,\n count_width=count_width + 6, # Suffix \" tests\"\n speed_width=speed_width + 9, # Suffix \" s / test\"\n runtime_width=runtime_width,\n )\n )\n logger.info(header)\n header_width = len(header)\n logger.info(\"=\" * header_width)\n\n for result in results:\n classname = result[\"classname\"]\n count = result.get(\"count\", 0)\n runtime = result.get(\"runtime\", 0.0)\n speed = result.get(\"speed\", 0.0)\n runtime = result.get(\"runtime\", 0)\n\n runtime_percentage = runtime / total_runtime\n count_percentage = float(count) / float(total_count)\n try:\n relative_weight = runtime_percentage / count_percentage\n except ZeroDivisionError:\n # Something failed and count thus is 0\n relative_weight = 0.0\n\n runtime = humanize_time(runtime)\n line = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}} tests\"\n \"{speed:>{speed_width}.3f} s / test\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:9.2f}%\"\n \"{count_percentage:>9.2f}%\"\n \"{relative_weight:>10.2f}%\".format(\n classname=classname,\n count=count,\n speed=speed,\n runtime=runtime,\n runtime_percentage=runtime_percentage * 100,\n count_percentage=count_percentage * 100,\n relative_weight=relative_weight * 100,\n classname_width=classname_width,\n count_width=count_width,\n speed_width=speed_width,\n runtime_width=runtime_width,\n )\n )\n logger.info(line)\n\n total = humanize_time(total_runtime)\n total_runtime_width = len(total)\n wallclock_width = len(wallclock)\n totals_width = max(wallclock_width, total_runtime_width)\n\n total_line = \"Total: {:>{totals_width}}\".format(\n total, totals_width=totals_width\n )\n wallclock_line = \"Wallclock: {:>{totals_width}}\".format(\n wallclock, totals_width=totals_width\n )\n logger.info(\"-\" * header_width)\n logger.info(\"Sorted by runtime.\")\n logger.info(\"\")\n logger.info(total_line)\n logger.info(wallclock_line)\n\n failed_runs = [result for result in results if result.get(\"failed\")]\n if failed_runs:\n logger.warn(\"Test run failures detected - YMMV!\")\n for run in failed_runs:\n logger.warn(\"Failures in: %s\", run.get(\"classname\", \"Unknown test class\"))", "def preprocessData(self):\r\n time = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\r\n\r\n with open(self.dirName+'config_files/'+time+'.json', 'w') as f:\r\n json.dump(self.data_config, f, indent=4, separators=(',', ': '))\r\n prepper = DataPrep(self.config, self.dirName+time)\r\n prepper.data_prep('training_episodes')\r\n prepper.data_prep('validation_episodes')\r\n\r\n return time", "def new_time(self, state, time, last_value, independent_sources):\n sum1 = np.matmul(state.__active_control[1],last_value) - state.__active_control[2]\n if (abs(sum1) > self.__absolute_error):\n sum2 = np.matmul(state.__active_control[1], np.matmul(self.__A, last_value) + np.matmul(self.__B, independent_sources))\n sum3 = np.matmul(state.__active_control[1], np.matmul(self.__A**2, last_value) + np.matmul(self.__A, \\\n np.matmul(self.__B, independent_sources)))\n return time + 1.0 / (sum3 / 2 / sum2 - sum2 / sum1)\n else:\n return -1", "def run(layers):", "def print_layer_times(filename, only_model = -1):\n results = load_events.load_values(\n filename,\n event_names=['minibatch_time', 'objective_evaluation_time', 'objective_differentiation_time'],\n layer_event_names=['fp_time', 'bp_time', 'update_time', 'imcomm_time', 'opt_time'],\n model=-1)\n for model in results.keys():\n if model != only_model and only_model != -1:\n continue\n print('Model {0}:'.format(model))\n fp_tot = 0.0\n 
bp_tot = 0.0\n update_tot = 0.0\n imcomm_tot = 0.0\n opt_tot = 0.0\n for layer in results[model]['fp_time'].keys():\n fp_mean = np.mean(results[model]['fp_time'][layer])\n l_fp_tot = np.sum(results[model]['fp_time'][layer])\n bp_mean = np.mean(results[model]['bp_time'][layer])\n l_bp_tot = np.sum(results[model]['bp_time'][layer])\n update_mean = np.mean(results[model]['update_time'][layer])\n l_update_tot = np.sum(results[model]['update_time'][layer])\n imcomm_mean = 0.0\n l_imcomm_tot = 0.0\n if 'imcomm_time' in results[model] and layer in results[model]['imcomm_time']:\n imcomm_mean = np.mean(results[model]['imcomm_time'][layer])\n l_imcomm_tot = np.sum(results[model]['imcomm_time'][layer])\n opt_mean = 0.0\n l_opt_tot = 0.0\n if 'opt_time' in results[model] and layer in results[model]['opt_time']:\n opt_mean = np.mean(results[model]['opt_time'][layer])\n l_opt_tot = np.sum(results[model]['opt_time'][layer])\n fp_tot += l_fp_tot\n bp_tot += l_bp_tot\n update_tot += l_update_tot\n imcomm_tot += l_imcomm_tot\n opt_tot += l_opt_tot\n portion = imcomm_mean / (fp_mean + bp_mean + update_mean + imcomm_mean + opt_mean) * 100\n print('Layer {0}:\\tfp={1:<10.4}\\tbp={2:<10.4}\\tupdate={3:<10.4}\\topt={4:<10.4}\\timcomm={5:<10.4}\\tportion={6:.4}%'.format(\n layer, fp_mean, bp_mean, update_mean, opt_mean, imcomm_mean, portion))\n print(' '*len('layer {0}'.format(layer)) +\n ':\\tfp={0:<10.4}\\tbp={1:<10.4}\\tupdate={2:<10.4}\\topt={3:<10.4}\\timcomm={4:<10.4}\\tportion={5:.4}%'.format(\n l_fp_tot, l_bp_tot, l_update_tot, l_opt_tot, l_imcomm_tot,\n l_imcomm_tot / (l_fp_tot + l_bp_tot + l_update_tot + l_opt_tot + l_imcomm_tot) * 100))\n print('Total: fp={0:.4} bp={1:.4} update={2:.4} opt={3:.4} imcomm={4:.4} portion={5:.4}%'.format(\n fp_tot, bp_tot, update_tot, opt_tot, imcomm_tot,\n imcomm_tot / (fp_tot + bp_tot + update_tot + opt_tot + imcomm_tot) * 100))\n print('mbavg={0:.4} mbtot={1:.6} objvalavg={2:.4} objvaltot={3:.6} objgradavg={4:.4} objgradtot={5:.6}'.format(\n np.mean(results[model]['minibatch_time']), np.sum(results[model]['minibatch_time']),\n np.mean(results[model]['objective_evaluation_time']),\n np.sum(results[model]['objective_evaluation_time']),\n np.mean(results[model]['objective_differentiation_time']),\n np.sum(results[model]['objective_differentiation_time'])))", "def __init__(self, features_number, surfaces_dimensions, taus, first_layer_polarities,\n delay_coeff, net_seed = 0, verbose=False):\n self.basis = []\n self.activations = []\n self.taus = taus\n self.layers = len(features_number)\n self.surfaces_dimensions = surfaces_dimensions\n self.features_number = features_number\n self.delay_coeff = delay_coeff\n self.verbose = verbose\n self.polarities = []\n self.polarities.append(first_layer_polarities)\n # attribute containing all surfaces computed in each layer and sublayer\n self.surfaces = []\n # attribute containing all optimization errors computed in each layer \n # and sublayer\n self.errors = []\n #setting the seed\n rng = np.random.RandomState()\n if (net_seed!=0):\n rng.seed(net_seed)\n # In the first layer I am going to process only 2 polarities corresponging\n # to on off events\n num_polarities = 1 \n for layer, nfeatures in enumerate(features_number):\n #basis and activations of a single sublayer\n sublayers_basis = []\n sublayers_activations = []\n self.polarities.append(nfeatures)\n for sublayer in range(2**layer):\n #basis and activations of a single layer\n basis_set = []\n activations_set = []\n for j in range(nfeatures):\n 
basis_set.append(rng.rand(surfaces_dimensions[layer][1], surfaces_dimensions[layer][0]*num_polarities))\n basis_set[j][surfaces_dimensions[layer][1]//2, [surfaces_dimensions[layer][0]//2 + surfaces_dimensions[layer][0]*a for a in range(num_polarities)]] = 1\n #activations, or aj (as in the paper) are set randomly between -1 and 1\n activations_set.append((rng.rand()-0.5)*2)\n sublayers_basis.append(np.array(basis_set))\n sublayers_activations.append(np.array(activations_set))\n self.basis.append(sublayers_basis)\n self.activations.append(sublayers_activations)\n num_polarities = nfeatures", "def time_conv_block(model, nb_layers, nb_filters):\n\n for _ in range(nb_layers):\n model = time_conv_layer(model, nb_filters)\n model = MaxPooling1D()(model)\n model = Dropout(0.1)(model)\n return model", "def increment_time_step(self):\n for grid in self.get_grid_list():\n try:\n self[grid].increment_time_step()\n except AttributeError:\n pass", "def __init__(self, layer, cNextLayer, batch):\n\n self._layer = layer\n self._oldWDeltas = [[0.0] * cNextLayer for _ in layer]\n self._batch = batch\n if batch:\n self._wDeltas = [[0.0] * cNextLayer for _ in layer]", "def test_transform_prep():\n args = get_layer('transform', 'manual', 'temporal', False, True, window=2, step_size=3)\n run_layer(*args)", "def compute(self):\n wavelet_dict = dict()\n self.count_dict = dict()\n windows = self.compute_windows()\n self.windows = windows\n if self.verbose > 0:\n print(\n 'Number of time windows before selection: {}'\n .format(len(windows)))\n\n wavelet_dict = self.initialize(windows)\n\n wavelet_dict = self.run_one_iteration(windows, wavelet_dict)\n\n wavelet_dict = self.run_one_iteration(windows, wavelet_dict)\n\n masks = self.remove_bad_windows(windows, wavelet_dict, self.min_cc)\n if self.verbose > 0:\n print(\n 'Number of time windows after selection: {}'\n .format((masks==True).sum()))\n\n for i, window in enumerate(self.windows):\n if masks[i]:\n if window.event.event_id not in self.count_dict:\n self.count_dict[window.event.event_id] = 1\n else:\n self.count_dict[window.event.event_id] += 1\n\n wavelet_dict = self.run_one_iteration(\n windows, wavelet_dict, masks=masks)\n\n stf_dict = self.process_causal_wavelet(wavelet_dict)\n\n self.wavelet_dict = wavelet_dict\n self.stf_dict = stf_dict\n self.masks = masks", "def _calibrate_time(self):\n time_overhead = 0\n for i in range(1000):\n start = self._adjusted_time()\n end = self._adjusted_time()\n time_overhead += end - start\n return time_overhead / 1000", "def initialize_parameters(layer_dim):\n #tf.set_random_seed(0)\n L= len(layer_dim)\n parameters={}\n for i in range(1,L):\n parameters[\"W\" +str(i)] = tf.get_variable(\"W\"+str(i), [layer_dim[i],layer_dim[i-1]], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n parameters[\"b\" +str(i)] = tf.get_variable(\"b\" +str(i),[layer_dim[i],1],initializer= tf.zeros_initializer())\n assert(parameters['W' + str(i)].shape == (layer_dim[i], layer_dim[i-1]))\n assert(parameters['b' + str(i)].shape == (layer_dim[i], 1))\n return parameters", "def test_transform_prep_proba():\n args = get_layer('transform', 'manual', 'temporal', True, True, window=2, step_size=3)\n run_layer(*args)", "def on_epoch_start(self, state: _State):\n optimizer = self._optimizer\n\n if self.decouple_weight_decay:\n self._optimizer_wd = [\n group.get(\"weight_decay\", 0.0)\n for group in optimizer.param_groups\n ]\n for i in range(len(optimizer.param_groups)):\n optimizer.param_groups[i][\"weight_decay\"] = 0.0\n else:\n 
self._optimizer_wd = [0.0] * len(optimizer.param_groups)", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h", "def initialize_parameters_for_layers(layer_list, training_examples):\n W = [None] * len(layer_list)\n B = [None] * len(layer_list)\n X = np.random.randn(layer_list[0], training_examples)\n for j in range(1, len(layer_list)):\n W[j] = np.random.randn(layer_list[j], layer_list[j-1])\n B[j] = np.random.randn(layer_list[j], 1)\n return X, W, B", "def __init__(self, epochs, **kwargs):\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0", "def initialize(self):\n\n\t\tparameters = {}\n\t\tL = len(self.layer_dims) # number of layers in the network\n\n\t\tfor l in range(1, L):\n\t\t\tparameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1]) * 0.01\n\t\t\tparameters['b' + str(l)] = np.zeros((self.layer_dims[l], 1))\n\n\t\t\tassert(parameters['W' + str(l)].shape == (self.layer_dims[l], self.layer_dims[l-1]))\n\t\t\tassert(parameters['b' + str(l)].shape == (self.layer_dims[l], 1))\n\n\t\treturn parameters", "def precalc_all(REPS):\n for sigma in [0.25, 1.5]:\n print('-'*60)\n\n N_RANGE = arange(5,105,5)\n\n filename = f'categorical_K2_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,n,n) for n in N_RANGE], C, sigma, M, REPS)\n\n filename = f'categorical_LOO_C{C}_sigma{sigma:.2f}_M{M}'.replace('.','_')\n with Timer(f'{filename} ({REPS} repetitions)'):\n run_precalc(filename, [(n,1,n) for n in N_RANGE], C, sigma, M, REPS)", "def lammps(parameter, iteration, numberlist, density, chainlength):\n # input parameters\n var = parameter[0]\n x = var * numberlist[0]\n y = var * numberlist[1] * 1.732 / 2\n z = x / 2\n #nodeid = 55 + iteration % 5\n M = int((x * y * z * density - 4 * numberlist[0] * numberlist[1] * int(z / 0.3)) / chainlength) \n os.environ['var'] = str(var)\n os.environ['iteration'] = str(iteration)\n os.environ['M'] = str(M)\n #os.environ['nodeid'] = str(nodeid)\n # copy the files to modify\n os.system(\"cp in.asymmetric in.asymmetric_$iteration\")\n os.system(\"cp run_lammps.sh run_lammps_$iteration.sh\")\n os.system(\"cp MultiBCP_hexagonal_post.py MultiBCP_hexagonal_post_$iteration.py\")\n # modify relevant files\n os.system('sed -i -e \"s/distance = 12/distance = $var/\" MultiBCP_hexagonal_post_$iteration.py')\n os.system(\"python MultiBCP_hexagonal_post_$iteration.py\")\n time.sleep(3)\n os.system('sed -i -e \"s/3072/$M/\" in.asymmetric_$iteration')#modify according to the parameters\n os.system('sed -i -e \"s/XN_100/XN_100_$iteration/\" in.asymmetric_$iteration')\n os.system('sed -i -e \"s/finalasymmetric/finalasymmetric_$iteration/\" in.asymmetric_$iteration')\n os.system('sed -i -e \"s/POSTA2B8/POSTA2B8_$iteration/\" run_lammps_$iteration.sh')\n os.system('sed -i -e \"s/posta2b8.out/posta2b8_$iteration.out/\" run_lammps_$iteration.sh')\n #os.system('sed -i -e \"s/node55/node$nodeid/\" 
run_lammps_$iteration.sh')\n os.system('sed -i -e \"s/in.asymmetric/in.asymmetric_$iteration/\" run_lammps_$iteration.sh')\n # run the simulation\n os.system(\"sbatch run_lammps_$iteration.sh\")\n return None", "def _update_parameters(self, deltas):\n assert len(deltas) == len(self.layers), \\\n \"length of deltas must match number of layers\"\n\n if deltas is not None:\n for (layer, delta) in zip(self.layers, deltas):\n layer._update_parameters(delta)", "def OptimizeL(L,nepochs=10,rho=.5,noise=.05,npred=200,Href=None): \n N = len(L) \n HS = np.zeros((N,2,nepochs+1))\n\n Lp = []\n error = []\n\n for i in range(N):\n t = L[i]\n h = EntropyKS(t)\n dd0 = DegreeDist(t,npred,directed=True)\n s = EntropyD(dd0)\n Lp.append(t)\n HS[i,:,0]=[h,s]\n \n if not Href is None:\n error.append(MyKLD(Href,HS[:,:,0]))\n\n Changes = []\n for epoch in range(nepochs):\n Lp2 = []\n changes = 0\n for nn,t in enumerate(Lp):\n h0 = HS[nn,0,epoch]\n s0 = HS[nn,1,epoch]\n\n t1,h1,s1,change = Optimize(t,h0,s0,rho,noise,npred)\n Lp2.append(t1)\n HS[nn,:,epoch+1] = np.array([h1,s1])\n changes+=change\n Lp = Lp2\n Changes.append(changes)\n if not Href is None:\n error.append(MyKLD(Href,HS[:,:,epoch+1]))\n return(Lp,HS,np.array(Changes),np.array(error))", "def time_conv_layer(model, nb_filters):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', activation='relu')(model)\n model = BatchNormalization()(model)\n return model", "def __init__(self, n_layers, layers_sizes, activation='sigmoid', learning_rate=0.1, weight_init='normal', batch_size=1000, num_epochs=100):\n self.layers_sizes=layers_sizes[1:]\n self.activation=activation\n self.learning_rate=learning_rate\n self.weight_init=weight_init\n self.batch_size=batch_size\n self.num_epochs=num_epochs\n self.weights={}\n self.n_layers=len(self.layers_sizes)\n self.num_samples=0\n self.training_loss_values=[]\n self.testing_loss_values=[]\n self.gg=0.01\n self.XTEST=None\n self.YTEST=None\n self.TTTT=None\n\n if activation not in self.acti_fns:\n raise Exception('Incorrect Activation Function')\n\n if weight_init not in self.weight_inits:\n raise Exception('Incorrect Weight Initialization Function')\n pass", "def update_parameters(parameters, grads, learning_rate):\n L = len(parameters) // 2\n\n for i in range(L):\n parameters[\"W\"+str(i+1)] = parameters[\"W\"+str(i+1)] - learning_rate * grads[\"dW\"+str(i+1)]\n parameters[\"b\"+str(i+1)] = parameters[\"b\"+str(i+1)] - learning_rate * grads[\"db\"+str(i+1)]\n\n return parameters", "def roll_params(layers, param_type):\n theta = np.empty(0)\n\n for layer in layers:\n params = getattr(layer, param_type)\n if params is not None:\n for k in params:\n vector = params[k]\n # Flatten and append the vector\n vector = vector.flatten()\n theta = np.concatenate((theta, vector))\n\n return theta", "def __init__(\n self,\n lca_layer: LCALayer,\n num_lca_dim: int,\n num_simulations: int = 10000,\n num_time_steps: int = 3000,\n ):\n\n super().__init__()\n\n self.lca_layer = lca_layer\n self.num_lca_dim = num_lca_dim\n self.num_simulations = num_simulations\n self.num_time_steps = num_time_steps", "def cost_function(params, count):\n circuit = models.Circuit(nqubits)\n for l in range(layers):\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(0, nqubits - 1, 2):\n circuit.add(gates.CZ(q, q + 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(1, nqubits - 2, 2):\n circuit.add(gates.CZ(q, q + 1))\n circuit.add(gates.CZ(0, nqubits - 1))\n for q in 
range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n\n cost = 0\n circuit.set_parameters(\n params\n ) # this will change all thetas to the appropriate values\n for i in range(len(ising_groundstates)):\n final_state = circuit(np.copy(ising_groundstates[i]))\n cost += np.real(encoder.expectation(final_state.state()))\n\n if count[0] % 50 == 0:\n print(count[0], cost / len(ising_groundstates))\n count[0] += 1\n\n return cost / len(ising_groundstates)", "def unroll_params(theta, layers, param_type):\n i = 0\n for layer in layers:\n params = getattr(layer, param_type)\n if params is not None:\n for k in params:\n vector = params[k]\n # Extract and reshape the parameter to the original form\n j = i + np.prod(vector.shape)\n params[k] = theta[i:j].reshape(vector.shape)\n i = j", "def prep_base(self):\n\n self.config.logger.info(\"Preparing base layer land use data...\")\n\n # set start time\n t0 = time.time()\n\n # extract and process base layer land cover data\n base_data = rdr.read_base(self.config, self.observed_landclasses, self.sequence_metric_dict,\n metric_seq=self.metric_sequence_list, region_seq=self.region_sequence_list)\n\n # unpack variables\n self.spat_ludata, self.spat_water, self.spat_coords, self.spat_aez_region, self.spat_grid_id, self.spat_aez, \\\n self.spat_region, self.ngrids, self.cellarea, self.celltrunk, self.sequence_metric_dict = base_data\n\n self.config.logger.info('PERFORMANCE: Base spatial landuse data prepared in {0} seconds'.format(time.time() - t0))", "def __zone_prep(self, output_times):\n\t\treturn self.__c_version.prep(output_times)", "def _prepare(self):\n # Time list\n self.time_list = []\n # Distance array\n if self._fxn[0] is True:\n self.res_dists, self.res_keys = build_reslist_dict(self._rpl)\n\n # Distance between alpha carbons\n if self._fxn[1] is True:\n self.ca_dists, self.ca_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of mass\n if self._fxn[2] is True:\n self.cm_dists, self.cm_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of geometry\n if self._fxn[3] is True:\n self.cg_dists, self.cg_keys = build_reslist_dict(self._rpl)", "def __init__(self, time_constant: float, sampling_time: float):\n self.alpha = sampling_time / (time_constant + sampling_time)\n self.state = None", "def optimize(self, layer_list, epoch):\n lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)\n param_list = get_param_list(layer_list)\n\n scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)\n\n for (param, grad), states in param_list:\n param.rounding = self.stochastic_round\n if len(states) == 0:\n states.append(self.be.zeros_like(grad))\n states.append(self.be.zeros_like(grad))\n grad = grad / self.be.bsz\n grad = self.clip_gradient_value(grad, self.gradient_clip_value)\n\n velocity = states[0]\n velocity_backup = states[-1]\n\n velocity_backup[:] = velocity\n velocity[:] = (self.momentum_coef * velocity -\n lrate * (scale_factor * grad + self.wdecay * param))\n param[:] = (param + velocity * (1 + self.momentum_coef) -\n self.momentum_coef * velocity_backup)", "def create_multilayer_lstm_params(num_layers, in_size, state_size, name=\"\"):\n lstm_layers = []\n for i in range(num_layers):\n layer_name = name + \"-\" + str(i)\n print(\"LSTM \" + layer_name + \": \" + str(in_size) + \" x \" + str(state_size) + \"; default Dynet initialization of hidden weights\")\n lstm_layer = torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True)\n 
lstm_layers.append(lstm_layer)\n in_size = state_size\n return torch.nn.ModuleList(lstm_layers)", "def time(tp, Ks, F, Fp, presHead, thetaSat, thetaInit):\n\n numeratorLN = Fp + np.absolute(presHead)*(thetaSat - thetaInit)\n denomLN = F + np.absolute(presHead)*(thetaSat - thetaInit)\n naturalLog = np.log(numeratorLN/denomLN)\n\n product1 = np.absolute(presHead)*(thetaSat - thetaInit)*naturalLog\n brackets = F - Fp + product1\n\n product2 = (1/Ks)*brackets\n time = tp + product2\n return time", "def waiting_times(all_data):\n print('Computing waiting times')\n result = {'p': [], 'alpha': [], 'durations': []}\n for data in all_data:\n N = data['config']['N']\n p = data['config']['p']\n alpha = data['config']['alpha']\n print(f'p = {p}, alpha = {alpha}')\n\n # find dominant strategy at each point in time\n print(' > Finding dominant strategies')\n dom_strats = np.asarray(list(map(lambda e: get_dominant_strategy(e), data['snapshots'])))\n print(f' >> Found {np.unique(dom_strats).size} unique strategies')\n\n if np.unique(dom_strats).size <= 1:\n print(' >> Skipping')\n continue\n\n # detect dominant strategy changes (and durations)\n print(' > Computing durations')\n durations = get_domain_durations(dom_strats)\n durations /= N**2\n print(f' >> Found {durations.size} durations')\n\n # store result\n result['p'].extend([p]*len(durations))\n result['alpha'].extend([alpha]*len(durations))\n result['durations'].extend(durations)\n\n df = pd.DataFrame(result)\n\n # plot w-time distributions\n print(' > Plotting')\n for p in df['p'].unique():\n sub = df[df['p']==p]\n\n plt.figure()\n for alpha, group in sub.groupby(['alpha']):\n sns.distplot(\n group['durations'],\n kde=False, label=rf'$\\alpha={alpha}$')\n\n plt.title(rf'Distribution of waiting times ($p={p}$)')\n plt.xlabel(r'$\\Delta t$')\n plt.ylabel(r'count')\n plt.legend(loc='best')\n\n plt.savefig(f'images/waiting_times_p{p}.pdf')\n\n ## plot wtd dependence on parameters\n plt.figure()\n sns.boxplot(x='alpha', y='durations', hue='p', data=df)\n plt.savefig('images/waiting_times_vs_alpha.pdf')\n plt.close()\n\n return df", "def init_weights(num_tilings, tiles_per_dim, num_dims, num_actions):\n weights = np.zeros((num_tilings*tiles_per_dim**num_dims*num_actions))\n return weights", "def build_layers(self):\n raise NotImplementedError", "def run(self, times=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 52, 53, 54]):\n self.mse_per_step = np.array([])\n self.all_alphas = np.array([])\n start_time = time.time()\n for t in times:\n mse, alphas = self.one_step(t)\n self.mse_per_step = np.append(self.mse_per_step, mse)\n self.all_alphas = np.append(self.all_alphas, alphas)\n np.savetxt('alphas_per_T_new2.txt', self.all_alphas)\n\n\n print(\"Done in\", time.time() - start_time, \"seconds.\")\n print(\"T's\", times)\n print(\"Scores:\", self.mse_per_step)", "def inititalize_parameters(self, nodes_of_layers, training_data_size):\n\n\t\tassert(self.layers == len(nodes_of_layers))\n\t\tassert(2 == len(training_data_size))\n\t\tself.w_array = [np.array([0])]\n\t\tself.b_array = [np.array([0])]\n\t\tfeatures, nums = training_data_size\n\n\t\t# initialize the parameters of layer one\n\t\tself.w_array.append(np.random.randn(nodes_of_layers[0], features)\n\t\t\t\t\t\t\t* np.sqrt(1 / nums))\n\t\tself.b_array.append(np.zeros((nodes_of_layers[0], 1)))\n\n\t\tfor layer in range(1, self.layers):\n\t\t\tself.w_array.append(np.random.randn(nodes_of_layers[layer],\n\t\t\t\t\t\t\t\tnodes_of_layers[layer - 1])\n\t\t\t\t\t\t\t\t* 
np.sqrt(1 / nodes_of_layers[layer - 1]))\n\t\t\tself.b_array.append(np.zeros((nodes_of_layers[layer], 1)))\n\t\treturn self.w_array, self.b_array", "def construct_density_network(flow_dict, D_Z, T):\n latent_layers = construct_latent_dynamics(flow_dict, D_Z, T)\n time_invariant_layers = construct_time_invariant_flow(flow_dict, D_Z, T)\n\n layers = latent_layers + time_invariant_layers\n nlayers = len(layers)\n\n num_theta_params = 0\n for i in range(nlayers):\n layer = layers[i]\n print(i, layer)\n num_theta_params += count_layer_params(layer)\n\n return layers, num_theta_params", "def timesteps_experiment():\n\n print(\"TIMESTEPS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'timestep_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'time_steps'\n changing_param_value = [1, 2, 4, 8, 16, 32, 64, 128, 256]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n set_params(use_word_emb=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def residual_net(n,bottom,total_depth, nclasses, use_global_stats=False,return_layer=None):\n # figure out network structure\n net_defs = {\n 18:([2, 2, 2, 2], \"standard\"),\n 34:([3, 4, 6, 3], \"standard\"),\n 50:([3, 4, 6, 3], \"bottleneck\"),\n 101:([3, 4, 23, 3], \"bottleneck\"),\n 152:([3, 8, 36, 3], \"bottleneck\"),\n }\n alpha = string.ascii_lowercase\n assert total_depth in net_defs.keys(), \"net of depth:{} not defined\".format(total_depth)\n\n # nunits_list a list of integers indicating the number of layers in each depth.\n nunits_list, unit_type = net_defs[total_depth] \n nouts = [64, 128, 256, 512] # same for all nets\n \n n.conv1, n.bn1, n.scale1 = conv_bn_scale(bottom, ks = 7, \n stride = 2, nout = 64, pad = 3,\n use_global_stats=use_global_stats)\n n.conv1_relu = L.ReLU(n.scale1, in_place=True)\n n.pool1 = L.Pooling(n.conv1_relu, stride = 2, kernel_size = 3, pool=P.Pooling.MAX)\n \n U=n.pool1\n \n # make the convolutional body\n for i,(nout, nunits) in enumerate(zip(nouts, nunits_list)): # for each depth and nunits\n for unit,a in zip(range(1, nunits + 1),alpha): # for each unit. 
Enumerate from 1.\n# s = str(nout) + '_' + str(unit) + '_' # layer name prefix\n s= 'res{}{}'.format(i+2,a)\n# print(s)\n newdepth = 2 if unit is 1 else 0\n if i is 0 and newdepth:\n newdepth=1\n if unit_type == \"standard\":\n\n U=residual_standard_unit(n,U, nout, s, newdepth = newdepth, use_global_stats=use_global_stats)\n else:\n U=residual_bottleneck_unit(n,U, nout, s, newdepth = newdepth, use_global_stats=use_global_stats)\n\n # add the end layers \n n.global_pool = L.Pooling(U, pooling_param = dict(pool = 1, global_pooling = True))\n setattr(n,'fc'+str(nclasses), L.InnerProduct(n.global_pool, num_output = nclasses,\n param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]))\n \n if return_layer is None:\n return getattr(n,'fc'+str(nclasses))\n else:\n return getattr(n,return_layer)", "def preprocess():\n #get a list of all sentinel-image filenames\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n #read in a csv-file with information about the cluster\n csvpath = os.path.abspath(os.path.join(os.path.abspath(__file__),\"../../dataResearch/Data_with_Pooled.csv\"))\n df = pd.read_csv(csvpath)\n #get the min and max values per band \n minmaxlist = minmax()\n timelist = []\n print(\"STEP 2/2\")\n print(\"CREATING TFRECORDS\")\n for i in s2files:\n start = time.time()\n s2file = s2path + \"/\" + i\n #Get Features out of the Dataframe\n #get the name of the label (equals the SurveyID in the data)\n labelname = i.replace(\".tif\",\"\")\n #get the index of the entry to get the information out of the dataframe\n index = df.ID[df.ID == labelname].index\n wealthpooled = float(df['wealthpooled'].loc[index].max().replace(\",\",\".\"))\n wealthpooled5country = float(df['wealthpooled5country'].loc[index].max().replace(\",\",\".\"))\n country = bytes(df['country'].loc[index].max(), 'utf-8')\n urbanrural = bytes(df['URBAN_RURA'].loc[index].max(), 'utf-8')\n csvlat = float(df['LATNUM'].loc[index].max().replace(\",\",\".\"))\n csvlon = float(df['LONGNUM'].loc[index].max().replace(\",\",\".\"))\n year = int(df['year'].loc[index].max())\n wealth = float(df['wealth'].loc[index].max().replace(\",\",\".\"))\n #Get all Bands out of the GEOTIFF File\n s2raster = gdal.Open(s2file)\n bandlist = []\n for n in range(s2raster.RasterCount):\n f = n+1\n if n not in [13,14,15]:\n s2band = s2raster.GetRasterBand(f)\n s2band = s2band.ReadAsArray()\n s2band = np.resize(s2band,(1050,1050)).flatten()\n min = minmaxlist[n][0]\n max = minmaxlist[n][1]\n s2band = (s2band-min)/(max-min)\n bandlist.append(s2band.flatten())\n #get the Nightlight Band out of the GEOTIFF File\n nlfile = nlpath + \"/\" + i\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n nlband = nlband.ReadAsArray()\n nlband = np.resize(nlband,(1050,1050)).flatten()\n min = minmaxlist[13][0]\n max = minmaxlist[13][1]\n nlband = (nlband-min)/(max-min)\n bandlist.append(nlband)\n #create a TFRecords-File with the TFRecordWriter\n with tf.io.TFRecordWriter(exportpath + '/' + labelname + '.tfrec') as writer:\n example = serialize_example(B1=bandlist[0],\n B2=bandlist[1],\n B3=bandlist[2],\n B4=bandlist[3],\n B5=bandlist[4],\n B6=bandlist[5],\n B7=bandlist[6],\n B8=bandlist[7],\n B8A=bandlist[8],\n B9=bandlist[9],\n B10=bandlist[10],\n B11=bandlist[11],\n B12=bandlist[12],\n NL=bandlist[13],\n wealth=wealth,\n wealthpooled=wealthpooled,\n wealthpooled5country=wealthpooled5country,\n country=country,\n urbanrural=urbanrural,\n lon_coord=csvlon,\n lat_coord=csvlat,\n year=year)\n 
writer.write(example)\n end = time.time()\n timelist.append(end-start)\n print(\"Done!\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%d:%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still 
contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def update_parameters(params, grads, alpha):\n n_layers = len(params) // 2\n for i in range(n_layers):\n params['w%s' % (i+1)] = (\n params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])\n params['b%s' % (i+1)] = (\n params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])\n return params", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def _build_layer(filters, repetition, 
is_k3=False, is_down=False):\n def f(input):\n # 第一个瓶颈层,有些特殊,它的stride和kernel_size是要根据配置来调整的,而且和他的原型Resnet中对应的层处理的方式有差别!\n # 即kernel和stride的设置有区别!\n # 参见ResnetBuilder.py内的注释.\n input = _bottleneck(filters, is_k3=is_k3, is_down=is_down)(input)\n for _ in range(1, repetition):\n input = _bottleneck(filters)(input)\n return input\n\n return f", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def Problem1():\n sizes = [100,200,400,800]\n func1_times = []\n func2_times = []\n func3_times = []\n func4_times = []\n func5_times = []\n \n for n in sizes:\n before = time.time()\n func1(n)\n func1_times.append(time.time()-before)\n \n before = time.time()\n func2(n)\n func2_times.append(time.time()-before)\n \n before = time.time()\n func3(n)\n func3_times.append(time.time()-before)\n \n before = time.time()\n func4(n)\n func4_times.append(time.time()-before)\n \n before = time.time()\n func5(n)\n func5_times.append(time.time()-before)\n \n plt.plot(sizes, func1_times, label=\"Function 1\")\n plt.plot(sizes, func2_times, label=\"Function 2\")\n plt.plot(sizes, func3_times, label=\"Function 3\")\n plt.plot(sizes, func4_times, label=\"Function 4\")\n plt.plot(sizes, func5_times, label=\"Function 5\")\n plt.legend(loc=\"upper left\")\n plt.show()", "def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n 
for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final", "def __init__(self, paramContainer, dt, grid, pumpFunction=None,\n damping='default', psiInitial=None, REAL_DTYPE=np.float32,\n COMPLEX_DTYPE=np.complex64):\n self.grid = grid\n self.paramContainer = paramContainer\n params = paramContainer.getGPEParams()\n self.singleComp = True if 'gamma_nl' in params else False\n self.x, self.y = grid.getSpatialGrid()\n self.dx_scaled = grid.dx_scaled\n self.dx_unscaled = grid.dx_unscaled\n self.K = grid.getKSquaredGrid()\n # This is already scaled because we obtained it from the scaled grid.\n self.max_XY = np.abs(self.x[-1, -1])\n self.N = int(self.x.shape[0])\n self.dt = dt\n self.time = 0\n self.nSteps = 0\n assert self.x.shape == self.y.shape, \"Spatial grids are not the same\\\n shape\"\n assert self.x.shape == self.K.shape, \"Spatial grids are not the same\\\n shape as k grid\"\n assert self.x.shape == (self.N, self.N), 'Grid must be square.'\n\n # Check we have the required parameters and assign them\n for key in self.__class__.__requiredParams:\n if key not in params:\n raise ValueError(\"Required Parameter %s missing\" % key)\n self.__setattr__(key, params[key])\n if self.singleComp:\n self.gamma_nl = params[\"gamma_nl\"]\n 
self.max_XY_scaled = self.max_XY * self.charL\n # Create arrays for damping, etc.\n if damping == 'default':\n damping = UtilityFunctions.RadialTanhDamping(\n self.max_XY, k=10.0).unscaledFunction()(self.x, self.y)\n elif damping == 'rectangular':\n damping = UtilityFunctions.RectangularTanhDamping(\n self.max_XY, k=10).unscaledFunction()(self.x, self.y)\n self.damping = damping\n if not psiInitial:\n psi0 = (np.abs(np.random.normal(size=(self.N, self.N),\n scale=10e-5, loc=10e-4))\n + 0.0j*np.random.normal(size=(self.N, self.N),\n scale=10e-4, loc=10e-3))\n else:\n psi0 = psiInitial(self.x, self.y) + 0j\n currentDensity = np.absolute(psi0) ** 2\n if not pumpFunction:\n pumpFunction = lambda x, y: np.zeros_like(x)\n Pdt = pumpFunction(self.x, self.y) * self.dt\n expFactorPolFirst = self.dt * (0.5 * self.R - 1j * self.g_R)\n expFactorPolThird = -0.5 * self.gamma_C * self.dt\n\n if self.singleComp:\n expFactorPolSecond = - (self.gamma_nl + 1j * self.g_C) * self.dt\n n = Pdt / (self.dt * self.gamma_R)\n else:\n expFactorPolSecond = - 1j * self.g_C * self.dt\n n = np.zeros_like(psi0, dtype=np.float64)\n kineticFactorHalf = np.exp(-1.0j * self.k * self.K * self.dt / 2.0)\n\n from skcuda.misc import add, multiply\n from pycuda.cumath import exp\n self.add = add\n self.multiply = multiply\n self.exp = exp\n self.cu_fft = cu_fft\n\n self.damping = gpuarray.to_gpu(damping.astype(REAL_DTYPE))\n self.psi = gpuarray.to_gpu(psi0.astype(COMPLEX_DTYPE))\n self.n = gpuarray.to_gpu(n.astype(REAL_DTYPE))\n self.currentDensity = gpuarray.to_gpu(currentDensity\n .astype(REAL_DTYPE))\n self.Pdt = gpuarray.to_gpu(Pdt.astype(REAL_DTYPE))\n self.expFactorPolFirst = gpuarray.to_gpu(\n np.array([expFactorPolFirst]).astype(COMPLEX_DTYPE))\n self.expFactorPolSecond = gpuarray.to_gpu(\n np.array([expFactorPolSecond]).astype(COMPLEX_DTYPE))\n self.expFactorPolThird = gpuarray.to_gpu(\n np.array([expFactorPolThird]).astype(COMPLEX_DTYPE))\n self.kineticFactorHalf = gpuarray.to_gpu(\n np.array([kineticFactorHalf]).astype(COMPLEX_DTYPE))\n # self.expFactorExciton = gpuarray.to_gpu(expFactorExciton)\n self.plan_forward = cu_fft.Plan((self.N, self.N), COMPLEX_DTYPE,\n COMPLEX_DTYPE)\n self.plan_inverse = cu_fft.Plan((self.N, self.N), COMPLEX_DTYPE,\n COMPLEX_DTYPE)" ]
[ "0.74356353", "0.7422801", "0.73030597", "0.7143981", "0.5985816", "0.5975828", "0.5915438", "0.5865488", "0.57980657", "0.5677244", "0.5349525", "0.5309606", "0.5289832", "0.52619296", "0.5260173", "0.5198084", "0.51720953", "0.5171042", "0.5160975", "0.5144867", "0.51287425", "0.51115435", "0.50965947", "0.5047088", "0.5015216", "0.5008753", "0.5008419", "0.49940196", "0.4986192", "0.49804962", "0.49681458", "0.49524596", "0.49396858", "0.4933664", "0.49325234", "0.49252596", "0.4913767", "0.49037287", "0.49037054", "0.49006775", "0.48945916", "0.4885365", "0.48820266", "0.4864908", "0.48648515", "0.4862506", "0.48601654", "0.48592678", "0.48471522", "0.48126674", "0.4811385", "0.48006216", "0.47953874", "0.47916842", "0.4786135", "0.47793427", "0.47752294", "0.4774401", "0.47699592", "0.47664037", "0.4757883", "0.47281325", "0.47224677", "0.47217402", "0.47162738", "0.4709412", "0.47076577", "0.47040367", "0.47015795", "0.46993586", "0.46983045", "0.46824718", "0.46805415", "0.46774969", "0.4670352", "0.46691284", "0.46686637", "0.46676704", "0.46660206", "0.46636462", "0.4662496", "0.46534836", "0.4653158", "0.46391815", "0.46320572", "0.463008", "0.4627857", "0.4626698", "0.46150988", "0.46133196", "0.46063256", "0.4602279", "0.4601654", "0.46009144", "0.45987132", "0.45899275", "0.4584118", "0.45829618", "0.45801932", "0.45749593" ]
0.7569402
0
Calculate Total Elapsed Time. This function takes the number of lasagna layers as a parameter, along with the elapsed bake time, and adds both values to acquire an accurate result of how much time we have spent baking the lasagna.
def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
    preparation_time = preparation_time_in_minutes(number_of_layers)
    total_bake_time = preparation_time + elapsed_bake_time
    return total_bake_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def elapsed_time_in_minutes(no_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(no_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers: int, elapsed_bake_time: int) -> int:\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return preparation_time_in_minutes(number_of_layers) + elapsed_bake_time", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return elapsed_bake_time + (number_of_layers * 2)", "def bake_time_remaining(elapsed_bake_time=EXPECTED_BAKE_TIME):\n\n time_remaining = EXPECTED_BAKE_TIME - elapsed_bake_time\n return time_remaining", "def bake_time_remaining(elapsed_bake_time: int) -> int:\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def bake_time_remaining(elapsed_bake_time):\n return EXPECTED_BAKE_TIME - elapsed_bake_time", "def calculate_total_lane_wait_time(self):\n\n if(self._vehicle_list == None):\n self._logger.write(\"Error! vehicle_list contains no value\")\n elif(len(self._vehicle_list) == 0):\n self._logger.write(\"Error! vehicle_list is empty\")\n else:\n try:\n for v in self._vehicle_list:\n self._total_lane_wait_time += v._wait_time\n except Exception as e:\n self._logger.write(\"Error! Could not calculate total_lane_wait_time: \\n %s\" % e)", "def get_time_elapsed(self, pipeline_run):\n begin = pipeline_run[\"steps\"][0][\"method_calls\"][0][\"start\"]\n begin_val = parse(begin)\n end = pipeline_run[\"steps\"][-1][\"method_calls\"][-1][\"end\"]\n end_val = parse(end)\n total_time = (end_val - begin_val).total_seconds()\n return total_time", "def _get_running_time(self):\n time_sum = 0.0\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n line = open('{0}/{1}/{2}/out/OUTDOCK'.format(self.path, subdir, DOCKING_RUN_FILES),'r').readlines()[-1]\n if line.startswith('elapsed time'):\n time = float(line.split()[-1])\n time_sum = time_sum + time\n except:\n pass \n self.running_time = time_sum", "def get_elapsed_time(self):\r\n self.get_bb_result()\r\n csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'\r\n if not os.path.isfile(csv_path):\r\n print('BigBenchTimes.csv does not exist in {0}, existing...'.format(self.bb_log_path))\r\n exit(-1)\r\n df = pd.read_csv(csv_path, delimiter=';').loc[:,\r\n ['benchmarkPhase', 'streamNumber', 'queryNumber', 'durationInSeconds']]\r\n elapsed_time = pd.DataFrame()\r\n is_exist = False\r\n for phase in ['POWER_TEST', 'THROUGHPUT_TEST_1']:\r\n benchmark_phase = (df['benchmarkPhase'] == phase)\r\n if any(benchmark_phase): # whether this phase exist in the BB logs\r\n if phase == 'POWER_TEST': # power test overall and each query\r\n stream_num = ((df['streamNumber']) == 0)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(0, phase, seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n elif phase == 'THROUGHPUT_TEST_1':\r\n streams = int(np.max(df['streamNumber']))\r\n for stream in range(streams + 1):\r\n stream_num = ((df['streamNumber']) == stream)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = 
df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(stream + 1, 'stream{0}'.format(stream), seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n is_exist = True\r\n if is_exist:\r\n print('*' * 100)\r\n print('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n\r\n result_path = self.bb_log_path + os.sep + 'bb_results.log'\r\n with open(result_path, 'a') as f:\r\n f.write('*' * 100 + '\\n')\r\n f.write('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n else:\r\n print('It seems BigBenchTimes.csv in {0} does not include TPCx-BB phases:POWER_TEST, THROUGHPUT_TEST_1' \\\r\n 'existing...'.format(self.bb_log_path))\r\n exit(-1)", "def averageTime(self):\n \n pass", "def compute_run_duration(flasher_data, initial_delay):\n if initial_delay is None:\n tot = 0\n else:\n tot = initial_delay\n\n for pair in flasher_data:\n tot += pair[1] + 10\n\n return tot", "def total_time(self):\n obs_times = self.clean_obs_times()\n return _np.sum(obs_times[:, 1] - obs_times[:, 0])", "def elapsed():\n global start_time\n return time.time() - start_time", "def calc_time(directions_result):\n\n # there is only one leg\n legs = directions_result[\"legs\"][0][\"steps\"]\n\n steps = map(lambda x: (x[\"travel_mode\"], x[\"start_location\"], x[\"end_location\"]), legs)\n\n walking = filter(lambda x: x[0] == \"WALKING\", steps)\n transit = filter(lambda x: x[0] == \"TRANSIT\", steps)\n\n\n walking_to_biking = map(lambda x: gmaps.directions(\n x[1], x[2],\n mode=\"bicycling\"), walking)\n\n transit_final = map(lambda x: gmaps.directions(\n x[1], x[2], mode=\"transit\"), transit)\n\n\n walking_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), walking_to_biking)\n transit_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), transit_final)\n\n all_legs = map(lambda x:\n sum(map(lambda y: y[\"duration\"][\"value\"], x[0][\"legs\"]))\n ,walking_to_biking+transit_final)\n\n final = zip(all_legs, walking+transit, walking_addrs+transit_addrs)\n\n\n def reconstruct():\n w,t = 0,len(walking)\n arr = []\n for i in xrange(len(all_legs)):\n if steps[i][0] == \"TRANSIT\":\n arr.append(final[t])\n t += 1\n else:\n arr.append(final[w])\n w += 1\n return arr\n\n\n total_time = sum(all_legs) \n\n return total_time, reconstruct()", "def get_total_lane_wait_time(self):\n if(type(self._total_lane_wait_time) != float):\n self._logger.write(\"Error! total_lane_volume must be of type int\")\n elif(self._total_lane_wait_time == None):\n self._logger.write(\"Error! total_lane_wait_time contains no value\")\n else:\n try:\n return self._total_lane_wait_time\n except Exception as e:\n self._logger.write(\"Error! 
Could not fetch the value of total_lane_wait_time: \\n %s\" % e)", "def elapsed(self, total: bool = True) -> float:\n\n return self.timer.elapsed(self.label, total=total)", "def _calibrate_time(self):\n time_overhead = 0\n for i in range(1000):\n start = self._adjusted_time()\n end = self._adjusted_time()\n time_overhead += end - start\n return time_overhead / 1000", "def elapseTime(self, gameState):\n\n \"*** YOUR CODE HERE ***\"\n\n allPossible = util.Counter()\n\n for oldPos in self.legalPositions:\n actions = gameState.getLegalActions(agentIndex)\n successorStates = [gameState.generateSuccessor(action) for action in actions]\n newPosDist = {}\n for state in successorStates:\n position = state.getAgentPosition(agentIndex)\n prob = 1.0/len(actions)\n newPosDist[position] = prob\n\n for newPos, prob in newPosDist.items():\n allPossible[newPos] += prob * self.beliefs[oldPos]\n\n allPossible.normalize()\n self.beliefs = allPossible", "def training_time_left(current_epoch, total_epochs, epoch_time):\n epochs_rem = total_epochs - current_epoch - 1\n time_rem = epochs_rem * epoch_time\n return str(datetime.timedelta(seconds=time_rem))", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def elapsed(self):\n return self.__last_time() - self.__start", "def preparation_time_in_minutes(number_of_layers: int) -> int:\n return PREPARATION_TIME_PER_LAYER_IN_MINUTES * number_of_layers", "def get_elapsed(self):\n delta = self._now() - self.start\n return delta.microseconds / 1000.0", "def preparation_time_in_minutes(number_of_layers):\n\n layers_preparation_time = number_of_layers * PREPARATION_TIME\n return layers_preparation_time", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_work_time_total(self)", "def estimated_time(self):\n self._update()\n if not self.running_mode:\n return 0 if self._is_finished() else float(\"nan\")\n elif self.running_mode == \"local\":\n start = self.processes[0].create_time()\n elif self.running_mode == \"grid\":\n start = self.job[\"start_time\"]\n if start == 0:\n # Queued, but not started\n return float(\"nan\")\n else:\n logger.warning(\"Invalid running_mode attribute\")\n return float(\"nan\")\n current = self.current_step()\n if current <= 0: # If not dumped yet or error\n return float('nan')\n else:\n elapsed = time() - start\n return elapsed * (self.total_steps / current - 1)", "def pc_work_time_total(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_work_time_total(self)", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(self.name, \"took\", str(hours), \"hours and\", \"{0:.2f}\".format(minutes), unit, \"to complete\")\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(self.name, \"took\", str(minutes), \"minutes and\", \"{0:.2f}\".format(seconds), unit, \"to complete\")\n else:\n print(self.name, \"took\", \"{0:.2f}\".format(elapsed), unit, 
\"to complete\")", "def runtime(self):\n return self.stop_time - self.start_time", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(\"{} {} took {} hours and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, hours, minutes, unit))\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(\"{} {} took {} minutes and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, minutes, seconds, unit))\n else:\n print(\"{} {} took {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, elapsed, unit))", "def preparation_time_in_minutes(number_of_layers):\n return number_of_layers * 2", "def CalcTimeAfterFinish(lap: gps_pb2.Point) -> float:\n point_c = lap.points[-1]\n point_b = GetPriorUniquePoint(lap, point_c)\n point_b_angle = SolvePointBAngle(point_b, point_c)\n accelration = CalcAcceleration(point_b, point_c)\n perp_dist_b = PerpendicularDistanceToFinish(point_b_angle, point_b)\n time_to_fin = SolveTimeToCrossFinish(point_b, perp_dist_b, accelration)\n delta = GetTimeDelta(point_b, point_c)\n return delta - time_to_fin", "def get_time_elapsed(check, metric_name, modifiers, global_options):\n gauge_method = check.gauge\n\n def time_elapsed(metric, sample_data, runtime_data):\n for sample, tags, hostname in sample_data:\n gauge_method(metric_name, get_timestamp() - sample.value, tags=tags, hostname=hostname)\n\n del check\n del modifiers\n del global_options\n return time_elapsed", "def get_time_elapsed(self):\n return self.__time_elapsed", "def pc_work_time_total(self):\n return _TestA_swig.cleanslate_sptr_pc_work_time_total(self)", "def alignTime(self):\n agility = self.agility\n mass = self.mass\n\n alignTime = -math.log(0.25) * agility * mass / 1000000\n return alignTime", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]\r\n\t\t\tmag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)\r\n\t\t\tif a[0] == \"jump\":\r\n\t\t\t\ttime += mag/laser[\"jump_speed\"]\r\n\t\t\telse:\r\n\t\t\t\ttime += mag/laser[\"mark_speed\"]\r\n\t\t\tcoordinate_array = [float(a[1]), float(a[2])]\r\n\t\telif a[0] == \"z_abs\" or a[0] == \"z_rel\":\r\n\t\t\tzSet = float(a[1])\r\n\t\telif a[0] == \"c_abs\" or a[0] == \"c_rel\":\r\n\t\t\tcSet = float(a[1])\r\n\t\telif a[0] == \"a_abs\" or a[0] == \"a_rel\":\r\n\t\t\taSet = float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn str(datetime.timedelta(seconds=int(time)))", "def elapsed_time_for_test(self, test_class, test_name, end_time):\n if test_class is None or test_name is None:\n return -2.0\n\n test_key = \"{}.{}\".format(test_class, test_name)\n if test_key not in self.start_time_by_test:\n return -1.0\n else:\n start_time = self.start_time_by_test[test_key]\n del self.start_time_by_test[test_key]\n return end_time - start_time", "def total_seconds(td):\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6", "def calc_idle_time(self):\n tt = 0.0\n keys = self.output.keys()\n keys.sort()\n jobEndKeys = [thekey for thekey in keys if 'JobEnd' in thekey]\n jobEndKeys = 
jobEndKeys[0:len(jobEndKeys)-1]\n for key in jobEndKeys:\n idxstart = keys.index(key)\n stime = self.output[keys[idxstart]][1]\n idxend = idxstart + 1\n while not (keys[idxend][1] in ['Preparation','Production']):\n idxend += 1\n # Now we have the entry where the next work cycle starts\n etime = self.output[keys[idxend]][1]\n tot_time = (etime - stime).seconds/3600.\n for ii in range(idxstart+1, idxend):\n if keys[ii][1] in ['W-up', 'Maintenance']:\n tot_time -= self.output[keys[ii]][4]\n # if the time is longer than 5 min:\n if tot_time >= 5.0/60.0:\n tt += tot_time\n print 'idle time ', tt, ' hours'", "def time_average(self, time_since_out, time_step):\n\n for variable in self._energy_state:\n bar_variable = \"{}_bar\".format(variable)\n bar_value = getattr(self, bar_variable)\n ta = time_average(bar_value, time_since_out,\n getattr(self, variable), time_step)\n setattr(self, bar_variable, ta)\n\n self.E_s_sum += self.E_s\n self.melt_sum += self.melt\n self.swi_sum += self.swi", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.beamformer_sptr_pc_work_time_total(self)", "def mean_run_time(self) -> float:\n return float(self.result_array.sum(axis=0).mean())", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def gaver_stehfest(time, lap_func):\n def nCr(n, r):\n return math.factorial(n)/(math.factorial(r)*\n math.factorial(n-r))\n def a(k, n):\n summation = 0.\n for j in range((k+1)/2, min(k, n)+1):\n current_summation = float(pow(j, n+1))/float(math.factorial(n))\n current_summation *= nCr(n, j)\n current_summation *= nCr(2*j, j)\n current_summation *= nCr(j, k-j)\n summation += current_summation\n return summation*pow(-1, n+k)\n n = 7\n total_sum = a(1, n)*lap_func(1.*np.log(2.)/time)\n for k in range(2, 2*n+1):\n total_sum += a(k, n)*lap_func(k*np.log(2.)/time)\n return total_sum*np.log(2.)/time", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def timing_gemm(trials, k):\n # values to test\n vals = (100, 300, 500, 1000, 1500, 2000, 2500, 3000)\n dtypes = ('float64', 'float32')\n trans_tuple = ('n', 't')\n bp_total = 0.0\n np_total = 0.0\n\n for n in vals:\n # test all combinations of all possible values\n for (dtype, trans_a, trans_b) in product(dtypes, trans_tuple, trans_tuple):\n bp_time, np_time = timing_test(dtype, trans_a, trans_b, n, k, trials)\n bp_total += bp_time\n np_total += np_time\n\n print(\"\\nk: %d, m=n: %d, BLASpy Average: %.5fs, NumPy Average: %.5fs\"\n % (k, n, bp_total / 8, np_total / 8))", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def get_elapsed_time(self):\n\n return datetime.timedelta(seconds=int(time.time() - self.start_time))", "def print_time_stats(self):\n walk_total = 0\n bus_total = 0\n for passenger in self.passengers:\n time = self._passenger_trip_time(passenger)\n walk_total += time[\"walk\"]\n bus_total += time[\"bus\"]\n av_bus_time = bus_total / self.total_passengers\n av_walk_time = walk_total / self.total_passengers\n\n print(f\"Average time on bus: {av_bus_time:.0f} min\")\n print(f\"Average walking time: {av_walk_time:.0f} min\")", "def compute_go_duration(self, units='seconds'):\n go_duration = 0\n for trial in self.trials:\n max_time = 0\n for event in 
trial.events:\n if self.stop > max_time:\n max_time = self.stop\n\n go_duration += max_time\n\n self.go_duration = (go_duration, units)", "def eta(start, completed, total):\n # Computation\n took = time_since(start)\n time_per_step = took / completed\n remaining_steps = total - completed\n remaining_time = time_per_step * remaining_steps\n\n return user_friendly_time(remaining_time)", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_work_time_total(self)", "def pc_work_time_avg(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_work_time_avg(self)", "def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_work_time_avg(self)", "def elapsed_time(self):\n return self.__elapsed_time", "def runtime(self):\n return self.tmax_epochs - self.tmin_epochs", "def calculate_totalLLR(self, groundtype):\n\t\t#Initialize total log likelihood ratio\n\t\tif groundtype == 'Foreground':\n\t\t\ttot_LLR = np.zeros(self.foreground['npoints'])\n\t\telif groundtype == 'Background':\n\t\t\ttot_LLR = np.zeros(self.background['npoints'])\n\t\t\t\n\t\t#Iterate over all parameter groups\n\t\tfor group in self.group_names:\n\t\t\ttot_LLR += self.calculate_groupLLR(groundtype=groundtype, groupname=group)\n\t\t\n\t\treturn tot_LLR", "def get_elapsed_time(self):\n return self.app.get_elapsed_time() - self._pause_time", "async def getDelayTimeTotal(self):\n delay_time_total = await self.director.getItemVariableValue(\n self.item_id, \"DELAY_TIME_TOTAL\"\n )\n return delay_time_total", "def pc_work_time_avg(self):\n return _TestA_swig.cleanslate_sptr_pc_work_time_avg(self)", "def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # 
pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.phasedarray_sptr_pc_work_time_total(self)", "def elapsed(self, label: Optional[str] = None, total: bool = True) -> float:\n\n # Get current time\n t = timer()\n # Default label is self.default_label\n if label is None:\n label = self.default_label\n # Return 0.0 if default timer selected and it is not initialised\n if label not in self.t0:\n return 0.0\n # Raise exception if timer with specified label does not exist\n if label not in self.t0:\n raise KeyError(f\"Unrecognized timer key {label}.\")\n # If total flag is True return sum of accumulated time from\n # previous start/stop calls and current start call, otherwise\n # return just the time since the current start call\n te = 0.0\n if self.t0[label] is not None:\n te = t - self.t0[label] # type: ignore\n if total:\n te += self.td[label]\n\n return te", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def get_duration(self):\n dur = 0\n for clu in self._clusters:\n dur += self._clusters[clu].get_duration()\n return dur", "def compute_time(self):\n return self.compute_time_without_gc() + self.gc_time", "def getTotalTime(self):\n with self.lock:\n if self.tend == 0:\n total = -1\n else:\n total = self.tend - self.tstart\n return total", "def new_time(self, state, time, last_value, independent_sources):\n sum1 = np.matmul(state.__active_control[1],last_value) - state.__active_control[2]\n if (abs(sum1) > self.__absolute_error):\n sum2 = np.matmul(state.__active_control[1], np.matmul(self.__A, last_value) + np.matmul(self.__B, independent_sources))\n sum3 = np.matmul(state.__active_control[1], np.matmul(self.__A**2, last_value) + np.matmul(self.__A, \\\n np.matmul(self.__B, independent_sources)))\n return time + 1.0 / (sum3 / 2 / sum2 - sum2 / sum1)\n else:\n return -1", "def calculateRunTime(function, *args):\n startTime = time.time()\n result = function(*args)\n return time.time() - startTime, result", "def ElapsedTime(self):\n if self.force_auto_sync:\n self.get('ElapsedTime')\n return self._ElapsedTime", "def duration(self):\n return int(\n (self.finish_checkpoint - self.start_checkpoint) * 1000000\n )", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n df['travel_time'] = (pd.to_datetime(df['End Time']) - df['Start Time']).dt.total_seconds()\n print('total travel time is : {} seconds'.format(df['travel_time'].sum()))\n\n\n # display mean travel time\n print('mean travel time is : {} seconds'.format(df['travel_time'].mean()))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*140)", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def lap(self):\n oldtime = self._clock() - self._starttime\n self._starttime = self._clock()\n return 
oldtime", "def get_elapsed_time(self):\n if hasattr(self, 'starttime'):\n return monotonic() - self.starttime\n else:\n return 0", "def _arrival_time(self):\n \n return self.mkt_time + timedelta(0, 0, self.latency)", "def time_function_total(function, runs=1):\n t0 = time.perf_counter()\n for _ in range(runs):\n function()\n t1 = time.perf_counter()\n\n return t1 - t0", "def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / 
float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results", "def elapsed_micros(start: int, /) -> int:", "def time_elapsed(start_time):\n te = time.time() - start_time\n # print(str(datetime.timedelta(seconds=te)))\n return str(datetime.timedelta(seconds=te))", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n df['travel_time'] = df['End Time'] - df['Start Time'] \n #total_travel_time = df['travel_time'].sum()\n print('The total travel time is: ',df['travel_time'].sum())\n \n # TO DO: display mean travel time\n mean_travel_time = df['travel_time'].mean()\n print('The mean travel time is: ',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total", "def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start", "def total_time(self):\n time = 0\n for segment in self.data:\n segment_time = 0\n\n last_time = None\n\n for point in segment:\n current_time = point[\"time\"]\n\n # in case data is missing skip point !\n if current_time is None:\n continue\n\n # the first valid element is processed, get distance\n if not (last_time is None):\n try:\n a = dt.strptime(last_time, \"%Y-%m-%d %H:%M:%S.%f\")\n b = dt.strptime(current_time, \"%Y-%m-%d %H:%M:%S.%f\")\n except ValueError:\n a = dt.strptime(last_time, \"%Y-%m-%d %H:%M:%S\")\n b = dt.strptime(current_time, \"%Y-%m-%d %H:%M:%S\")\n time_difference = b - a\n segment_time += time_difference.seconds\n\n last_time = current_time\n\n time += segment_time\n\n return time", "def print_time_elapsed(self):\r\n stop_time = time.time()\r\n elapsed_time = stop_time - self.start_time\r\n print(f\"-- time elapsed: {elapsed_time:.5f} s\", flush=True)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n df['Trip Duration'] = df['Trip Duration'].astype(np.float64) # Convert data type from int64 to float64 to avoid TypeError using datetime.timedelta\n total_travel_time = str(datetime.timedelta(seconds=df['Trip Duration'].sum()))\n print('\\nThe total travel time is {}.'.format(total_travel_time))\n\n avg_travel_time = str(datetime.timedelta(seconds=(df['Trip Duration'].sum()/df['Trip Duration'].count())))\n print('\\nThe average travel 
time is {}.'.format(avg_travel_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def total_tr(self):\r\n return sum(map(lambda x: self.times[x]['tr'], self.times))", "def calc_total_wait(self, current_time_step):\n self.total_wait = current_time_step - self.time_entered\n return self.total_wait", "def pc_work_time_total(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_work_time_total(self)", "def preparation_time_in_minutes(number_of_layers):\n return PREPARATION_TIME * number_of_layers", "def elapsed(timestamp):\n return repoze.timeago.get_elapsed(timestamp)", "def approximateTime(meal):\n RATE = 4.2535969274764765e-05 # seconds per character.\n time = len(meal)**1 * RATE\n return time", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n\n total_travel_time_in_sec = df['Trip Duration'].sum()\n total_travel_time_in_years = total_travel_time_in_sec // (60 * 60 * 24 *365)\n\n modulus1_in_sec = total_travel_time_in_sec - total_travel_time_in_years*(60 * 60 * 24 *365)\n #print(\"modulus1_in_sec:\", modulus1_in_sec)\n total_travel_time_in_months = modulus1_in_sec // (60 * 60 * 24 *31)\n\n modulus2_in_sec = modulus1_in_sec - total_travel_time_in_months*(60 * 60 * 24 *31)\n #print(\"modulus2_in_sec:\", modulus2_in_sec)\n total_travel_time_in_weeks = modulus2_in_sec // (60 * 60 * 24 *7)\n\n modulus3_in_sec = modulus2_in_sec - total_travel_time_in_weeks*(60 * 60 * 24 *7)\n #print(\"modulus3_in_sec:\", modulus3_in_sec)\n total_travel_time_in_days = modulus3_in_sec // (60 * 60 * 24)\n\n modulus4_in_sec = modulus3_in_sec - total_travel_time_in_days*(60 * 60 * 24)\n #print(\"modulus4_in_sec:\", modulus4_in_sec)\n total_travel_time_in_hours = modulus4_in_sec // (60 * 60)\n\n modulus5_in_sec = modulus4_in_sec - total_travel_time_in_hours*(60 * 60)\n #print(\"modulus5_in_sec:\", modulus5_in_sec)\n total_travel_time_in_minutes = modulus5_in_sec // 60\n\n modulus6_in_sec = modulus5_in_sec - total_travel_time_in_minutes*60\n #print(\"modulus6_in_sec:\", modulus6_in_sec)\n total_travel_time_in_seconds_modulus = modulus6_in_sec\n\n print(\"total travel time of all Users combined:\\n YEARS: {} \\n MONTHS: {} \\n WEEKS: {} \\n DAYS: {} \\n HOURS: {} \\n MINUTES: {} \\n SECONDS: {} \\n\".format(total_travel_time_in_years, total_travel_time_in_months, total_travel_time_in_weeks, total_travel_time_in_days, total_travel_time_in_hours, total_travel_time_in_minutes, total_travel_time_in_seconds_modulus))\n\n # TO DO: display mean travel time\n\n mean_travel_time_in_sec = df['Trip Duration'].mean()\n mean_travel_time_in_minutes = mean_travel_time_in_sec // 60\n modulus_in_sec = mean_travel_time_in_sec - mean_travel_time_in_minutes*60\n mean_travel_time_in_seconds_modulus = modulus_in_sec\n\n print(\"mean travel time:\\n MINUTES: {} \\n SECONDS: {} \\n\".format(int(mean_travel_time_in_minutes), mean_travel_time_in_seconds_modulus))\n\n#trip_duration_stats(pd.read_csv('{}.csv'.format(city)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)" ]
[ "0.75032663", "0.7467269", "0.74404085", "0.73925406", "0.64541507", "0.6305446", "0.6257169", "0.6257169", "0.6257169", "0.6000466", "0.5808721", "0.5773526", "0.5667682", "0.5637501", "0.5635833", "0.55388767", "0.552437", "0.5441152", "0.5426811", "0.53834176", "0.53707606", "0.5337911", "0.5314318", "0.530219", "0.52953476", "0.52905047", "0.52817667", "0.527911", "0.527379", "0.52000356", "0.5198503", "0.5194199", "0.5193878", "0.51915073", "0.51908123", "0.51746774", "0.51578265", "0.5157116", "0.5151563", "0.51334924", "0.51278377", "0.5126822", "0.51213235", "0.5114823", "0.5103056", "0.50824714", "0.5081086", "0.5076478", "0.50748205", "0.5074523", "0.5068644", "0.50684005", "0.50613564", "0.50549656", "0.5052413", "0.5052062", "0.50433123", "0.50431603", "0.5041662", "0.5041623", "0.5040728", "0.50396734", "0.50308996", "0.5030544", "0.5029139", "0.50090826", "0.5000911", "0.500006", "0.49645314", "0.4963677", "0.4963416", "0.49615186", "0.49574873", "0.49546266", "0.4947071", "0.49375826", "0.4937396", "0.49350786", "0.49320284", "0.4931225", "0.49230754", "0.4920203", "0.49174625", "0.49160606", "0.4896791", "0.48875904", "0.48742023", "0.4866519", "0.48638263", "0.4859782", "0.48569462", "0.48530006", "0.485124", "0.4848099", "0.48468563", "0.4845413", "0.48432165", "0.48410088", "0.48364675", "0.48319897" ]
0.7721855
0
get address by id
def get_address_by_id(address_id: int): try: address = address_service.get_address_by_id(address_id) current_app.logger.info("Return data for address_id: {}".format(address_id)) return jsonify({ "data": { "address": address }}), 200 except SQLCustomError as error: current_app.logger.error("Return error for address_id: {}".format(address_id)) return jsonify({ "errors": { "error": error.__dict__ } }), 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bank_address_by_id(bank_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from bank where name = '{}';\".format(bank_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_supplier_address_by_id(supplier_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from supplier where name = '{}';\".format(supplier_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_party_address_by_id(party_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from party where name = '{}';\".format(party_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_transport_address_by_id(transport_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from transport where name = '{}';\".format(transport_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_addressbook_info(self, id):\n logger.info(\"Function call: get_addressbook_info: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}'.format(id)))", "def get_location_by_id(self, location_id):", "def get_order_address(self, order_id, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), order_id), params={**kwargs})", "def getAddress(user):", "def users_byUserId_address_byAddressId_get(self, addressId, userId, headers=None, query_params=None):\n uri = self.url + \"/users/\"+userId+\"/address/\"+addressId\n uri = uri + build_query_string(query_params)\n return requests.get(uri, headers=headers)", "def get_place_by_id(id):\n rv = query_db('select * from places where place_id = ?',\n [id])\n return rv[0] if rv else None", "def get(self, id):\n return Contacts().get_one(id)", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def address(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"address\")", "def findLocationById(cls, id):\r\n return cls.query.filter_by(id=id).first()", "def get_address(saved_home_id):\n\n sql = \"SELECT address FROM saved_homes WHERE saved_home_id= :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n old_address = cursor.fetchone()\n \n address = \" \".join(old_address)\n\n return address", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def address(self, name):\n return self.query(name).response.answer[0].items[0].address", "def get(self, id):\n return {'id': id}", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def getAddressAtIndex(self, index: int) -> ghidra.program.model.address.Address:\n ...", "def get(self, _id):", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_by_id(cls, id):\n return cls.query().get(id)", "def getAddress(self) -> int:\n ...", "def get_model(self):\n return get_object_or_404(UserAddress, user=self.request.user, pk=self.kwargs['address_id'])", "def do_getaddress(self,args):\n ppdict(bitstamp.get_depositaddress())", 
"def getbyid(self, id):\n\n return esd.retrieve(id)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(self, id):\n return self.__model__.query.get(id)", "def address_details(self) -> 'outputs.AddressDetailsResponse':\n return pulumi.get(self, \"address_details\")", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get(self, id):\n return Entry.query.filter(Entry.id == id).one()", "def test_get_shipping_address(self):\n self.cim.get_shipping_address(\n customer_profile_id=u\"900\",\n customer_address_id=u\"344\"\n )", "def search_address(query: str) -> Tuple[int, str]:\n\n url = 'https://api.n1.ru/api/v1/geo/geocoder/with_cities/'\n params = _search_params.copy()\n params['q'] = query\n\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n\n if not 'result' in response or not response['result']:\n raise NotFoundException('Result not found or empty.')\n \n address = None\n house_number = query.split(',')[-1].strip()\n for x in response['result']:\n if x['name_ru'].lower() == house_number:\n address = x\n break\n \n if address is None:\n raise NotFoundException(f'Not found house number {house_number} in result: {response[\"result\"]}')\n \n return address['street']['id'], address['name_ru']\n except requests.RequestException as e:\n raise ParserException(f'Fail make request. query: {query}') from e\n except NotFoundException as e:\n raise ParserException('Invalid result.') from e\n except (KeyError, IndexError) as e:\n raise ParserException(f'Fail get street id or house number. value: {response[\"result\"]}') from e", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def find(self, text: unicode) -> ghidra.program.model.address.Address:\n ...", "def read_addresses(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT a.addressid, a.personid, a.addressline1, a.addressline2, a.pobox, a.city, a.state, \"\n \"a.zipcode, a.zip4, a.postalcode, a.status, a.typeid, a.sequenceno, c.typecode, c.typedescription \"\n \"FROM Address a \"\n \"JOIN codes c on c.typeid = a.typeid WHERE personid=? 
ORDER BY a.sequenceno ASC\", (person_id,))\n\n address_list = []\n for row in c:\n _address = Address()\n _address.address_id = row[\"addressid\"]\n _address.person_id = row[\"personid\"]\n _address.address_line_1 = row[\"addressline1\"]\n _address.address_line_2 = row[\"addressline2\"]\n _address.po_box = row[\"pobox\"]\n _address.city = row[\"city\"]\n _address.state = row[\"state\"]\n _address.zip_code = row[\"zipcode\"]\n _address.zip4 = row[\"zip4\"]\n _address.postal_code = row[\"postalcode\"]\n _address.status = row[\"status\"]\n _address.type_id = row[\"typeid\"]\n _address.sequence_number = row[\"sequenceno\"]\n _address.type_code = row[\"typecode\"]\n _address.type_description = row[\"typedescription\"]\n address_list.append(_address)\n conn.close()\n return address_list\n except:\n return []", "def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n addr = commonDAO.get_address(currency, address)\n if addr:\n addr['tags'] = commonDAO.list_address_tags(currency, address)\n return addr\n abort(404, \"Address {} not found in currency {}\".format(address,\n currency))", "def getOneAddress(results):\n return getOnePayload(results).dottedQuad()", "def get_proximity_db_detail(self, context, id):\n zone_obj = self.dns_manager.get_proximity_db_detail(context, id)\n return zone_obj", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def _getcontact(id):\n contact = {}\n idwrapper = {}\n \n try:\n contact[\"name\"] = r.get(\"uid:\" + id + \":name\")\n contact[\"address\"] = r.get(\"uid:\" + id + \":address\")\n contact[\"phone\"] = r.get(\"uid:\" + id + \":phone\")\n contact[\"email\"] = r.get(\"uid:\" + id + \":email\")\n idwrapper[id] = contact\n\n return idwrapper\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def get_object(id):", "async def get_contact(request, id):\n _, contact = find_contact(id)\n if not contact:\n return empty(404)\n\n return json(contact)", "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "def find_location_by_id(self, id):\n for location in self.locations:\n if location.id == id:\n yield location", "async def get_contact(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, email, phone, active from contacts where id=%s\"\"\"\n q_args = (id,)\n row = await dbcon.fetch_row(q, q_args)\n contact = None\n if row:\n contact = object_models.Contact(*row)\n return contact", "def get_addressbook_variables(self, id):\n logger.info(\"Function call: get_addressbook_variables_list: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/variables'.format(id)))", "def get(self,id) -> Contact:\n data=ContactSet.query.get(id)\n if data:\n contact = Contact(data.id,data.name,data.birthdate,data.contact_type,data.description, data.phone)\n return contact\n return None", "def get_restaurant(id):\r\n with current_app.app_context():\r\n if current_app.config[\"USE_MOCKS\"]:\r\n id -= 1 # restaurant IDs starting by 1\r\n if 0 <= id < len(restaurants):\r\n return restaurants[id]\r\n else:\r\n return None\r\n else:\r\n return get_from(current_app.config[\"REST_SERVICE_URL\"]+\"/restaurants/\"+str(id))", "def get_address() -> pd.DataFrame:\n return GETTER.organisationaddress.merge(GETTER.address, 
on=\"address_id\").drop(\n \"address_id\", 1\n )", "def get_entity_by_id(self, id):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers_ld)\n return r.json()", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def get_city_details(self, location_id):\n sql = \"SELECT * FROM [location] WHERE [id] = %d\"%(location_id)\n self.cursor.execute(sql)\n row = self.cursor.fetchone()\n city = row['city']\n state = row['region']\n zip_code = row['postal_code']\n provider = row['provider']\n ip_address_int = random.randint(3221225729, 3758096126) # Class C\n #ip_address = socket.inet_ntop(socket.AF_INET6, struct.pack('L', int(socket.htonl(ip_address_int))))\n ip_address = socket.inet_ntoa(hex(ip_address_int)[2:].zfill(8).decode('hex')) \n return [city, state, zip_code, provider, ip_address]", "def get_address(self, ):\n return self.get_parameter('address')", "def get(id=None):\n return requests.get(\"/{}\".format(id))", "def getContactById(self, id):\n for contact in self.contacts:\n if contact.id == id:\n return contact\n if self.profile:\n if self.profile.id == id:\n return self.profile\n\n return None", "def get_emails_from_addressbook(self, id, limit=0, offset=0):\n logger.info(\"Function call: get_emails_from_addressbook: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def get_doctor_detail(id):\n headers = {\"Authorization\": 'Bearer ' + api_key}\n return requests.get(\n f\"{base_url}businesses/{id}\",\n headers=headers).json()", "def get(self, id):\n return Freigabe.find_by_id(id)", "def get_locations(self, id_):\n with self._db_connection() as connection:\n return connection.get_locations(id_)", "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\n \n # pattern for the address in this website\n locality = address.find_all('span', class_='locality')\n city = locality[0].get_text().strip()\n if len(locality) > 1:\n city = locality[1].get_text().strip()\n state = address.find('span', class_='region').get_text().strip()\n zipcode = address.find('span', class_='postal-code').get_text().strip()\n return street, city, state, zipcode\n except:\n return street, city, state, zipcode", "def resolve_from_local_lookup_table(self, id: str) -> GeoLocation:\n return self.local_lookup(id)", "def get_email_info_from_one_addressbooks(self, id, email):\n logger.info(\"Function call: get_email_info_from_one_addressbooks from: '{}'\".format(id, ))\n if not id or not email:\n self.__handle_error(\"Empty addressbook id or email\")\n return self.__handle_result(self.__send_request('addressbooks/{}/emails/{}'.format(id, email)))", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def address_to_update(id,con,cur):\n\n psql=f\"\"\"select line1,line2,city,state,postal_code,country from address where extern_client_id='{id}'\"\"\"\n cur.execute(psql)\n record=cur.fetchall()\n \n address={'line1':record[0][0],\n 'line2':record[0][1],\n 'city':record[0][2],\n 
'state':record[0][3],\n 'postal_code':record[0][4],\n 'country':record[0][5]}\n return address", "def get_cab_route_by_id(self, id):\n cab_route = self.admin_repository.get_cab_route_by_id(id)\n if cab_route:\n print(\"Cab Number : {}\".format(cab_route[1]))\n print(\"Route Id : {}\".format(cab_route[2]))\n print(\"Stop Name : {}\".format(cab_route[3]))\n print(\"Stop stage : {}\".format(cab_route[4]))\n print(\"Timings : {}\".format(cab_route[5]))\n return cab_route\n else:\n print(\"Invalid Input\")\n return False", "def get(self, cls, id):\n pass", "def lookup(self, dict_id):\n\n return self.ep.get(\"{0}/{1}\".format(self.endpoint, dict_id))", "def get_entry_by_id(model, id):\n print(model, id)\n return db.session.query(model).filter_by(id=id).first()", "def get_person(self, id):\n if self.people is None:\n self.people = self.get_people()\n\n for person in self.people:\n if person['person']['id'] == id:\n return person['person']\n\n return None", "def address(self):\n return self.data.get('address')", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def getReferencedAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...", "def get_address(query):\n address = \"Dis-moi, quel endroit tu cherches ?\"\n data = get_data(query)\n try:\n address_data = data[\"results\"][0][\"formatted_address\"]\n address = (\"Si je ne me trompe pas,\"\n \" l'adresse que tu cherche, c'est ... \" + address_data + \". Sinon\"\n \", dis-moi le nom de lieu exact\")\n except IndexError:\n address = \"Désolé, je n'ai pas compris quel endroit tu cherches ?\"\n finally:\n return address", "def get_book_by_id(id):\n return Book.query.filter_by(id=id).first()", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def get_record(self, id: uplink.Path):\n pass", "def get(self, _id):\n if not self.root:\n raise RootNotSet\n node = self.id_map.get(_id)\n if not node:\n raise IDNotFound(_id)\n\n link = node.get('link')\n if link:\n link_node = self.id_map.get(_id)\n if not link_node:\n logger.error('link node not found!')\n raise IDNotFound(link_node)\n data = self.get(node['link'])\n data['link'] = data['id']\n data['id'] = link_node['id']\n return data\n\n if node.get('type') == 'group' or node.get('type') == None:\n return self._adapter._get_group(_id)\n elif node.get('type') == 'data':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'json':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'config':\n data = self._adapter._load_data(_id)\n data.pop('name', None)\n return data\n else:\n raise UnsupportedType", "def address1(self, instance):\r\n return instance.user.profile.address1", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def get_campaign_cost(self, id):\n logger.info(\"Function call: get_campaign_cost: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/cost'.format(id)))", "def get_address(self):\n if self.get_entity: # needs an entity to work\n if self.building:\n address = self.get_entity.get_institutional_address()\n address.extend(self.building.get_postal_address())\n return address\n else:\n return 
self.get_entity.get_address()", "def get_address(address_file):\n if not path.exists(address_file) :\n print(\"file not found :\", address_file)\n return None\n addr_file = open(address_file,'r')\n address = addr_file.readlines()\n return address[0]", "def get_address_revision(transaction_id, address_id) -> dict:\n address_version = version_class(Address)\n address = db.session.query(address_version) \\\n .filter(address_version.transaction_id <= transaction_id) \\\n .filter(address_version.operation_type != 2) \\\n .filter(address_version.id == address_id) \\\n .filter(or_(address_version.end_transaction_id == None, # pylint: disable=singleton-comparison # noqa: E711,E501;\n address_version.end_transaction_id > transaction_id)) \\\n .order_by(address_version.transaction_id).one_or_none()\n return address", "def get_address(self):\n if self.address:\n return self.address", "def get_address_by_name(name, limit):\n request = \"{}/{}?key={}&q={}&type=json&limit={}\".format(config.GEOCODE_URL, config.GEOCODE_SEARCH_PATH, config.GEOCODE_KEY, name, limit)\n response = requests.get(request).json()\n return response", "def test_get_address(self):\r\n note_data = self.tape.dataframe.iloc[0]\r\n note = self.tape._get_note_object(note_data)\r\n eq_(note.get_address(), '8 Brown St, Methuen, MA 01844')", "def get_account_by_id(self, id_):\n return next((account for account in self.accounts\n if account.id == id_), None)", "def hbnb_states_byID_route(id=None):\n from models.state import State\n\n states = storage.all(State).values()\n if id:\n get_state = None\n for state in states:\n if state.id == id:\n get_state = state\n return render_template(\"9-states.html\", desired_state=get_state)\n return render_template(\"8-cities_by_states.html\", all_states=states)", "def get_address_id(self):\n return self.complete_address[-1]", "def get(self, id):\n\n session = Session()\n city = session.query(Cities).get(id)\n if city:\n response = dict(data=city.get_as_dict())\n else:\n return \"City with id={} does not exist!\".format(id), HTTP_NOT_FOUND_CODE\n\n return response, HTTP_OK_CODE", "def get_by_id(id: UUID) -> UrlModel:\n url = UrlModel.query.filter_by(id=id).first_or_404()\n\n return url", "def _get_address(self):\n return self.__address", "def get_order_detail(orderid): \n data = order_obj.get_order_detail(orderid)\n return data", "def get_or_404(self, id):\n return self.__model__.query.get_or_404(id)", "def deposit_address(self):\n response = self.query('deposit_address')\n return response", "def get_subnet_by_id(self, id):\n return self.network.get_subnet(id)" ]
[ "0.7158677", "0.70268", "0.6940104", "0.6777704", "0.67587537", "0.6659815", "0.6643687", "0.65674657", "0.6503904", "0.6471765", "0.64163893", "0.6403165", "0.63863844", "0.6383097", "0.6337203", "0.6270841", "0.6270169", "0.6261926", "0.62596136", "0.62596136", "0.6199845", "0.61945045", "0.6183538", "0.61809504", "0.61071247", "0.61006105", "0.6081085", "0.6037978", "0.6025714", "0.6018573", "0.5990854", "0.59878004", "0.5936429", "0.59128094", "0.5883727", "0.58796173", "0.586421", "0.5860718", "0.584939", "0.58449835", "0.58266157", "0.57840186", "0.5775538", "0.5760975", "0.5753021", "0.5751321", "0.5746672", "0.5745267", "0.57404166", "0.5740002", "0.5725242", "0.57080543", "0.57003975", "0.5696604", "0.56908154", "0.56806356", "0.5679394", "0.56781656", "0.56687886", "0.5658007", "0.563043", "0.5617354", "0.5611981", "0.56052834", "0.5597299", "0.55933756", "0.5589819", "0.5585906", "0.5579415", "0.55651426", "0.5544754", "0.55379957", "0.5535409", "0.5531957", "0.5529791", "0.55289215", "0.55212873", "0.5520435", "0.55115455", "0.550517", "0.5502199", "0.5494601", "0.54912215", "0.5490127", "0.5486435", "0.54810673", "0.5480172", "0.5479573", "0.54717404", "0.54691917", "0.5465283", "0.54534656", "0.5450146", "0.5442677", "0.5437307", "0.5424007", "0.54239106", "0.5416302", "0.54113066", "0.5405667" ]
0.75688535
0
delete address by id
def delete_address(address_id: int): try: current_app.logger.info("delete address : address_id: %s", address_id) return jsonify({ "status": address_service.delete_address_by_id(address_id) }), 200 except SQLCustomError as error: current_app.logger.error("fail to delete address : address_id: %s", address_id) return jsonify({ "errors": { "error": error.__dict__ } }), 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, request, *args, **kwargs):\n # validate address id and get object\n instance = self.get_object()\n\n # get last transaction save point id\n sid = transaction.savepoint()\n\n try:\n # soft delete address\n instance.delete_address(request.user)\n except Exception as err:\n logger.error(\"Unexpected error occurred : %s.\", err)\n # roll back transaction if any exception occur while delete address\n transaction.savepoint_rollback(sid)\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n return APIResponse({\"message\": DELETE_ADDRESS}, HTTP_OK)", "def delete(self, _id):", "def delete_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.delete_address()\n\n self.deletion_modal.confirm_address_deletion()", "def test_delete_shipping_address(self):\n self.cim.delete_shipping_address(\n customer_profile_id=u\"123\",\n customer_address_id=u\"543\"\n )", "def delete_address(self) -> object:\n self.delete_button.click()\n\n return DeletionModal(self).wait_for_component_to_be_present()", "def delete_location(self, location_id):", "def delete(self, id):\n raise NotImplementedError", "def test_client_address_delete(self):\n pass", "def delete(self,id):\r\n return delete(id=id)", "def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def test_delete_email_address(self):\n email_addr = 'delete@' + self.email_dom\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n self.assertTrue(addr.delete(email_addr))", "async def test_delete_organization_address(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='DELETE',\n path='/v1/addresses/{address_id}'.format(address_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def delete(self, cls, id):\n pass", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete(self, id):\n empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoeliminar:\n db.session.delete(empleadoeliminar)\n db.session.commit()\n return 201\n api.abort(404)", "def test_destroy_address(self):\n address = ProfileAddressFactory.create(city=\"city_destroyed\")\n \n url = reverse('v1:addresses-detail', args=[1, address.id])\n self.assertEqual(len(ProfileAddress.objects.all()), 1)\n\n # Check Anonymous User should return 403\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n # Profile Owner User\n self.client.credentials(Authorization='Bearer ' + 'regularusertoken')\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n \n self.assertEqual(len(ProfileAddress.objects.all()), 0)", "def delete(self, id):\n self.cursor.execute(\"DELETE FROM Book WHERE Id = ?\", (id,))\n self.connection.commit()", "def delete(self, id):\n response = remove_location(id)\n return response", "def delete(self, id):\n return Contacts().delete_one(id)", "def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n 
else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})", "def delete(cls, id):\n raise Exception('Not Implemented Yet')", "def delete(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n if boat.at_sea == False:\n query = Slip.query(Slip.current_boat == boat.id)\n result = query.fetch(limit = 1)\n for match in result:\n match.current_boat = None\n match.arrival_date = None\n match.put()\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\") \n else:\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\")", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def api_delete_by_id(id):\n mail_dao.delete_by_id(int(id))\n return api_all()", "def delete(id):\n db = core.connect()\n # FIXME: What happens to orphaned comments? - David 7/6/09\n del db[id]", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, _id):\n self._db[_id].delete()", "def delcontact(id):\n delid = str(id)\n\n try:\n r.srem(\"contacts\", delid, 1)\n\n r.delete(\"uid:\" + delid + \":name\")\n r.delete(\"uid:\" + delid + \":address\")\n r.delete(\"uid:\" + delid + \":phone\")\n r.delete(\"uid:\" + delid + \":email\")\n\n return {}\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def delete_by_id(cls, id):\n\t\tbook = Book.query.get(id)\n\t\tdb.session.delete(book)\n\t\tdb.session.commit()", "def delete(self, mapitem_id: int):\n pass", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def delete(self, id):\n\n query = \"DELETE FROM {} WHERE id = {}\".format(self.table, id)\n\n self.remove(query)\n return True", "def unlink(address):", "def delete():", "def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def delete(id_: int):\n logger.debug('Deleting employee with id %i.', id_)\n try:\n delete_employee = Employee.query.get(id_)\n db.session.delete(delete_employee)\n except Exception as exception:\n logger.error('An error occurred while deleting employee with id %i. 
'\n 'Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully deleted employee with id %i.', id_)", "def delete(id):\n elementFromDB = Advertisements().get_one_element(id)\n if elementFromDB is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n try:\n elements = Advertisements().delete_element(id)\n result = jsonify(elements)\n result.statut_code = 200\n return result\n except Exception as identifier:\n return abort(500, identifier)", "def delete(self, id):\n r = validate_get(id)\n tareaID = r.tarea.id\n r.destroySelf()\n flash(_(u'El %s fue eliminado permanentemente.') % name)\n raise redirect('../list/%d' % tareaID)", "def delete(self, id=None):\n if id is not None:\n self.where('id', '=', id)\n\n sql = self._grammar.compile_delete(self)\n\n return self._connection.delete(sql, self.get_bindings())", "def delete_by_id(cls, id):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.saved = False\n\t\tdb.session.commit()", "def delete_entry(self, scenario_id):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_id,))", "def delete(self, id: int):\n self._select_interface(self._rc_delete, self._http_delete, id)", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def delete_order(order_id):\n with MY_CONNECTION as connection:\n connection.execute(\"DELETE FROM Orders WHERE id_order=?\", (order_id,))", "def delete_record(self, id_: str) -> None:\n instance = self._get(id_)\n self._delete_from_db(instance)", "def test_ipam_ip_addresses_delete(self):\n pass", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('host', kwargs)", "def delete_user(id):\n pass", "def delete_ip(ip):\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n\n # Deleting single record now\n sql_delete_query = \"\"\"DELETE from Status where ip = ?\"\"\"\n\n cursor.execute(sql_delete_query, [ip])\n sql.commit()\n\n logging.debug(\"Record deleted successfully \")\n\n cursor.close()\n sql.close()", "def deleteOne(id):\n print(inspect.stack()[1][3])\n query = Followup.delete().where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to find the given client'}\n return {'status': \"Delete Succesful\"}", "def del_history_to_address(address):\n \n result = delete_transaction_observation_to_address(address)\n\n # if successfully deleted from observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"", "def delete_business(yelp_id, conn):\n return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))", "def city_delete_by_id(city_id):\n\n fetched_obj = storage.get(\"City\", str(city_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})", "def delete_place(place_id):\n place = storage.get('Place', place_id)\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n return (jsonify({}), 200)", "def delete_place(place_id):\n place = storage.get(Place, place_id)\n 
if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200", "def delete_by_id(state_id):\n delete_state = storage.get('State', state_id)\n if not delete_state:\n abort(404)\n else:\n delete_state.delete()\n storage.save()\n return jsonify({}), 200", "def delete(self, id):\n return self._call('%s.delete' % self._shopware_model, [int(id)])", "def del_city(city_id):\n for obj in storage.all(City).values():\n if obj.id == city_id:\n obj.delete()\n storage.save()\n return ({}, 200)\n abort(404)", "def delete(thing, id_):\n pass", "def delete_place(place_id):\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({})", "def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))", "def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)", "def test_delete_route(self):\n\n delete = {\n 'ip': 'test_ip',\n 'next_hop': 'test_nexthop',\n 'communities': 'test_commu'\n }\n route_id = self.database.add_route(delete)\n self.database.delete_route({'_id': route_id})\n route = self.database.route.find_one({'_id': route_id})\n self.assertEqual(route, None, 'deletion failed')", "def delete_place(place_id):\n place = storage.get(Place, place_id)\n if place is None:\n abort(404)\n storage.delete(place)\n storage.save()\n storage.reload()\n return jsonify({}), 200", "def delete(self):\n query = \"DELETE FROM \" + self.table + \" WHERE \" + self.idfield + \"=%s\"\n dbh = dbstuff.getRW(self.dbh_key)\n try:\n c = dbh.cursor()\n c.execute(query, self.id)\n c.close()\n dbh.commit()\n finally:\n dbstuff.release(dbh,self.dbh_key)", "def delete(self, id=None):\n if id:\n id = str(urllib.unquote(id))\n public_key = PublicKey.get_by_id(long(id))\n if public_key:\n public_key.delete()\n self.response.write({'status' : 'success'})\n return\n else:\n self.abort(404)\n self.abort(400)", "def delete(self, id: str) -> Any:\n\n return self.client.delete(self._url(id))", "def _delete (self):\n self._exec ('delete from table_name where id=%(id)s')", "def del_history_from_address(address):\n \n result = delete_transaction_observation_from_address(address)\n\n # if successfully deleted from observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"", "def __Delete(self, url, id = None):\n\n conn = self.__GetConnection()\n if (id != None):\n url += \"/\" + str(id)\n conn.request(\"DELETE\", url, \"\", self.__MakeHeaders(True))\n response = conn.getresponse()\n self.__CheckResponse(response)", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def del_states_id(state_id):\n thing = storage.all('State')\n muricanState = \"State.\" + state_id\n state = thing.get(muricanState)\n if state is None:\n abort(404)\n else:\n state.delete()\n storage.save()\n return (jsonify({}), 200)", "def pin_delete(self, pin_id=None, path=None):\n if path == None:\n path = []\n if pin_id != None:\n path.insert(0, pin_id)\n path.insert(0, \"pin\")\n location = '/'.join(path)\n return self.send_delete(location,\n params={})", "def del_city_id(city_id):\n\n if storage.get(\"City\", city_id) is not None:\n obj = storage.get(\"City\", city_id)\n storage.delete(obj)\n storage.save()\n return make_response({}, 200)\n else:\n abort(404)", "def 
delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200", "def delete_station_by_id(pool, id_):\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"DELETE FROM `station` WHERE `id`=%s\"\n row_count = cursor.execute(sql_statement, id_)\n connection.commit()\n if row_count > 0:\n return True\n else:\n logger.info(\"There's no record of station in the database with the station id {}\".format(id_))\n return False\n except Exception as exception:\n connection.rollback()\n error_message = \"Deleting station with id {} failed.\".format(id_)\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def delete(self, id):\t\t\n\t\ttry:\n\t\t\tpost_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tpost_space.abort(400, e.args[0], status = \"Could not delete post\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tpost_space.abort(500, e.args[0], status = \"Could not delete post\", statusCode = \"500\")", "def delete(self, object_id):\n libplasma.delete(self.conn, object_id)", "def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True", "def delete_account_id(account_id):\n conn = get_connect()\n conn.execute(\"DELETE from account WHERE accountId = ?\", [account_id])\n conn.commit()\n conn.close()\n return", "def delete(self, **kwargs):\n if not any([i in kwargs for i in ('host', 'address', 'addresses')]):\n raise TypeError('Expected host, address, or addresses.')\n self.dbdel('vuln', kwargs)", "def delete(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n if slip.current_boat != None:\n \"\"\" Tests for a Boat \"docked\" in slip to be deleted. if found, sets the\n Boat entity at_sea property to True and deletes the slip. 
\"\"\"\n boat_dict = None\n query = Boat.query(Boat.at_sea == False)\n results = query.fetch(limit = MAX_BOATS)\n for match in results:\n if slip.current_boat == match.id:\n match.at_sea = True\n match.put()\n slip.key.delete()\n self.response.write(\"Slip has been deleted!\")\n else:\n slip.key.delete()\n self.response.write(\"Slip has been deleted!\")", "def delete(self, application_id):", "def delete(self, ip): # pylint: disable=invalid-name\n return self.request(\"DELETE\", ip)", "def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)", "def delete(self, id: int):\n\n del self.__clients[id]", "def delete(self, unique_id):\n return request(\n API_LIST.DNS_DELETE.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'id': unique_id\n }\n )", "def delete(id: int) -> dict:\n tarea = obtener_tarea(id)\n __tareas.remove(tarea)\n return tarea", "def delete(id):\n program = Programa.query.get(id)\n if program:\n try:\n db.session.delete(program)\n db.session.commit()\n return \"\", 200\n except Exception as e:\n logging.error(str(e))\n return jsonify({\"message\": str(e)}), 500\n return \"\", 404", "async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n q = \"\"\"delete from contacts where id=%s\"\"\"\n await dbcon.operation(q, (contact_id,))", "def delete_task(id):\n cursor = conn.cursor()\n cursor.execute(\"DELETE from tasks where id = %s;\", (id, ))\n conn.commit()\n print(\"Number of records deleted:\", cursor.rowcount)", "def delete(self, id):\r\n try:\r\n self.valid_args()\r\n inst = db.session.query(self.__class__).get(id)\r\n if inst is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).delete(inst)\r\n db.session.delete(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return '', 204\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='DELETE')" ]
[ "0.75762683", "0.72528064", "0.70932806", "0.7015976", "0.7014734", "0.6979125", "0.68229157", "0.6805178", "0.67385775", "0.67281526", "0.6688489", "0.66123915", "0.6570931", "0.6570451", "0.65646535", "0.6552276", "0.654469", "0.6536282", "0.6487301", "0.6466477", "0.6458435", "0.64548826", "0.6450864", "0.64279366", "0.6407502", "0.639376", "0.63900393", "0.63866895", "0.63820314", "0.6371938", "0.6361713", "0.63225895", "0.6319566", "0.6286608", "0.6277576", "0.62598747", "0.6244825", "0.6240336", "0.62365323", "0.6225047", "0.6224326", "0.6214071", "0.6200807", "0.61991954", "0.61967427", "0.61935353", "0.6190014", "0.6178017", "0.61730593", "0.61697364", "0.61697364", "0.61697364", "0.61697364", "0.61697364", "0.61573476", "0.61562407", "0.615494", "0.61457634", "0.6141468", "0.61334205", "0.6130429", "0.6122657", "0.610773", "0.610202", "0.60978323", "0.6092287", "0.6064027", "0.60592073", "0.6055826", "0.6044141", "0.6022279", "0.6021817", "0.60076964", "0.60075945", "0.6004147", "0.599169", "0.59826285", "0.59783095", "0.5969704", "0.59681857", "0.59668255", "0.5965334", "0.59649795", "0.596417", "0.59617025", "0.595277", "0.59508824", "0.5942369", "0.59342134", "0.5928438", "0.5922799", "0.59027886", "0.59017736", "0.58864856", "0.58840084", "0.58706075", "0.5866794", "0.5859775", "0.58563894", "0.5856037" ]
0.7932358
0
get all addresses list
def get_all_addresses():
    try:
        addresses = address_service.get_all_addresses()
        current_app.logger.info("get all addresses")
        return jsonify({
            "data": {
                "count": len(addresses),
                "addresses": addresses
            }}), 200
    except SQLCustomError as error:
        current_app.logger.error("fail to get all addresses: %s", error)
        return jsonify({
            "errors": {
                "error": error.__dict__
            }
        }), 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_addrs(self) -> List[Multiaddr]:", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def addresses(self) -> \"List[str]\":\n return self._attrs.get(\"addresses\")", "def get_all_addresses(cls, buildfile):\r\n def lookup():\r\n if buildfile in cls._addresses_by_buildfile:\r\n return cls._addresses_by_buildfile[buildfile]\r\n else:\r\n return OrderedSet()\r\n\r\n addresses = lookup()\r\n if addresses:\r\n return addresses\r\n else:\r\n ParseContext(buildfile).parse()\r\n return lookup()", "def get(self):\n\n return self.get_request_handler(request.headers).get_all_addresses()", "def getRestaurantAddresses(restaurants):\n addresslist = []\n for rest in restaurants:\n if 'address' in rest:\n addressstring = str(rest['address']) + ' ' + str(rest['city'])\n addresslist.append(addressstring)\n\n # pprint.pprint(addresslist)\n return addresslist", "def accounts_with_address(self):\n return [account for account in self if account.address]", "def returnDepositAddresses(self):\n pass", "def addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"addresses\")", "def load_addresses():\n with open('addresses.txt') as f:\n return [address.strip() for address in f.readlines()]", "def list_addresses(self, region):\n assert is_valid_region(region), region\n page_token = None\n while True:\n params = {'maxResults': 250}\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api(\n '/regions/%s/addresses' % region, params=params, deadline=120)\n for addr in resp.get('items', []):\n yield addr\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def get_supervisor_addresses(self) -> List[str]:", "def getaddressesbyaccount(self, account):\n return self.proxy.getaddressesbyaccount(account)", "def get_addresses(self, name):\n return map(int, self.party.addresses)", "def addresses(self):\n if 'Ward Matters' in self.topics or 'City Matters' in self.topics:\n stname_pattern = \"(\\S*[a-z]\\S*\\s){1,4}?\"\n sttype_pattern = \"(ave|blvd|cres|ct|dr|hwy|ln|pkwy|pl|plz|rd|row|sq|st|ter|way)\"\n st_pattern = stname_pattern + sttype_pattern\n\n addr_pattern = \"(\\d(\\d|-)*\\s%s)\" %st_pattern\n intersec_pattern = exp = \"((?<=\\sat\\s)%s\\s?and\\s?%s)\" %(st_pattern, st_pattern)\n\n pattern = \"(%s|%s)\" %(addr_pattern, intersec_pattern)\n\n matches = re.findall(pattern, self.description, re.IGNORECASE)\n\n addresses = [m[0] for m in matches]\n return addresses\n\n return []", "def do_addresses(self, args):\n pprint(self.wallet.addresses)", "def __getAddresses(parsed: BeautifulSoup) -> list:\n\n # Addresses container\n address_divs = parsed.find_all('div', class_='mailer')\n\n # Building RegEx for phone number\n # The following RegEx extracts phone numbers in the following formats:\n # 1. (###) ###-####\n # 2. ###-###-####\n # 3. 
##########\n phone_number_regex = re.compile(\n r'(\\(\\d{3}\\) \\d{3}-\\d{4}|\\d{3}-\\d{3}-\\d{4}|\\d{10})')\n\n # List for final addresses\n addresses = list()\n\n for address in address_divs:\n # Create dict for address\n address_parsed = dict()\n # Split text by newline\n address_items = address.text.split('\\n')\n # Removing leading and trailing spaces\n address_items = [i.strip() for i in address_items]\n\n # Variable to store street address\n street_address = ''\n\n # Iterate through each line\n for idx, address_item in enumerate(address_items):\n # First line is address type\n if idx == 0:\n address_parsed['type'] = address_item\n continue\n\n # Check if line has phone number\n phone_matches = phone_number_regex.findall(address_item)\n if len(phone_matches) == 1:\n # Stripping non-digit characters from phone number\n phone_number = re.sub('[^0-9]', '', phone_matches[0])\n address_parsed['phone'] = phone_number\n continue\n \n # If no number, add to address line\n street_address += address_item.strip() + ' '\n \n # Adding street address to parsed address\n address_parsed['street_address'] = street_address.strip()\n\n # Adding parsed address to addresses master list\n addresses += [address_parsed]\n\n return addresses", "def getAllInitializedAddressSet(self) -> ghidra.program.model.address.AddressSetView:\n ...", "def getEquateAddresses(self) -> ghidra.program.model.address.AddressIterator:\n ...", "def symbol_table_addresses(self):\n all_address = []\n for node in self.all_nodes[0]:\n all_address.extend(node['addresses'])\n return all_address", "def get_all_addresses(self, addresses=None, filters=None, allocation_ids=None):\r\n params = {}\r\n if addresses:\r\n self.build_list_params(params, addresses, 'PublicIp')\r\n if allocation_ids:\r\n self.build_list_params(params, allocation_ids, 'AllocationId')\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST')", "def get_all_cell_addresses() -> Tuple[CellAddress, ...]:\n return _all_cell_addresses", "def listreceivedbyaddress(self, minconf=1, includeempty=False):\n return [AddressInfo(**x) for x in\n self.proxy.listreceivedbyaddress(minconf, includeempty)]", "def get_all_locations(self):", "def street_addresses(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"street_addresses\")", "def test_client_addresses_list(self):\n pass", "def scan_addresses(self, root=None):", "async def test_list_entity_addresses(self):\n await test_service.list_entity_addresses(self)", "def get_empty_addresses(self, amt):\n\n empty = []\n for group in self.rpc.listaddressgroupings():\n for i in group:\n if i[1] == 0:\n empty.append(i[0])\n if len(empty) >= amt:\n return empty\n\n while len(empty) < amt:\n empty.append(self.rpc.getnewaddress())\n\n return empty", "def build_addresses(self):\n \n from ambry.geo.geocoders import DstkGeocoder\n\n facilities = self.partitions.find(table='facilities')\n\n def address_gen():\n for row in facilities.query(\"SELECT * FROM facilities\"):\n address = \"{}, {}, {} {}\".format(row['dba_address1'], row['dba_city'], 'CA', row['dba_zip_code'])\n yield (address, row)\n\n dstk_service = self.config.service('dstk')\n \n dstk_gc = DstkGeocoder(dstk_service, address_gen())\n \n p = self.partitions.find_or_new(table='facilities_addresses')\n p.clean()\n \n lr = self.init_log_rate(500)\n \n with p.inserter() as ins:\n for i, (k, r, inp_row) in enumerate(dstk_gc.geocode()):\n lr(\"Addresses \"+str(i))\n 
r['facilities_id'] = inp_row['id']\n ins.insert(r)", "def get_text_data_child_list(self):\n return self.address_list + self.urls", "def _address_fields(self):\n return list(ADDRESS_FIELDS)", "def _address_fields(self):\n return list(ADDRESS_FIELDS)", "def list_ip_addresses(data):\n ip_list = [item[0] for item in data]\n sorted_set = sorted(set(ip_list))\n addr_list = [ip for ip in sorted_set]\n return addr_list", "def get_worker_addresses(self) -> List[str]:", "def ListAddresses(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def get(self, **attrs) -> list:\n return await self._do_request(\"get\", get_address, self._user_auth, params=attrs)", "def getEquateAddresses(self, asv: ghidra.program.model.address.AddressSetView) -> ghidra.program.model.address.AddressIterator:\n ...", "def test_list_address(self):\n\n data = [\n dict(\n id=self.address.id,\n address_line1='random address 1',\n address_line2='',\n postal_code='RAN DOM',\n city='random city',\n state_province=dict(\n iso_code=self.random_state_province.iso_code,\n name=self.random_state_province.name,\n ),\n country=dict(\n iso_code=self.random_country.iso_code,\n name=self.random_country.name,\n ),\n ),\n ]\n\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('location:addresses'))\n\n self.assertEqual(json.loads(response.content)['results'], data)\n self.assertEqual(json.loads(response.content)['count'], 1)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def show_address_balances(self):\n\t\tret = []\n\t\tfor acct in self.wallet:\n\t\t\tutxos = get_unspent(acct[\"address\"], self.testnet)\n\t\t\tif len(utxos) != 0:\n\t\t\t\tbalance = sum(i['value'] for i in utxos)\n\t\t\t\tret.append(f\"Address {acct['address']} BTC: {str(balance/100000000.0)}\")\n\t\treturn ret", "def get_addresses_in_use(self, network_id):\n addresses = []\n for server in self.get_servers():\n if 'addresses' in server:\n addrs = server['addresses']\n for network_name in addrs.keys():\n for ip in addrs[network_name]:\n addresses.append(ip['addr'])\n return addresses", "def get_list_of_addressbooks(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_addressbooks\")\n return self.__handle_result(self.__send_request('addressbooks', 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def test_all_addresses_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),\n '10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertItemsEqual(['address_info_1', 'address_info_2'],\n rpc.get_all_addresses_info())", "def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses", "def _get_all_eip_addresses(\n addresses=None, allocation_ids=None, region=None, key=None, keyid=None, profile=None\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.get_all_addresses(\n addresses=addresses, allocation_ids=allocation_ids\n )\n except boto.exception.BotoServerError as 
e:\n log.error(e)\n return []", "def get_addresses_by_account(account):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"getaddressesbyaccount\", account])\n addresses = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return addresses", "def address(self):\n addrlist = []\n for s in self.srv_socks:\n addrlist.append(s.getsockname())\n return addrlist", "def _get_ip_addresses(ip_addresses):\n ret = []\n for item in ip_addresses:\n ret.append(item)\n\n return ret", "def get_messages(self):\n return self.addresses", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def __call__(self,\n skip: int = None,\n take: int = None,\n **kwargs) -> List[AddressBookEntryModel]:\n request_model = GetRequest(skip=skip, take=take)\n data = self.get(request_model, **kwargs)\n addressbook = [\n AddressBookEntryModel(\n address=Address(address=item['address'], network=self._network), label=item['label']\n ) for item in data['addresses']]\n return addressbook", "def get_citation_child_list(self):\n return self.address_list", "def addresses(self):\n addresses = collections.OrderedDict()\n for key in self.keyring:\n address = pubtoaddr(key[\"public\"])\n addresses[address] = key\n return addresses", "def get_queryset(self):\n return UserAddress._default_manager.filter(user=self.request.user)", "def getAddress(user):", "def address_tags(self):\n return self._address_tags", "def get_all_addresses(limit, paginated=False, cursor_key=None):\n\n if paginated and cursor_key:\n cursor = ndb.Cursor(urlsafe=cursor_key)\n else:\n cursor = ndb.Cursor()\n\n results, next_cursor, is_more = AddressEntry.query().fetch_page(limit, start_cursor=cursor)\n\n jdict = {\n \"entries\" : [entry.to_json_dict() for entry in results],\n \"is_more\" : is_more\n }\n\n if paginated:\n jdict[\"next\"] = cursor.urlsafe()\n\n return jdict", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def address_generator():\n balances = _indexer_client().asset_balances(ASSET_ID)\n while balances.get(\"balances\"):\n for item in balances.get(\"balances\"):\n if check_address(item.get(\"address\")):\n yield item.get(\"address\")\n next_token = balances.get(\"next-token\")\n balances = _indexer_client().asset_balances(ASSET_ID, next_page=next_token)", "def get_address_iter(cls, prov_list=None, start_from=''):\n if prov_list is None:\n prov_list = []\n\n provinces = cls.get_provinces().consulta_provinciero.provinciero.prov\n if provinces == DotMap():\n logger.error(\"No provinces available right now (Service is down?)\")\n yield None\n\n for province in provinces:\n prov_name = province.np\n prov_num = province.cpine\n if prov_name == DotMap() or prov_num == DotMap():\n continue\n\n if len(prov_list) > 0 and prov_name not in prov_list:\n continue\n\n cities = 
cls.get_cities(prov_name).consulta_municipiero.municipiero.muni\n if cities == DotMap():\n logger.error(\"No cities available right now (Service is down?)\")\n return\n\n for city in cities:\n city_name = city.nm\n city_num = city.locat.cmc\n\n if city_name == DotMap() or city_num == DotMap():\n continue\n\n if start_from != '' and city_name != start_from:\n logger.debug(\"Skipping {}\".format(city_name))\n continue\n\n addresses = cls.get_addresses(prov_name, city_name).consulta_callejero.callejero.calle\n if addresses == DotMap():\n logger.error(\"No addresses available right now (Service is down?)\")\n return\n\n for address in addresses:\n\n address_dir = address.dir\n tv = address_dir.tv\n nv = address_dir.nv\n\n if tv == DotMap() or nv == DotMap():\n continue\n else:\n yield (prov_name, prov_num, city_name, city_num, address_dir, tv, nv)", "def scan_addresses(root_dir, base_path=None):\r\n\r\n addresses = OrderedSet()\r\n for buildfile in BuildFile.scan_buildfiles(root_dir, base_path):\r\n addresses.update(Target.get_all_addresses(buildfile))\r\n return addresses", "def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))", "def get_ipaddresses(auth):\n url_ipaddresses = \"http://\" + auth.ipaddr + \"/rest/\"+auth.version+\"/ipaddresses\"\n try:\n r = requests.get(url_ipaddresses, headers = auth.cookie)\n ipaddresses = json.loads(r.text)['ip_address_subnet_element']\n return ipaddresses\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + \" get_ipaddresses: An Error has occured\"", "def getReferencesFrom(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def address_txs(self):\n return self._address_txs", "def addresses( data ) :\n return list( set(chain.from_iterable( [ re.sub(r'\\[.*?\\]\\s+','',x['C1']).split('; ') for x in data ] )))", "def set_addresses(self, pardus_profile):\n\n addresses = []\n if self.method == \"manual\":\n net_mask = self.calculate_prefix(pardus_profile.get_net_mask())\n addresses.append(str(pardus_profile.get_net_address()))\n addresses.append(str(net_mask))\n addresses.append(str(pardus_profile.get_net_gateway()))\n addresses = \";\".join(addresses)\n addresses = addresses + \";\" # Make sure addresses end with ';'\n return addresses\n else:\n return \"none\"", "def getEquateAddresses(self, start: ghidra.program.model.address.Address) -> ghidra.program.model.address.AddressIterator:\n ...", "def get_all_places(self):\n self.cursor.execute(\"select * from places\")\n self.connection.commit()\n return self.cursor.fetchall()", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def addresses(self, addresses: \"List[str]\"):\n self._attrs[\"addresses\"] = addresses", "def street_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"street_addresses\")", "def street_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"street_addresses\")", "def test_ipam_ip_addresses_list(self):\n pass", "def test_addresses_list_for_user_one(self):\n\n card_holder_address_model = FundingSources.get_card_holder_address_model()\n\n card_holder_address_model[\"user_token\"] = self.user.token\n\n 
self.client.funding_sources.addresses.create(card_holder_address_model)\n\n self.create_card(self.create_card_product(), self.user)\n\n addresses = self.client.funding_sources.addresses.list_for_user(\n self.user.token)\n\n self.assertEqual(len(addresses), 1,\n 'Unexpected number of addresses retrieved')\n\n verify_card_holder_address_response(\n self, addresses[0], card_holder_address_model)\n\n with self.subTest('Address defined is not the default'):\n self.assertTrue(addresses[0].is_default_address)", "def getReferencesTo(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def build_sites_list():\n ns_getsites_filter = '''\n <nc:filter type=\"xpath\"\n xmlns:nc=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n xmlns:rm=\"http://cisco.com/ns/yang/Cisco-IOS-XE-route-map\"\n select=\"/native/route-map[substring(name, 1, 3)='To_']/name\"\n />\n '''\n m = manager.connect( host='10.112.83.100',\n port=830,\n username='cisco',\n password='cisco',\n hostkey_verify=False)\n answer = m.get_config(source='running', filter=ns_getsites_filter).data_xml\n c = xmltodict.parse (answer)\n # build the list\n liste_sites = [ r['name'][3:] for r in c['data']['native']['route-map'] ]\n return liste_sites", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def get_queryset(self):\n qs = super(IPAddressManager, self).get_queryset()\n return qs.annotate(host=RawSQL('INET(HOST(ipam_ipaddress.address))', [])).order_by('family', 'host')", "def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n address_tags = commonDAO.list_address_tags(currency, address)\n return address_tags # can be empty list", "def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country", "def get_function_addresses(self):\n pass", "def readHouseAddresses():\n addressesRead = []\n with open(\"Files/HouseAddresses.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n addressesRead.append(address)\n f.close()\n return addressesRead", "def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])", "def get_all_namenode_addresses(hdfs_site):\n nn_addresses = []\n name_services = get_nameservices(hdfs_site)\n if not name_services:\n name_services = [None] #fall back to config handling without name services\n for ns in name_services:\n nn_addresses += _get_all_namenode_addresses_single_ns(hdfs_site, ns)\n return nn_addresses", "def get_addrs(self):\n # TODO check if server is listening\n return self.multiaddrs", "def getPCAdress(self) -> ghidra.program.model.address.Address:\n ...", "def getStreets (self) :\n if self.buffer == None :\n self.buffer = self.fetch ()\n return self.buffer.intersec5", "def get_clients(self):\n clis = []\n for c in self._clients:\n clis.append(c.get_address())\n return clis", "def get_clients(self):\n clis = []\n for c in self._clients:\n clis.append(c.get_address())\n return clis", 
"def _set_search_addresses(self):\n if self._report_data and self._report_data['details']:\n for detail in self._report_data['details']:\n if detail.get('ownerGroups'):\n for group in detail['ownerGroups']:\n for owner in group['owners']:\n Report._format_address(owner['address'])\n if detail.get('location') and 'address' in detail['location']:\n Report._format_address(detail['location']['address'])\n if detail.get('notes'):\n for note in detail['notes']:\n if note.get('contactAddress'):\n Report._format_address(note['contactAddress'])\n elif note.get('givingNoticeParty') and note['givingNoticeParty'].get('address'):\n Report._format_address(note['givingNoticeParty']['address'])", "def get_address() -> pd.DataFrame:\n return GETTER.organisationaddress.merge(GETTER.address, on=\"address_id\").drop(\n \"address_id\", 1\n )", "def get_existing_values(self): #DONE\n return (value.value for value in self.address.values() if value.value)", "def listToAddr(location):\n\n start_time = time.time()\n wk = [key for key in location.keys() if key in ('street', 'house_num', 'suburb', 'city', 'province', 'country', 'pos_code')]\n address = re.sub(',', '', ', '.join(value for value in dict(zip(wk, [location[k] for k in wk])).values() if value), 1)\n print('--- Tiempo de ejecucion listToAddr: {} segundos ---'.format((time.time() - start_time)))\n return address", "def _formatting_address_fields(self):\n return self._address_fields()", "def get_ip(self):\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)", "def GetGlobalDNSAddresses(self):\n return (misc.noneToString(self.dns1), misc.noneToString(self.dns2),\n misc.noneToString(self.dns3))", "def extract(self):\n print(\"Extracting addresses from google form spreadsheet\")\n r = requests.get(self.config_dict.get('remote_url'))\n r.encoding = \"utf-8\"\n data = r.text\n with open(f\"{self.config_dict.get('proj_dir')}addresses.csv\", \"w\") as output_file:\n output_file.write(data)" ]
[ "0.7885754", "0.7816518", "0.7816518", "0.7816518", "0.7325519", "0.7184814", "0.71506286", "0.70696783", "0.705442", "0.70192426", "0.7001219", "0.6982373", "0.6977537", "0.69752955", "0.69717383", "0.6926956", "0.69209373", "0.69204617", "0.68843", "0.68579096", "0.68555427", "0.6801689", "0.6677202", "0.6675852", "0.66591674", "0.6653722", "0.66429645", "0.6631208", "0.6596156", "0.6517325", "0.65061635", "0.6506095", "0.6501456", "0.6501456", "0.6500362", "0.6474397", "0.6457528", "0.6443681", "0.64425874", "0.6406829", "0.6390983", "0.63887167", "0.63763905", "0.6375652", "0.63522285", "0.63225573", "0.63220024", "0.6296325", "0.6296325", "0.6287821", "0.6270241", "0.6252215", "0.620488", "0.61952823", "0.6192587", "0.6188134", "0.61871016", "0.6183024", "0.61759716", "0.61727405", "0.61661375", "0.61496395", "0.6112097", "0.61033267", "0.61018974", "0.6096429", "0.6085231", "0.60741884", "0.6070075", "0.60608375", "0.6058423", "0.6058423", "0.6058423", "0.6048881", "0.6048881", "0.6037661", "0.6025866", "0.60131836", "0.60053825", "0.5983791", "0.59756035", "0.59747595", "0.59690225", "0.59659505", "0.5960605", "0.595927", "0.59565526", "0.59524035", "0.5938675", "0.59375954", "0.58961385", "0.58961385", "0.5892755", "0.58863133", "0.5885377", "0.58667034", "0.58619136", "0.58604836", "0.58574563", "0.5844718" ]
0.7560295
4
Get the XML representation as `ElementTree` object.
def get_xml(self):
    profile = self.profile
    version = self.version
    #self.attribs['xmlns'] = "http://www.w3.org/2000/svg"
    self.attribs['xmlns:xlink'] = "http://www.w3.org/1999/xlink"
    self.attribs['xmlns:ev'] = "http://www.w3.org/2001/xml-events"
    self.attribs['baseProfile'] = profile
    self.attribs['version'] = version
    return super(Drawing, self).get_xml()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def get_xml(self):\n return etree.tostring(self.get_etree())", "def get_tree():\n root = ET.fromstring(xmlstring)\n return ET.ElementTree(root)", "def xml(self):\n rough_string = ElementTree.tostring(self.dom, \"utf-8\")\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def xml(self):\n return lxml.etree.fromstring(self.content, _XML_PARSER)", "def getXmlEtree(xml):\n\n parser = XMLParser(remove_blank_text=True)\n if xml.startswith('<?xml') or xml.startswith('<'):\n return (parse(StringIO(xml), parser).getroot(),\n getNamespacePrefixDict(xml))\n else:\n if os.path.isfile(xml): xmlStr = open(xml).read()\n else: xmlStr = urlopen(xml).read()\n return (parse(StringIO(xmlStr), parser).getroot(),\n getNamespacePrefixDict(xmlStr))", "def xml(self):\n return self._xml", "def xml(self):\n return self._xml", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def xml_tostring(tree):\n if six.PY2:\n return ET.tostring(tree)\n else:\n return ET.tostring(tree, encoding='unicode')", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def to_xml(self):\n return xmltodict.unparse(\n self.to_xml_dict(),\n pretty=True\n ).encode(\"utf-8\")", "def toXMLElement(self):\n # NOTE Subclasses should call Component.toXMLElement(self) to obtain\n # base node and then add further attributes and sub-elements\n return ET.Element(self.__class__.__name__)", "def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n element.attrib['description'] = self.description\r\n\r\n return element", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def get_xml(self):\n\t\t# get the XML description of the VM\n\t\tvm_xml = self.clonezilla_vm_obj.XMLDesc(0)\n\t\troot = ET.fromstring(vm_xml)\n\t\treturn root", "def get_etree(xml):\n\n parser = XMLParser(remove_blank_text=True)\n if xml.startswith('<?xml') or xml.startswith('<'):\n return (parse(StringIO(xml), parser).getroot(),\n get_ns_dict(xml))\n else:\n if os.path.isfile(xml): xml_str = open(xml).read()\n else: xml_str = urlopen(xml).read()\n return (parse(StringIO(xml_str), parser).getroot(),\n get_ns_dict(xml_str))", "def get_etree(self):\n if self._etree:\n return self._etree\n from richtimes.scripts.shell import get_etree\n self._etree = get_etree(self.filename)\n return self._etree", "def getFullTree(self):\n parsedStr = xml.dom.minidom.parseString(\n ET.tostring(self._tree.getroot())\n )\n return '\\n'.join([line.rstrip()\n for line in parsedStr.toprettyxml().splitlines()\n if line.strip()])", "def xml(self):\n raise NotImplementedError('This api does not return xml')", "def saveToXml(self) -> org.jdom.Element:\n ...", "def get_xml_parser(encoding=None):\n parser = etree.ETCompatXMLParser(\n huge_tree=True,\n remove_comments=True,\n strip_cdata=False,\n remove_blank_text=True,\n resolve_entities=False,\n encoding=encoding\n )\n\n return parser", "def get_etree(self):\n if self._etree:\n return self._etree\n root = self.issue.get_etree()\n self._etree = root.xpath(self.xpath)[0]\n return self._etree", "def xml_string(self):\n if self._xml_string is not None:\n return self._xml_string\n\n return 
etree.tostring(self._xml_node)", "def xml_string(self):\r\n if self._xml_string is not None:\r\n return self._xml_string\r\n\r\n return etree.tostring(self._xml_node)", "def toXMLNode(self):\n return _libsbml.SBase_toXMLNode(self)", "def importETree():\r\n etree_in_c = None\r\n try: # Is it Python 2.5+ with C implemenation of ElementTree installed?\r\n import xml.etree.cElementTree as etree_in_c\r\n from xml.etree.ElementTree import Comment\r\n except ImportError:\r\n try: # Is it Python 2.5+ with Python implementation of ElementTree?\r\n import xml.etree.ElementTree as etree\r\n except ImportError:\r\n try: # An earlier version of Python with cElementTree installed?\r\n import cElementTree as etree_in_c\r\n from elementtree.ElementTree import Comment\r\n except ImportError:\r\n try: # An earlier version of Python with Python ElementTree?\r\n import elementtree.ElementTree as etree\r\n except ImportError:\r\n raise ImportError(\"Failed to import ElementTree\")\r\n if etree_in_c: \r\n if etree_in_c.VERSION < \"1.0.5\":\r\n raise RuntimeError(\"cElementTree version 1.0.5 or higher is required.\")\r\n # Third party serializers (including ours) test with non-c Comment\r\n etree_in_c.test_comment = Comment\r\n return etree_in_c\r\n elif etree.VERSION < \"1.1\":\r\n raise RuntimeError(\"ElementTree version 1.1 or higher is required\")\r\n else:\r\n return etree", "def get_xml(xml_file_path: str):\n root = et.parse(xml_file_path).getroot()\n\n return root", "def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()", "def convert_etree(tree):\n return objectify.fromstring(etree.tostring(tree))", "def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()", "def getXMLTree(xmlString, debug=False):\n\tfrom lxml import etree as lxtree\n\ttree = lxtree.fromstring(_nukeNamespaces(xmlString))\n\n\tif debug:\n\t\tlxtree.dump(tree)\n\treturn tree", "def prettify(self):\n rough_string = ET.tostring(self.root, encoding='utf-8', method='xml').decode('utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def stringToXML(self, unparsedXML):\n xmlRoot = ET.fromstring(unparsedXML)\n self.xmlRoot = xmlRoot\n return xmlRoot", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = 
children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def __str__(self):\n result = xml.dom.minidom.parseString(\n xml.etree.ElementTree.tostring(\n self.ToXMLElement(), encoding='utf-8')).toprettyxml(indent=' ')\n\n return result", "def serialize(self):\n return self.xmlnode.serialize(encoding=\"utf-8\")", "def test_simple_XML(self):\n\n e = ET.XML('''\n <root>\n <e />\n <e>text</e>\n <e name=\"value\" />\n <e name=\"value\">text</e>\n <e> <a>text</a> <b>text</b> </e>\n <e> <a>text</a> <a>text</a> </e>\n <e> text <a>text</a> </e>\n </root>\n ''')\n\n d = etree_to_dict(e)\n\n if SHOW_RESULTS:\n pprint(d)\n\n e = dict_to_xml_str(d)\n\n if SHOW_RESULTS:\n print(e)\n print(prettify(e))", "def get_xml(self):\n xml = svgwrite.etree.etree.Element(self.elementname)\n if self.debug:\n self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)\n for attribute, value in self.attribs.items():\n # filter 'None' values\n if value is not None:\n value = self.value_to_string(value)\n if value: # just add not empty attributes\n xml.set(attribute, value)\n \n for element in self.elements:\n xml.append(element)\n return xml", "def read_xml(file_dir):\n tree = ElementTree()\n tree.parse(file_dir)\n return tree", "def unpack_xml(text) -> ET.ElementTree:\n etree: ET.ElementTree = ET.parse(io.StringIO(text))\n _check_for_errors(etree)\n return etree", "def getXmlTree(url):\n return lxml.etree.parse(url)", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def to_etree(self, data, options=None, name=None, depth=0):\r\n if isinstance(data, (list, tuple)):\r\n element = Element(name or 'objects')\r\n if name:\r\n element = Element(name)\r\n element.set('type', 'list')\r\n else:\r\n element = 
Element('objects')\r\n for item in data:\r\n element.append(self.to_etree(item, options, depth=depth + 1))\r\n elif isinstance(data, dict):\r\n if depth == 0:\r\n element = Element(name or 'response')\r\n else:\r\n element = Element(name or 'object')\r\n element.set('type', 'hash')\r\n for (key, value) in data.iteritems():\r\n element.append(self.to_etree(value, options, name=key, depth=depth + 1))\r\n else:\r\n element = Element(name or 'value')\r\n simple_data = self.to_simple(data, options)\r\n data_type = get_type_string(simple_data)\r\n\r\n if data_type != 'string':\r\n element.set('type', get_type_string(simple_data))\r\n\r\n if data_type != 'null':\r\n if isinstance(simple_data, unicode):\r\n element.text = simple_data\r\n else:\r\n element.text = force_unicode(simple_data)\r\n\r\n return element", "def createElements(self):\n if self.__builder.checkRootTag(self.__content):\n elements = self.__content.findall(\"*\")\n\n for el in elements:\n self.parseXml(el, {})\n\n return self.__builder.getRoot()\n else:\n print(\"The Element \", self.__content.tag, \" is unkown.\")\n return None", "def format_xml(elem):\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def to_element_tree(doc: Document) -> etree.ElementTree:\n elements = []\n tags = []\n root, sub, tag = None, None, None\n # Document must have a root element\n if not doc.tags[0].end - doc.tags[0].start == len(doc.text):\n raise ValueError('{} is not a valid root element'.format(doc.tags[0]))\n\n for i, tag in enumerate(doc.tags):\n next_tag = doc.tags[i + 1] if i < len(doc.tags) - 1 else None\n # This is the root element\n if not len(elements):\n root = etree.Element(tag.name, attrib=tag.attrib)\n root.text = doc.text[tag.start:next_tag.start]\n elements.append(root)\n tags.append(tag)\n # If tag is a child of the last element\n elif tags[-1].overlaps(tag):\n # A tag may only have one parent\n intersection = tags[-1].intersection(tag)\n if not len(intersection) == len(tag):\n raise ValueError(\n '{} cannot be a child of {} with intersection {}'.format(tag, tags[-1], len(intersection)))\n sub = etree.SubElement(elements[-1], tag.name, tag.attrib)\n # if the tag has children\n if next_tag is not None and tag.overlaps(next_tag):\n sub.text = doc.text[tag.start:next_tag.start]\n else:\n sub.text = doc.text[tag.start:tag.end]\n elements.append(sub)\n tags.append(tag)\n else:\n sibling_tag, sibling_element = None, None\n # Step out until we find the parent node\n finished = tags[-1].overlaps(tag)\n while not finished and len(tags) > 1:\n sibling_element = elements.pop()\n sibling_tag = tags.pop()\n sibling_element.tail = doc.text[sibling_tag.end:tags[-1].end]\n if tags[-1].overlaps(tag):\n finished = True\n\n # Put the tail on the previous sibling\n if sibling_element is not None:\n sibling_element.tail = doc.text[sibling_tag.end:tag.start]\n\n intersection = tags[-1].intersection(tag)\n if not len(intersection) == len(tag):\n raise ValueError(\n '{} cannot be a child of {} with intersection {}'.format(tag, tags[-1], len(intersection)))\n sub = etree.SubElement(elements[-1], tag.name, tag.attrib)\n # If the next tag is a child of this tag\n if next_tag is not None and tag.overlaps(next_tag):\n sub.text = doc.text[tag.start:next_tag.start]\n else:\n sub.text = doc.text[tag.start:tag.end]\n elements.append(sub)\n tags.append(tag)\n\n if sub is not None and tag is not None:\n sub.tail = doc.text[tag.end:len(doc.text)]\n\n # Remove any newline 
elements added in the read step\n for element in root.iter():\n if element.text is not None:\n element.text = element.text.replace('\\n', '')\n if element.tail is not None:\n element.tail = element.tail.replace('\\n', '')\n return etree.ElementTree(root)", "def to_etree(self):\n\n # Base block element\n attrib = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n if not self.is_leaf:\n attrib[\"mode\"] = self.mode if self.mode is not None else \"default\"\n\n elem = ET.Element(\"block\", attrib)\n\n # If this is an \"open\" block then skip the remaining tags\n if self.name == \"open\":\n return elem\n\n # Attributes / parameters\n if self.is_leaf:\n for tag, data in zip([\"attributes\", \"parameters\"], [self.attributes, self.parameters]):\n xml_list = ET.Element(tag)\n\n sub_tag = tag[:-1]\n for key, value in data.items():\n xml_item = ET.Element(sub_tag, {\"name\": key})\n xml_item.text = value\n xml_list.append(xml_item)\n\n elem.append(xml_list)\n\n # Ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n port_type = tag[:-1]\n\n keys = self.ports.keys()\n for key in keys:\n port = self.ports[key]\n if port.type == port_type:\n # Encode port\n xml_port = port.to_etree()\n xml_ports.append(xml_port)\n\n # Rotation map\n if port.rotation_map:\n # Encode\n rotation = []\n for i in range(port.width):\n rotation.append(str(port.rotation_map.get(i, \"open\")))\n\n # Make an element\n xml_rotation_map = ET.Element(\"port_rotation_map\", {\"name\": port.name})\n xml_rotation_map.text = \" \".join(rotation)\n xml_ports.append(xml_rotation_map)\n\n elem.append(xml_ports)\n\n # Recurse\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n elem.append(xml_block)\n\n return elem", "def pprint_xml(et):\n \n return tostring(et, pretty_print=True)", "def to_xml(self):\n \n root = ET.Element(\"Document\")\n root.set('xmlns',\"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02\")\n root_fito = ET.SubElement(root, \"FIToFICstmrCdtTrf\")\n \n self.xml_header(root_fito)\n self.xml_transaction(root_fito)\n\n ET.ElementTree(root)\n \n return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')", "def parse_xml(xmlfile):\n # create element tree object\n root = ET.parse(xmlfile).getroot()\n return root", "def str_to_etree(xml_str, encoding='utf-8'):\n # parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n # return xml.etree.ElementTree.ElementTree(\n # return xml.etree.ElementTree.fromstring(xml_str)\n # )\n # parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n # return xml.etree.ElementTree.ElementTree(\n # xml.etree.ElementTree.fromstring(xml_str, parser=parser)\n # )\n\n parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n return xml.etree.ElementTree.fromstring(xml_str, parser=parser)", "def toXMLString(self):\n return _libsbml.XMLNode_toXMLString(self)", "def to_xml(self, doc = None, output = None, indent = False):\r\n \r\n # Determine what format to return the values as\r\n if output != None and output.lower() != 'str' and output.lower() != 'dom':\r\n # Check to see if it's a filepath\r\n if output.lower().split('.')[-1] == 'xml':\r\n output = os.path.abspath(output)\r\n else:\r\n raise ValueError('Unrecognized format: %s' % output)\r\n \r\n # If doc is None, create a document\r\n if doc != None:\r\n is_root = False\r\n else:\r\n is_root = True\r\n doc = domlette.implementation.createDocument(None, None, None)\r\n #doc.publicId = \"-//W3C//DTD XHTML 1.0 Strict//EN\"\r\n 
#doc.systemId = \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"\r\n \r\n # Prepare an XML fragment to hold this entity's data\r\n frag = doc.createDocumentFragment()\r\n frag_root=doc.createElementNS(None, self.__class__.entity_name.lower())\r\n frag.appendChild(frag_root)\r\n\r\n # Append all of the key value pairs \r\n for att_name in self.__class__.entity_atts:\r\n att = getattr(self, att_name)\r\n self.__att_to_xml(doc, frag_root, att_name, att)\r\n\r\n # Return the result\r\n if not is_root:\r\n return frag\r\n else:\r\n # Determine how to return\r\n doc.appendChild(frag) \r\n if output == None:\r\n # Default returns XML as an objectified gnosis object\r\n return gnosis.xml.objectify.XML_Objectify(doc, parser='DOM').make_instance()\r\n elif output == 'str':\r\n # Return as string\r\n buf = StringIO()\r\n if indent:\r\n domlette.PrettyPrint(doc, stream=buf, encoding='us-ascii')\r\n else:\r\n domlette.Print(doc, stream=buf)\r\n xml_string = buf.getvalue()\r\n buf.close() \r\n return xml_string\r\n elif output == 'dom':\r\n # Return as a XML Document (Raw DOM)\r\n return doc\r\n else:\r\n f = open(output, 'w')\r\n if indent:\r\n domlette.PrettyPrint(doc, stream=f)\r\n else:\r\n domlette.Print(doc, stream=f)\r\n f.close()\r\n return True", "def toxml(self) -> ET.Element:\n # Dummy element that ElementTree extend() will strip\n root = ET.Element('root')\n\n connection = ET.SubElement(root, 'Connection')\n\n origin = ET.SubElement(connection, 'Origin')\n origin.set('ToolID', self.origin_tool.tool_id)\n origin.set('Connection', self.origin_output)\n\n destination = ET.SubElement(connection, 'Destination')\n destination.set('ToolID', self.destination_tool.tool_id)\n destination.set('Connection', self.destination_input)\n\n return root", "def to_xml(self, data, options=None):\r\n options = options or {}\r\n\r\n if lxml is None:\r\n raise UnsupportedSerializationFormat(\"Usage of the XML aspects requires lxml.\")\r\n\r\n return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')", "def getrootelement(self,filename):\n parser = etree.XMLPullParser(strip_cdata=False)\n root = etree.parse(filename,parser)\n return root", "def xml(self):\n xml = []\n xml.append('<?xml version=\"1.0\" encoding=\"%s\"?>' % XML_ENCODING.get(self.encoding, self.encoding))\n xml.append(\"<%s>\" % XML_TEXT)\n xml.extend([sentence.xml for sentence in self])\n xml.append(\"</%s>\" % XML_TEXT)\n return \"\\n\".join(xml)", "def xml_prettify(elem):\n\n\tfrom xml.etree import ElementTree\n\tfrom xml.dom import minidom\n\n\trough_string = ElementTree.tostring(elem, 'utf-8')\n\treparsed = minidom.parseString(rough_string)\n\treturn reparsed.toprettyxml(indent=\" \")", "def toXML(self):\n return self._xmlpre+\"\\n\".join(map(lambda f:f.toXML(),self._items))+self._xmlpost", "def to_xml(self, scene_dir: str) -> Tuple[Et.Element, bool]:\n raise NotImplementedError", "def get_translated_xml_string(self):\n return etree.tostring(self._translated_xml_tree)", "def prettify(root):\n rough_string = ET.tostring(root, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))", "def get_projects_xml(indir=\"../projects\"):\n return to_xml(get_projects(indir))", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += 
\"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)", "def encode(self):\n\n return lxml.etree.tostring(self.get_content(),\n pretty_print = True,\n encoding = self.encoding,\n xml_declaration = True)", "def pretty_xml(xml):\n try:\n #parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.fromstring(xml.encode('utf-8'))\n return etree.tostring(tree, pretty_print=True).decode()\n except:\n print(f\"ERROR pretty_xml() xml:{xml}\")\n raise", "def load_xml_etree_cElementTree(finder, module):\n finder.IncludeModule(\"xml.etree.ElementTree\")", "def doc(self):\n\n if not hasattr(self, \"_doc\"):\n self.loader.cdr_cursor.execute(self.SELECT_XML, self.id)\n xml = self.loader.cdr_cursor.fetchone().xml\n self._doc = etree.fromstring(xml.encode(\"utf-8\"))\n return self._doc", "def pprintXml(et):\n \n return tostring(et, pretty_print=True)", "def read_xml_file(filename):\n###############################################################################\n with __FILE_OPEN(filename) as file_:\n tree = ET.parse(file_)\n root = tree.getroot()\n # End with\n return tree, root", "def to_etree(self):\n\n # Top-level root block\n attr = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n\n if self.arch_id is not None:\n attr[\"architecture_id\"] = self.arch_id\n if self.netlist_id is not None:\n attr[\"atom_netlist_id\"] = self.netlist_id\n\n root = ET.Element(\"block\", attr)\n\n # Top-level ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n xml_ports.text = \" \".join(self.ports[tag])\n root.append(xml_ports)\n\n # CLB blocks\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n root.append(xml_block)\n\n return root", "def gen_tree(path):\n # print(\"CALLING.. 
Tree\")\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(path, parser)\n root = tree.getroot() \n return root, tree", "def get_etree_root(doc, encoding=None):\n tree = get_etree(doc, encoding)\n root = tree.getroot()\n\n return root", "def tostring(self):\n header, body = None, None\n\n if self._header:\n header = b'<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n\n if self._main() is not None:\n body = ET.tostring(self._main(), 'utf-8')\n\n return (header if header else b'') + (body if body else b'')", "def toString(doc):\n return doc.toxml()", "def _get_ome_xml_root(self) -> ElementTree:\n ome_metadata_element = ElementTree.fromstring(self._ome_metadata)\n tree = ElementTree.ElementTree(ome_metadata_element)\n return tree.getroot()", "def to_etree(self):\n\n # Format connections\n text = []\n for i in range(self.width):\n if i in self.connections:\n text.append(str(self.connections[i]))\n else:\n text.append(\"open\")\n\n elem = ET.Element(\"port\", attrib={\"name\": self.name})\n elem.text = \" \".join(text)\n return elem", "def storeAndReturnXML(self):\n self._storeItems()\n return self.toXML()", "def prettify(elem):\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n #return reparsed.toprettyxml(indent=\" \")\n return reparsed.toxml()", "def prettify(self):\n reparsed = minidom.parseString(self.tostring())\n return reparsed.toprettyxml(indent=' ', encoding='utf-8')", "def prettify(elem): # Cf. https://stackoverflow.com/questions/17402323/use-xml-etree-elementtree-to-print-nicely-formatted-xml-files\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = xml.dom.minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def _toxml_rec(self, root, obj=None, ns_cur=None):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n try:\n\n if (obj == None):\n obj = self._client.factory.create(root)\n\n ns = '{%s}' % self._get_element_ns(obj.__class__.__name__)\n if (ns != '{None}' and ns != ns_cur):\n doc = Element(ns + root)\n else:\n doc = Element(root)\n ns = ns_cur\n\n for key in obj.__keylist__:\n subelem = obj[key]\n\n if (subelem == None):\n SubElement(doc, key).text = '?'\n elif (subelem == [] or '[]' in subelem.__str__()):\n inner_doc = self._toxml_rec(key, None, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n else:\n el_type = self._get_element_type(\n subelem.__class__.__name__)\n if (el_type == 'Simple'):\n SubElement(doc, key).text = '?'\n elif (el_type == 'Complex'):\n inner_doc = self._toxml_rec(key, subelem, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n\n return doc\n\n except TypeNotFound:\n return None", "def prettify(self, elem):\n try:\n rough_string = ET.tostring(elem, 'utf8')\n except Exception:\n print(elem)\n root = etree.fromstring(rough_string)\n return etree.tostring(root, pretty_print=True)", "def prettify(self):\n re_parsed = minidom.parseString(tostring(self.dom))\n return re_parsed.toprettyxml()", "def _build_tree(self, markup):\n clean_markup = tidy_document(markup,\n options={'numeric-entities':1,\n 'output-xml':1,\n 'output-encoding':'utf8'})[0]\n # Small fix for a cornercase involving invalid characters...\n clean_markup = clean_markup.replace('\\x15', '_')\n etree = self._fix_treetags(ET.fromstring(clean_markup))\n return etree", "def getXml(self):\n return _SALOMERuntime.InputXmlPort_getXml(self)", "def importXml ( r ):\n rawText = r.read ()\n rawText = rawText.strip ()\n pattern = re.compile (r'[^\\S ]+')\n text = 
re.sub ( pattern, '', rawText )\n xml = ET.fromstring ( text )\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n return xml", "def xml_to_string(xml_tag):\n return parseString(ET.tostring(xml_tag)).toprettyxml(indent=\" \")", "def get_query_xml(query):\n root = get_query_etree(query)\n\n # can get actual field types by inspecting cursor.description,\n # but these (I think) depend on the database back-end\n # so I decided it was too hard to be smart here,\n # and instead just say that everything is \"CDATA #REQURED\"...\n attlist = '\\n'.join(' {col} CDATA #REQUIRED'.format(col=col) for col in query.cols)\n\n # generating dtd means we can automatically validate output, for testing\n dtd = '<!DOCTYPE {root} [\\n' \\\n ' <!ELEMENT {root} ({row})*>\\n' \\\n ' <!ELEMENT {row} EMPTY>\\n' \\\n ' <!ATTLIST {row}\\n{attlist}\\n' \\\n ' >\\n' \\\n ']>'.format(root=query.root, row=query.row, attlist=attlist)\n\n # DEFAULT_CHARSET is used by HttpResponse, so make etree use it too.\n # Output may become invalid if DEFAULT_CHARSET cannot be used to encode field names!\n # e.g. MySQL columns may include characters from Unicode Basic Multiingual Plane,\n # which could be inexpressible if DEFAULT_CHARSET were ascii, giving invalid xml.\n return etree.tostring(\n root,\n pretty_print = False,\n encoding = settings.DEFAULT_CHARSET,\n xml_declaration = True,\n doctype = dtd,\n )", "def prettifyXml(elem):\n uglyStr = ET.tostring(elem, 'utf-8')\n reparsedStr = xml.dom.minidom.parseString(uglyStr)\n return reparsedStr.toprettyxml(indent=\" \", encoding=\"utf-8\")", "def toXML(self):\n return _libsbml.Layout_toXML(self)", "def LoadXML(NAME):\r\n # Basics snaged from https://docs.python.org/2/library/xml.etree.elementtree.html\r\n Tree = parse(NAME) # opens and turns the xml file into a tree\r\n Root = Tree.getroot()\r\n return(Root)", "def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree", "def get_obj(self):\n assert self._obj is not None, \"Object XML tree has not been generated yet!\"\n return self._obj", "def xpath_as_xml(self, expr=''):\n results = []\n for result in self.xpath(expr):\n if result:\n results.append(result.toxml())\n \n return results", "def get_inner_html_as_xml(self, selector, outer_node=None):\n from lxml import html\n\n if outer_node:\n node_open = '<{}>'.format(outer_node)\n node_close = '</{}>'.format(outer_node)\n else:\n node_open = node_close = ''\n return html.fromstring(node_open + self.get_inner_html(selector) + node_close)", "def readDoc(self, filename):\n try:\n doc = ET.parse( filename, parser=LineNumberingParser() )\n except self.ET_exc_class:\n raise XmlError( str(sys.exc_info()[1]) )\n\n rootnode = recurse_construct_ET_to_XmlNode( None, doc.getroot() )\n\n return rootnode", "def prettyxml(pyxb_xml, validate=True, ns=None, xslt=None):\n\n def indent(depth, prev):\n return ('\\n' + (' ' * 4 * depth)) if prev != 'd' else \"\"\n \n def args(alist):\n if len(alist):\n return ' ' + ' '.join([k+'=\"'+v+'\"' for (k,v) in alist.items()])\n return ''\n\n pyxb.defaultNamespace = ns\n pyxb.utils.domutils.BindingDOMSupport.SetDefaultNamespace(ns if ns else defaultNS)\n pyxb.RequireValidWhenGenerating(validate)\n rawxml = pyxb_xml if isinstance(pyxb_xml, basestring) else 
pyxb_xml.toxml()\n\n seq = []\n p = xml.parsers.expat.ParserCreate('UTF-8')\n p.StartElementHandler = lambda name, attrs: seq.append( ('s', name, attrs))\n p.EndElementHandler = lambda name: seq.append( ('e', name ))\n p.CharacterDataHandler = lambda data: seq.append( ('d', data ))\n p.Parse(rawxml.encode('UTF-8'),1)\n\n depth, prev, rval = (0, 'e', '<?xml version=\"1.0\" encoding=\"UTF-8\"?>' + (xslt if xslt else \"\"))\n dataText = ''\n while len(seq):\n e = seq.pop(0)\n if e[0] == 's':\n # shortcut the simple start end tag\n if seq[0][0] == 'e':\n assert e[1] == seq[0][1]\n seq.pop(0)\n rval += indent(depth, prev)\n rval += '<' + e[1] + args(e[2]) + '/>'\n depth -= 1\n else:\n if prev != 'e':\n depth += 1\n if dataText:\n rval += dataText\n dataText = ''\n rval += indent(depth, prev)\n rval += '<' + e[1] + args(e[2]) + '>'\n elif e[0] == 'e':\n if prev != 'd':\n depth -= 1\n else:\n rval += escape(unescape(dataText))\n dataText = ''\n rval += indent(depth, prev)\n rval += '</' + e[1] + '>'\n else:\n if prev != 'e' or e[0] <> 'd' or e[1].strip():\n dataText += e[1]\n if prev != 'e' or e[0] <> 'd' or e[1].strip():\n prev = e[0]\n return rval" ]
[ "0.76870763", "0.75312847", "0.74353814", "0.69993716", "0.68324274", "0.6808984", "0.67279035", "0.67279035", "0.67270184", "0.67270184", "0.6613549", "0.65791756", "0.65375465", "0.6401415", "0.6362028", "0.6343287", "0.63272727", "0.6314633", "0.6282842", "0.6246787", "0.6219432", "0.6139695", "0.6096717", "0.6058396", "0.60533243", "0.60182637", "0.6005463", "0.59978014", "0.59901744", "0.59888124", "0.5976454", "0.59440976", "0.58988947", "0.5874374", "0.58646375", "0.5854752", "0.58519346", "0.58473355", "0.5846384", "0.5800053", "0.5779245", "0.57748735", "0.5761204", "0.57569355", "0.57559574", "0.5755586", "0.5746839", "0.5740134", "0.57227325", "0.57133186", "0.57083476", "0.56932306", "0.56812644", "0.5664652", "0.5664532", "0.56615293", "0.5650261", "0.56335354", "0.56142014", "0.5610486", "0.56084657", "0.55944806", "0.5578324", "0.5562834", "0.555447", "0.554932", "0.553621", "0.5508162", "0.55011404", "0.5498053", "0.5488716", "0.54777795", "0.54744244", "0.5457839", "0.5455167", "0.5451476", "0.5440073", "0.5426419", "0.5420064", "0.5414535", "0.54099923", "0.54007906", "0.5397051", "0.53955", "0.538435", "0.53526473", "0.5350613", "0.5348693", "0.5332764", "0.5324655", "0.52766126", "0.5267961", "0.52629775", "0.52498454", "0.523864", "0.52158415", "0.5213015", "0.52064145", "0.5205081", "0.5201932", "0.51943797" ]
0.0
-1
Get the XML representation as an `ElementTree` object.
def get_xml(self):
    xml = svgwrite.etree.etree.Element(self.elementname)
    if self.debug:
        self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)
    for attribute, value in self.attribs.items():
        # filter 'None' values
        if value is not None:
            value = self.value_to_string(value)
            if value:  # just add not empty attributes
                xml.set(attribute, value)
    for element in self.elements:
        xml.append(element)
    return xml
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def get_xml(self):\n return etree.tostring(self.get_etree())", "def get_tree():\n root = ET.fromstring(xmlstring)\n return ET.ElementTree(root)", "def xml(self):\n rough_string = ElementTree.tostring(self.dom, \"utf-8\")\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def xml(self):\n return lxml.etree.fromstring(self.content, _XML_PARSER)", "def getXmlEtree(xml):\n\n parser = XMLParser(remove_blank_text=True)\n if xml.startswith('<?xml') or xml.startswith('<'):\n return (parse(StringIO(xml), parser).getroot(),\n getNamespacePrefixDict(xml))\n else:\n if os.path.isfile(xml): xmlStr = open(xml).read()\n else: xmlStr = urlopen(xml).read()\n return (parse(StringIO(xmlStr), parser).getroot(),\n getNamespacePrefixDict(xmlStr))", "def xml(self):\n return self._xml", "def xml(self):\n return self._xml", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def _tree_to_xml(self, tree):\n\n body = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n\n return body + self._element_to_xml(tree)", "def xml_tostring(tree):\n if six.PY2:\n return ET.tostring(tree)\n else:\n return ET.tostring(tree, encoding='unicode')", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def to_xml(self):\n return xmltodict.unparse(\n self.to_xml_dict(),\n pretty=True\n ).encode(\"utf-8\")", "def toXMLElement(self):\n # NOTE Subclasses should call Component.toXMLElement(self) to obtain\n # base node and then add further attributes and sub-elements\n return ET.Element(self.__class__.__name__)", "def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n element.attrib['description'] = self.description\r\n\r\n return element", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def get_xml(self):\n\t\t# get the XML description of the VM\n\t\tvm_xml = self.clonezilla_vm_obj.XMLDesc(0)\n\t\troot = ET.fromstring(vm_xml)\n\t\treturn root", "def get_etree(xml):\n\n parser = XMLParser(remove_blank_text=True)\n if xml.startswith('<?xml') or xml.startswith('<'):\n return (parse(StringIO(xml), parser).getroot(),\n get_ns_dict(xml))\n else:\n if os.path.isfile(xml): xml_str = open(xml).read()\n else: xml_str = urlopen(xml).read()\n return (parse(StringIO(xml_str), parser).getroot(),\n get_ns_dict(xml_str))", "def get_etree(self):\n if self._etree:\n return self._etree\n from richtimes.scripts.shell import get_etree\n self._etree = get_etree(self.filename)\n return self._etree", "def getFullTree(self):\n parsedStr = xml.dom.minidom.parseString(\n ET.tostring(self._tree.getroot())\n )\n return '\\n'.join([line.rstrip()\n for line in parsedStr.toprettyxml().splitlines()\n if line.strip()])", "def xml(self):\n raise NotImplementedError('This api does not return xml')", "def saveToXml(self) -> org.jdom.Element:\n ...", "def get_xml_parser(encoding=None):\n parser = etree.ETCompatXMLParser(\n huge_tree=True,\n remove_comments=True,\n strip_cdata=False,\n remove_blank_text=True,\n resolve_entities=False,\n encoding=encoding\n )\n\n return parser", "def get_etree(self):\n if self._etree:\n return self._etree\n root = self.issue.get_etree()\n self._etree = root.xpath(self.xpath)[0]\n return self._etree", "def xml_string(self):\n if self._xml_string is not None:\n return self._xml_string\n\n return 
etree.tostring(self._xml_node)", "def xml_string(self):\r\n if self._xml_string is not None:\r\n return self._xml_string\r\n\r\n return etree.tostring(self._xml_node)", "def toXMLNode(self):\n return _libsbml.SBase_toXMLNode(self)", "def importETree():\r\n etree_in_c = None\r\n try: # Is it Python 2.5+ with C implemenation of ElementTree installed?\r\n import xml.etree.cElementTree as etree_in_c\r\n from xml.etree.ElementTree import Comment\r\n except ImportError:\r\n try: # Is it Python 2.5+ with Python implementation of ElementTree?\r\n import xml.etree.ElementTree as etree\r\n except ImportError:\r\n try: # An earlier version of Python with cElementTree installed?\r\n import cElementTree as etree_in_c\r\n from elementtree.ElementTree import Comment\r\n except ImportError:\r\n try: # An earlier version of Python with Python ElementTree?\r\n import elementtree.ElementTree as etree\r\n except ImportError:\r\n raise ImportError(\"Failed to import ElementTree\")\r\n if etree_in_c: \r\n if etree_in_c.VERSION < \"1.0.5\":\r\n raise RuntimeError(\"cElementTree version 1.0.5 or higher is required.\")\r\n # Third party serializers (including ours) test with non-c Comment\r\n etree_in_c.test_comment = Comment\r\n return etree_in_c\r\n elif etree.VERSION < \"1.1\":\r\n raise RuntimeError(\"ElementTree version 1.1 or higher is required\")\r\n else:\r\n return etree", "def get_xml(xml_file_path: str):\n root = et.parse(xml_file_path).getroot()\n\n return root", "def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()", "def convert_etree(tree):\n return objectify.fromstring(etree.tostring(tree))", "def file_to_xml(cls, file_object):\r\n return etree.parse(file_object, parser=edx_xml_parser).getroot()", "def getXMLTree(xmlString, debug=False):\n\tfrom lxml import etree as lxtree\n\ttree = lxtree.fromstring(_nukeNamespaces(xmlString))\n\n\tif debug:\n\t\tlxtree.dump(tree)\n\treturn tree", "def prettify(self):\n rough_string = ET.tostring(self.root, encoding='utf-8', method='xml').decode('utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def stringToXML(self, unparsedXML):\n xmlRoot = ET.fromstring(unparsedXML)\n self.xmlRoot = xmlRoot\n return xmlRoot", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = 
children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k == \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def __str__(self):\n result = xml.dom.minidom.parseString(\n xml.etree.ElementTree.tostring(\n self.ToXMLElement(), encoding='utf-8')).toprettyxml(indent=' ')\n\n return result", "def serialize(self):\n return self.xmlnode.serialize(encoding=\"utf-8\")", "def test_simple_XML(self):\n\n e = ET.XML('''\n <root>\n <e />\n <e>text</e>\n <e name=\"value\" />\n <e name=\"value\">text</e>\n <e> <a>text</a> <b>text</b> </e>\n <e> <a>text</a> <a>text</a> </e>\n <e> text <a>text</a> </e>\n </root>\n ''')\n\n d = etree_to_dict(e)\n\n if SHOW_RESULTS:\n pprint(d)\n\n e = dict_to_xml_str(d)\n\n if SHOW_RESULTS:\n print(e)\n print(prettify(e))", "def read_xml(file_dir):\n tree = ElementTree()\n tree.parse(file_dir)\n return tree", "def unpack_xml(text) -> ET.ElementTree:\n etree: ET.ElementTree = ET.parse(io.StringIO(text))\n _check_for_errors(etree)\n return etree", "def getXmlTree(url):\n return lxml.etree.parse(url)", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def createElements(self):\n if self.__builder.checkRootTag(self.__content):\n elements = self.__content.findall(\"*\")\n\n for el in elements:\n self.parseXml(el, {})\n\n return self.__builder.getRoot()\n else:\n print(\"The Element \", self.__content.tag, \" is unkown.\")\n return None", "def to_etree(self, data, options=None, name=None, depth=0):\r\n if isinstance(data, (list, tuple)):\r\n element = Element(name or 'objects')\r\n if name:\r\n element = Element(name)\r\n element.set('type', 'list')\r\n else:\r\n element = Element('objects')\r\n for item in data:\r\n element.append(self.to_etree(item, options, depth=depth + 1))\r\n elif isinstance(data, dict):\r\n if depth == 0:\r\n element = 
Element(name or 'response')\r\n else:\r\n element = Element(name or 'object')\r\n element.set('type', 'hash')\r\n for (key, value) in data.iteritems():\r\n element.append(self.to_etree(value, options, name=key, depth=depth + 1))\r\n else:\r\n element = Element(name or 'value')\r\n simple_data = self.to_simple(data, options)\r\n data_type = get_type_string(simple_data)\r\n\r\n if data_type != 'string':\r\n element.set('type', get_type_string(simple_data))\r\n\r\n if data_type != 'null':\r\n if isinstance(simple_data, unicode):\r\n element.text = simple_data\r\n else:\r\n element.text = force_unicode(simple_data)\r\n\r\n return element", "def format_xml(elem):\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def to_element_tree(doc: Document) -> etree.ElementTree:\n elements = []\n tags = []\n root, sub, tag = None, None, None\n # Document must have a root element\n if not doc.tags[0].end - doc.tags[0].start == len(doc.text):\n raise ValueError('{} is not a valid root element'.format(doc.tags[0]))\n\n for i, tag in enumerate(doc.tags):\n next_tag = doc.tags[i + 1] if i < len(doc.tags) - 1 else None\n # This is the root element\n if not len(elements):\n root = etree.Element(tag.name, attrib=tag.attrib)\n root.text = doc.text[tag.start:next_tag.start]\n elements.append(root)\n tags.append(tag)\n # If tag is a child of the last element\n elif tags[-1].overlaps(tag):\n # A tag may only have one parent\n intersection = tags[-1].intersection(tag)\n if not len(intersection) == len(tag):\n raise ValueError(\n '{} cannot be a child of {} with intersection {}'.format(tag, tags[-1], len(intersection)))\n sub = etree.SubElement(elements[-1], tag.name, tag.attrib)\n # if the tag has children\n if next_tag is not None and tag.overlaps(next_tag):\n sub.text = doc.text[tag.start:next_tag.start]\n else:\n sub.text = doc.text[tag.start:tag.end]\n elements.append(sub)\n tags.append(tag)\n else:\n sibling_tag, sibling_element = None, None\n # Step out until we find the parent node\n finished = tags[-1].overlaps(tag)\n while not finished and len(tags) > 1:\n sibling_element = elements.pop()\n sibling_tag = tags.pop()\n sibling_element.tail = doc.text[sibling_tag.end:tags[-1].end]\n if tags[-1].overlaps(tag):\n finished = True\n\n # Put the tail on the previous sibling\n if sibling_element is not None:\n sibling_element.tail = doc.text[sibling_tag.end:tag.start]\n\n intersection = tags[-1].intersection(tag)\n if not len(intersection) == len(tag):\n raise ValueError(\n '{} cannot be a child of {} with intersection {}'.format(tag, tags[-1], len(intersection)))\n sub = etree.SubElement(elements[-1], tag.name, tag.attrib)\n # If the next tag is a child of this tag\n if next_tag is not None and tag.overlaps(next_tag):\n sub.text = doc.text[tag.start:next_tag.start]\n else:\n sub.text = doc.text[tag.start:tag.end]\n elements.append(sub)\n tags.append(tag)\n\n if sub is not None and tag is not None:\n sub.tail = doc.text[tag.end:len(doc.text)]\n\n # Remove any newline elements added in the read step\n for element in root.iter():\n if element.text is not None:\n element.text = element.text.replace('\\n', '')\n if element.tail is not None:\n element.tail = element.tail.replace('\\n', '')\n return etree.ElementTree(root)", "def to_etree(self):\n\n # Base block element\n attrib = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n if not self.is_leaf:\n attrib[\"mode\"] = self.mode if self.mode is not None else 
\"default\"\n\n elem = ET.Element(\"block\", attrib)\n\n # If this is an \"open\" block then skip the remaining tags\n if self.name == \"open\":\n return elem\n\n # Attributes / parameters\n if self.is_leaf:\n for tag, data in zip([\"attributes\", \"parameters\"], [self.attributes, self.parameters]):\n xml_list = ET.Element(tag)\n\n sub_tag = tag[:-1]\n for key, value in data.items():\n xml_item = ET.Element(sub_tag, {\"name\": key})\n xml_item.text = value\n xml_list.append(xml_item)\n\n elem.append(xml_list)\n\n # Ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n port_type = tag[:-1]\n\n keys = self.ports.keys()\n for key in keys:\n port = self.ports[key]\n if port.type == port_type:\n # Encode port\n xml_port = port.to_etree()\n xml_ports.append(xml_port)\n\n # Rotation map\n if port.rotation_map:\n # Encode\n rotation = []\n for i in range(port.width):\n rotation.append(str(port.rotation_map.get(i, \"open\")))\n\n # Make an element\n xml_rotation_map = ET.Element(\"port_rotation_map\", {\"name\": port.name})\n xml_rotation_map.text = \" \".join(rotation)\n xml_ports.append(xml_rotation_map)\n\n elem.append(xml_ports)\n\n # Recurse\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n elem.append(xml_block)\n\n return elem", "def pprint_xml(et):\n \n return tostring(et, pretty_print=True)", "def to_xml(self):\n \n root = ET.Element(\"Document\")\n root.set('xmlns',\"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02\")\n root_fito = ET.SubElement(root, \"FIToFICstmrCdtTrf\")\n \n self.xml_header(root_fito)\n self.xml_transaction(root_fito)\n\n ET.ElementTree(root)\n \n return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')", "def parse_xml(xmlfile):\n # create element tree object\n root = ET.parse(xmlfile).getroot()\n return root", "def str_to_etree(xml_str, encoding='utf-8'):\n # parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n # return xml.etree.ElementTree.ElementTree(\n # return xml.etree.ElementTree.fromstring(xml_str)\n # )\n # parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n # return xml.etree.ElementTree.ElementTree(\n # xml.etree.ElementTree.fromstring(xml_str, parser=parser)\n # )\n\n parser = xml.etree.ElementTree.XMLParser(encoding=encoding)\n return xml.etree.ElementTree.fromstring(xml_str, parser=parser)", "def to_xml(self, doc = None, output = None, indent = False):\r\n \r\n # Determine what format to return the values as\r\n if output != None and output.lower() != 'str' and output.lower() != 'dom':\r\n # Check to see if it's a filepath\r\n if output.lower().split('.')[-1] == 'xml':\r\n output = os.path.abspath(output)\r\n else:\r\n raise ValueError('Unrecognized format: %s' % output)\r\n \r\n # If doc is None, create a document\r\n if doc != None:\r\n is_root = False\r\n else:\r\n is_root = True\r\n doc = domlette.implementation.createDocument(None, None, None)\r\n #doc.publicId = \"-//W3C//DTD XHTML 1.0 Strict//EN\"\r\n #doc.systemId = \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"\r\n \r\n # Prepare an XML fragment to hold this entity's data\r\n frag = doc.createDocumentFragment()\r\n frag_root=doc.createElementNS(None, self.__class__.entity_name.lower())\r\n frag.appendChild(frag_root)\r\n\r\n # Append all of the key value pairs \r\n for att_name in self.__class__.entity_atts:\r\n att = getattr(self, att_name)\r\n self.__att_to_xml(doc, frag_root, att_name, att)\r\n\r\n # Return the result\r\n if not is_root:\r\n return frag\r\n else:\r\n # 
Determine how to return\r\n doc.appendChild(frag) \r\n if output == None:\r\n # Default returns XML as an objectified gnosis object\r\n return gnosis.xml.objectify.XML_Objectify(doc, parser='DOM').make_instance()\r\n elif output == 'str':\r\n # Return as string\r\n buf = StringIO()\r\n if indent:\r\n domlette.PrettyPrint(doc, stream=buf, encoding='us-ascii')\r\n else:\r\n domlette.Print(doc, stream=buf)\r\n xml_string = buf.getvalue()\r\n buf.close() \r\n return xml_string\r\n elif output == 'dom':\r\n # Return as a XML Document (Raw DOM)\r\n return doc\r\n else:\r\n f = open(output, 'w')\r\n if indent:\r\n domlette.PrettyPrint(doc, stream=f)\r\n else:\r\n domlette.Print(doc, stream=f)\r\n f.close()\r\n return True", "def toXMLString(self):\n return _libsbml.XMLNode_toXMLString(self)", "def toxml(self) -> ET.Element:\n # Dummy element that ElementTree extend() will strip\n root = ET.Element('root')\n\n connection = ET.SubElement(root, 'Connection')\n\n origin = ET.SubElement(connection, 'Origin')\n origin.set('ToolID', self.origin_tool.tool_id)\n origin.set('Connection', self.origin_output)\n\n destination = ET.SubElement(connection, 'Destination')\n destination.set('ToolID', self.destination_tool.tool_id)\n destination.set('Connection', self.destination_input)\n\n return root", "def to_xml(self, data, options=None):\r\n options = options or {}\r\n\r\n if lxml is None:\r\n raise UnsupportedSerializationFormat(\"Usage of the XML aspects requires lxml.\")\r\n\r\n return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')", "def getrootelement(self,filename):\n parser = etree.XMLPullParser(strip_cdata=False)\n root = etree.parse(filename,parser)\n return root", "def xml(self):\n xml = []\n xml.append('<?xml version=\"1.0\" encoding=\"%s\"?>' % XML_ENCODING.get(self.encoding, self.encoding))\n xml.append(\"<%s>\" % XML_TEXT)\n xml.extend([sentence.xml for sentence in self])\n xml.append(\"</%s>\" % XML_TEXT)\n return \"\\n\".join(xml)", "def xml_prettify(elem):\n\n\tfrom xml.etree import ElementTree\n\tfrom xml.dom import minidom\n\n\trough_string = ElementTree.tostring(elem, 'utf-8')\n\treparsed = minidom.parseString(rough_string)\n\treturn reparsed.toprettyxml(indent=\" \")", "def toXML(self):\n return self._xmlpre+\"\\n\".join(map(lambda f:f.toXML(),self._items))+self._xmlpost", "def to_xml(self, scene_dir: str) -> Tuple[Et.Element, bool]:\n raise NotImplementedError", "def get_translated_xml_string(self):\n return etree.tostring(self._translated_xml_tree)", "def prettify(root):\n rough_string = ET.tostring(root, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")", "def example_xml(example_xml_file):\n return etree.fromstring(example_xml_file.encode('utf-8'))", "def get_projects_xml(indir=\"../projects\"):\n return to_xml(get_projects(indir))", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += 
\"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)", "def encode(self):\n\n return lxml.etree.tostring(self.get_content(),\n pretty_print = True,\n encoding = self.encoding,\n xml_declaration = True)", "def pretty_xml(xml):\n try:\n #parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.fromstring(xml.encode('utf-8'))\n return etree.tostring(tree, pretty_print=True).decode()\n except:\n print(f\"ERROR pretty_xml() xml:{xml}\")\n raise", "def load_xml_etree_cElementTree(finder, module):\n finder.IncludeModule(\"xml.etree.ElementTree\")", "def doc(self):\n\n if not hasattr(self, \"_doc\"):\n self.loader.cdr_cursor.execute(self.SELECT_XML, self.id)\n xml = self.loader.cdr_cursor.fetchone().xml\n self._doc = etree.fromstring(xml.encode(\"utf-8\"))\n return self._doc", "def pprintXml(et):\n \n return tostring(et, pretty_print=True)", "def read_xml_file(filename):\n###############################################################################\n with __FILE_OPEN(filename) as file_:\n tree = ET.parse(file_)\n root = tree.getroot()\n # End with\n return tree, root", "def to_etree(self):\n\n # Top-level root block\n attr = {\n \"name\": self.name,\n \"instance\": self.instance,\n }\n\n if self.arch_id is not None:\n attr[\"architecture_id\"] = self.arch_id\n if self.netlist_id is not None:\n attr[\"atom_netlist_id\"] = self.netlist_id\n\n root = ET.Element(\"block\", attr)\n\n # Top-level ports\n for tag in [\"inputs\", \"outputs\", \"clocks\"]:\n xml_ports = ET.Element(tag)\n xml_ports.text = \" \".join(self.ports[tag])\n root.append(xml_ports)\n\n # CLB blocks\n keys = self.blocks.keys()\n for key in keys:\n xml_block = self.blocks[key].to_etree()\n root.append(xml_block)\n\n return root", "def gen_tree(path):\n # print(\"CALLING.. 
Tree\")\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(path, parser)\n root = tree.getroot() \n return root, tree", "def get_etree_root(doc, encoding=None):\n tree = get_etree(doc, encoding)\n root = tree.getroot()\n\n return root", "def tostring(self):\n header, body = None, None\n\n if self._header:\n header = b'<?xml version=\"1.0\" encoding=\"utf-8\"?>'\n\n if self._main() is not None:\n body = ET.tostring(self._main(), 'utf-8')\n\n return (header if header else b'') + (body if body else b'')", "def toString(doc):\n return doc.toxml()", "def _get_ome_xml_root(self) -> ElementTree:\n ome_metadata_element = ElementTree.fromstring(self._ome_metadata)\n tree = ElementTree.ElementTree(ome_metadata_element)\n return tree.getroot()", "def to_etree(self):\n\n # Format connections\n text = []\n for i in range(self.width):\n if i in self.connections:\n text.append(str(self.connections[i]))\n else:\n text.append(\"open\")\n\n elem = ET.Element(\"port\", attrib={\"name\": self.name})\n elem.text = \" \".join(text)\n return elem", "def storeAndReturnXML(self):\n self._storeItems()\n return self.toXML()", "def prettify(elem):\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n #return reparsed.toprettyxml(indent=\" \")\n return reparsed.toxml()", "def prettify(self):\n reparsed = minidom.parseString(self.tostring())\n return reparsed.toprettyxml(indent=' ', encoding='utf-8')", "def prettify(elem): # Cf. https://stackoverflow.com/questions/17402323/use-xml-etree-elementtree-to-print-nicely-formatted-xml-files\n rough_string = ET.tostring(elem, 'utf-8')\n reparsed = xml.dom.minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\"\\t\")", "def _toxml_rec(self, root, obj=None, ns_cur=None):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n try:\n\n if (obj == None):\n obj = self._client.factory.create(root)\n\n ns = '{%s}' % self._get_element_ns(obj.__class__.__name__)\n if (ns != '{None}' and ns != ns_cur):\n doc = Element(ns + root)\n else:\n doc = Element(root)\n ns = ns_cur\n\n for key in obj.__keylist__:\n subelem = obj[key]\n\n if (subelem == None):\n SubElement(doc, key).text = '?'\n elif (subelem == [] or '[]' in subelem.__str__()):\n inner_doc = self._toxml_rec(key, None, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n else:\n el_type = self._get_element_type(\n subelem.__class__.__name__)\n if (el_type == 'Simple'):\n SubElement(doc, key).text = '?'\n elif (el_type == 'Complex'):\n inner_doc = self._toxml_rec(key, subelem, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n\n return doc\n\n except TypeNotFound:\n return None", "def prettify(self, elem):\n try:\n rough_string = ET.tostring(elem, 'utf8')\n except Exception:\n print(elem)\n root = etree.fromstring(rough_string)\n return etree.tostring(root, pretty_print=True)", "def prettify(self):\n re_parsed = minidom.parseString(tostring(self.dom))\n return re_parsed.toprettyxml()", "def _build_tree(self, markup):\n clean_markup = tidy_document(markup,\n options={'numeric-entities':1,\n 'output-xml':1,\n 'output-encoding':'utf8'})[0]\n # Small fix for a cornercase involving invalid characters...\n clean_markup = clean_markup.replace('\\x15', '_')\n etree = self._fix_treetags(ET.fromstring(clean_markup))\n return etree", "def getXml(self):\n return _SALOMERuntime.InputXmlPort_getXml(self)", "def importXml ( r ):\n rawText = r.read ()\n rawText = rawText.strip ()\n pattern = re.compile (r'[^\\S ]+')\n text = 
re.sub ( pattern, '', rawText )\n xml = ET.fromstring ( text )\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n return xml", "def xml_to_string(xml_tag):\n return parseString(ET.tostring(xml_tag)).toprettyxml(indent=\" \")", "def get_query_xml(query):\n root = get_query_etree(query)\n\n # can get actual field types by inspecting cursor.description,\n # but these (I think) depend on the database back-end\n # so I decided it was too hard to be smart here,\n # and instead just say that everything is \"CDATA #REQURED\"...\n attlist = '\\n'.join(' {col} CDATA #REQUIRED'.format(col=col) for col in query.cols)\n\n # generating dtd means we can automatically validate output, for testing\n dtd = '<!DOCTYPE {root} [\\n' \\\n ' <!ELEMENT {root} ({row})*>\\n' \\\n ' <!ELEMENT {row} EMPTY>\\n' \\\n ' <!ATTLIST {row}\\n{attlist}\\n' \\\n ' >\\n' \\\n ']>'.format(root=query.root, row=query.row, attlist=attlist)\n\n # DEFAULT_CHARSET is used by HttpResponse, so make etree use it too.\n # Output may become invalid if DEFAULT_CHARSET cannot be used to encode field names!\n # e.g. MySQL columns may include characters from Unicode Basic Multiingual Plane,\n # which could be inexpressible if DEFAULT_CHARSET were ascii, giving invalid xml.\n return etree.tostring(\n root,\n pretty_print = False,\n encoding = settings.DEFAULT_CHARSET,\n xml_declaration = True,\n doctype = dtd,\n )", "def prettifyXml(elem):\n uglyStr = ET.tostring(elem, 'utf-8')\n reparsedStr = xml.dom.minidom.parseString(uglyStr)\n return reparsedStr.toprettyxml(indent=\" \", encoding=\"utf-8\")", "def toXML(self):\n return _libsbml.Layout_toXML(self)", "def LoadXML(NAME):\r\n # Basics snaged from https://docs.python.org/2/library/xml.etree.elementtree.html\r\n Tree = parse(NAME) # opens and turns the xml file into a tree\r\n Root = Tree.getroot()\r\n return(Root)", "def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree", "def get_obj(self):\n assert self._obj is not None, \"Object XML tree has not been generated yet!\"\n return self._obj", "def get_inner_html_as_xml(self, selector, outer_node=None):\n from lxml import html\n\n if outer_node:\n node_open = '<{}>'.format(outer_node)\n node_close = '</{}>'.format(outer_node)\n else:\n node_open = node_close = ''\n return html.fromstring(node_open + self.get_inner_html(selector) + node_close)", "def xpath_as_xml(self, expr=''):\n results = []\n for result in self.xpath(expr):\n if result:\n results.append(result.toxml())\n \n return results", "def readDoc(self, filename):\n try:\n doc = ET.parse( filename, parser=LineNumberingParser() )\n except self.ET_exc_class:\n raise XmlError( str(sys.exc_info()[1]) )\n\n rootnode = recurse_construct_ET_to_XmlNode( None, doc.getroot() )\n\n return rootnode", "def prettyxml(pyxb_xml, validate=True, ns=None, xslt=None):\n\n def indent(depth, prev):\n return ('\\n' + (' ' * 4 * depth)) if prev != 'd' else \"\"\n \n def args(alist):\n if len(alist):\n return ' ' + ' '.join([k+'=\"'+v+'\"' for (k,v) in alist.items()])\n return ''\n\n pyxb.defaultNamespace = ns\n pyxb.utils.domutils.BindingDOMSupport.SetDefaultNamespace(ns if ns else defaultNS)\n pyxb.RequireValidWhenGenerating(validate)\n rawxml = pyxb_xml if isinstance(pyxb_xml, basestring) else 
pyxb_xml.toxml()\n\n seq = []\n p = xml.parsers.expat.ParserCreate('UTF-8')\n p.StartElementHandler = lambda name, attrs: seq.append( ('s', name, attrs))\n p.EndElementHandler = lambda name: seq.append( ('e', name ))\n p.CharacterDataHandler = lambda data: seq.append( ('d', data ))\n p.Parse(rawxml.encode('UTF-8'),1)\n\n depth, prev, rval = (0, 'e', '<?xml version=\"1.0\" encoding=\"UTF-8\"?>' + (xslt if xslt else \"\"))\n dataText = ''\n while len(seq):\n e = seq.pop(0)\n if e[0] == 's':\n # shortcut the simple start end tag\n if seq[0][0] == 'e':\n assert e[1] == seq[0][1]\n seq.pop(0)\n rval += indent(depth, prev)\n rval += '<' + e[1] + args(e[2]) + '/>'\n depth -= 1\n else:\n if prev != 'e':\n depth += 1\n if dataText:\n rval += dataText\n dataText = ''\n rval += indent(depth, prev)\n rval += '<' + e[1] + args(e[2]) + '>'\n elif e[0] == 'e':\n if prev != 'd':\n depth -= 1\n else:\n rval += escape(unescape(dataText))\n dataText = ''\n rval += indent(depth, prev)\n rval += '</' + e[1] + '>'\n else:\n if prev != 'e' or e[0] <> 'd' or e[1].strip():\n dataText += e[1]\n if prev != 'e' or e[0] <> 'd' or e[1].strip():\n prev = e[0]\n return rval" ]
[ "0.7688683", "0.75327605", "0.7435341", "0.6999228", "0.6832689", "0.68084466", "0.67295486", "0.67295486", "0.67260617", "0.67260617", "0.66133803", "0.65811235", "0.65377057", "0.64042026", "0.63633126", "0.63447636", "0.6327719", "0.6314219", "0.6283882", "0.6245147", "0.622094", "0.6140487", "0.6096841", "0.60597086", "0.60542697", "0.60191584", "0.6007144", "0.5996614", "0.5991245", "0.5989244", "0.5976104", "0.5944021", "0.5898205", "0.58734334", "0.5863931", "0.585526", "0.58533627", "0.584904", "0.58452654", "0.5779001", "0.57755053", "0.5760941", "0.5758568", "0.5756785", "0.57556194", "0.5746608", "0.5738685", "0.5722508", "0.5714187", "0.57095987", "0.5693265", "0.5679834", "0.56669617", "0.5665034", "0.56632", "0.5650392", "0.56347567", "0.5614632", "0.5610198", "0.5608474", "0.55949473", "0.55798703", "0.55615914", "0.5554597", "0.5549774", "0.553723", "0.55088985", "0.55003405", "0.5497081", "0.5490974", "0.5478579", "0.5474622", "0.54574007", "0.5454044", "0.54523945", "0.54415756", "0.5427884", "0.5420693", "0.5414727", "0.5411115", "0.54003245", "0.5396622", "0.53946424", "0.5385786", "0.5351626", "0.53497684", "0.53476083", "0.533494", "0.532669", "0.5277137", "0.5269059", "0.52624756", "0.52505225", "0.5239207", "0.52167505", "0.52154285", "0.5206143", "0.5205603", "0.5201723", "0.51950395" ]
0.58009404
39
This will create persuasions in bulk and will push it to ES API /persuasion/bulk/create [ {
def bulk_create():
    logger.info("Creating persuasions in bulk")
    try:
        request_data = json.loads(request.data)
        with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:
            {executor.submit(PersuasionServices.create, data): data for data in request_data}
        return jsonify(
            dict(status="success", message="Your request is in the queue, persuasion will create shortly"))
    except Exception as e:
        logger.error("Exception while creating persuasions in bulk - " + repr(e))
        return jsonify(dict(status="failure", error=repr(e)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle(self, *args, **options):\n self.create_indices()\n self.bulk()", "def insert_to_elastic(elastic, paper_authors, papers, authors, index_name):\n helpers.bulk(elastic, merge_to_elastic(paper_authors, papers, authors, index_name))", "def bulk_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n args = request.args\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def upload_entities(self, batch):\n # TODO Include a Do Not Overwrite call\n results = None\n atlas_endpoint = self.endpoint_url + \"/entity/bulk\"\n\n payload = AtlasClient._prepare_entity_upload(batch)\n\n postBulkEntities = requests.post(\n atlas_endpoint,\n json=payload,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(postBulkEntities)\n\n return results", "def create_doc_bulk(self, file: str, index: str) -> None:\n\n with open(file, \"r\") as f:\n bulk(self.es, self.make_documents(f, index))", "def bulk_create(cls, cb, policy_ids):\n request = [{\"policy_id\": s} for s in policy_ids]\n url = cls.urlobject.format(cb.credentials.org_key) + \"/_bulk\"\n resp = cb.post_object(url, body=request)\n result = resp.json()\n item_list = result.get(\"results\", [])\n return [cls(cb, item[\"id\"], item) for item in item_list]", "def bulk_push_to_elastic(elastic_search_url, index, docs):\n CREATE_TEMPLATE = {\"create\": {\"_index\": index, \"_type\": \"_doc\", \"_id\": \"\"}}\n\n bulk_request_body = \"\"\n for doc in docs:\n CREATE_TEMPLATE[\"create\"][\"_id\"] = doc[HASH_ID]\n bulk_request_body += json.dumps(CREATE_TEMPLATE) + NEW_LINE\n bulk_request_body += json.dumps(doc) + NEW_LINE\n\n # Request\n headers = {\"content-type\": \"application/x-ndjson\"}\n\n url = elastic_search_url + \"/\" + \"_bulk\"\n\n response = requests.post(url, data=bulk_request_body, headers=headers)\n return response", "def create_instance_bulk(self, context, tenant_id, neutron_ports, vms,\n port_profiles, sync=False):", "def bulk(self) -> None:\n helpers.bulk(self.client, self.gen_business_data(BUSINESS_FP))\n helpers.bulk(self.client, self.gen_review_data(REVIEW_FP))\n helpers.bulk(self.client, self.gen_tip_data(TIP_FP))", "def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):\n posts = []\n\n for i in range(num):\n posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))\n\n return posts", "def post_bulk(bulk_json):\n\n nbtry=0\n success=False\n\n # Bulk insert\n ####################################################################\n cnx.request(\"POST\",config.index+\"/\"+config.typ+\"/_bulk\",bulk_json) #\n ####################################################################\n\n # Get and read response from Elastic Search server\n resp=cnx.getresponse()\n resp_msg_json= resp.read()\n #sys.stderr.write( resp_msg_json + \"\\n\")\n resp_msg=json.loads(resp_msg_json)\n # Check status: both HTTP and within the Elastic Search answer\n if resp.status != httplib.OK or resp_msg[\"errors\"] is True:\n sys.stderr.write( bulk_json)\n sys.stderr.write( resp_msg_json 
+\"\\n\")\n raise Exception(\"ERROR when bulk loading into %s/%s: %d %s\\n\" % (config.index,config.typ, resp.status, resp.reason))", "def do_bulk(self, args):\n pass", "def schedule_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n args = request.args\n if not args.get(\"type\") or not args.get(\"sub_type\"):\n return jsonify(\n dict(status=\"failure\", message=\"Invalid type/sub_type\"))\n\n persuasions_request = PersuasionServices.get_persuasions_request_from_es(args)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in persuasions_request}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "async def insert_many(self, models):\n\n pass", "def bulk_insert(engine, model, entries):\n with session_scope(engine) as session:\n session.bulk_insert_mappings(model, entries)\n session.commit()", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def create_entities(self, entity_type):\n data = self.read_file(entity_type)\n base_url = data['url']\n for entity in data['entities']:\n url = base_url + entity['url']\n for data in entity['entities']:\n r = requests.post(url, json.dumps(data))\n print(r.text)", "def bulk_insert(objects, model, session):\n session.bulk_insert_mappings(model, objects)", "def bulk_create(cls, cb, approvals):\n url = cls.urlobject.format(cb.credentials.org_key) + \"/_bulk\"\n resp = cb.post_object(url, body=approvals)\n result = resp.json()\n item_list = result.get(\"results\", [])\n return [cls(cb, item[\"id\"], item) for item in item_list]", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_create_action(self):\n pass", "def bulk_create_samples(request):\n sample_names = request.data['names']\n library_uuid = request.data['library']\n library = SampleGroup.objects.get(pk=library_uuid)\n try:\n membership_queryset = request.user.organization_set.filter(pk=library.organization.pk)\n authorized = membership_queryset.exists()\n except AttributeError: # occurs if user is not logged in\n authorized = False\n if not authorized:\n raise PermissionDenied(_('Insufficient permissions to get group manifest.'))\n uuids = []\n for name in sample_names:\n sample = library.create_sample(name=name)\n uuids.append(sample.uuid)\n return Response({'uuids': uuids}, status=201)", "def store_index(self, index, doc_type, source_list, init_id):\n\n bulk_actions = []\n doc_id = init_id\n\n for source in source_list:\n data_body = ElasticSearchUtility.__index_data_body(index, doc_type, doc_id, source[\"_source\"])\n bulk_actions.append(data_body)\n doc_id += 1\n\n print 'inserting - ', len(bulk_actions)\n helpers.bulk(self.es, bulk_actions)", "def 
insert_mass_data(topics=100, posts=100):\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n forum = Forum.query.filter_by(id=1).first()\n\n created_posts = 0\n created_topics = 0\n\n if not (user1 or user2 or forum):\n return False\n\n # create 1000 topics\n for i in range(1, topics + 1):\n\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % i\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n created_topics += 1\n\n # create 100 posts in each topic\n for j in range(1, posts + 1):\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n created_posts += 1\n\n return created_topics, created_posts", "def test_create_bulk_academic(self):\n pass", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)", "def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def bulk_create(cls, raw_list):\n\t\tresource_list = [cls(**item) for item in raw_list]\n\t\tdb.session.add_all(resource_list)\n\t\tdb.session.commit()\n\n\t\treturn resource_list", "async def create_many(\n self,\n documents: List[Dict[str, Any]],\n *,\n unique_key: Optional[str] = None,\n unique_filter: Optional[Dict[str, Any]] = None,\n session: Optional[Any] = None,\n **kwargs: Any,\n ) -> InsertManyResult:\n return await self._database.create_many(\n self.name,\n documents=documents,\n unique_key=unique_key,\n unique_filter=unique_filter,\n session=session,\n **kwargs,\n )", "async def bulk_insert(self, documents, alias=None):\n\n is_valid = True\n docs_to_insert = []\n\n for document_index, document in enumerate(documents):\n self.update_field_on_save_values(document, document._id is not None)\n try:\n is_valid = is_valid and self.validate_document(document)\n except Exception:\n err = sys.exc_info()[1]\n raise ValueError(\n \"Validation for document %d in the documents you are saving failed with: %s\"\n % (document_index, str(err))\n )\n\n if not is_valid:\n return\n\n docs_to_insert.append(document.to_son())\n\n if not is_valid:\n return\n\n doc_ids = await self.coll(alias).insert(docs_to_insert)\n\n for object_index, object_id in enumerate(doc_ids):\n documents[object_index]._id = object_id\n\n return documents", "def batch(self, request):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/batch\" % 
self.url_index_name, self.client.timeout, request)", "def bulkSave(self, objList: List[PermissionContext], tokenData: TokenData):", "def insertmany(cls, *args):\n return InsertQuery(cls).bulk(True).set(*args)", "def upload_entities(self, entity_upload_parameters, progress=None):\n \n tmp_file = path.join(self.working_directory, '{0}.csv'.format(uuid.uuid1()))\n with BulkFileWriter(tmp_file) as writer:\n for entity in entity_upload_parameters.entities:\n writer.write_entity(entity)\n\n if (self.need_to_try_upload_entity_records_sync_first(entity_upload_parameters)):\n return self.bulkupload_entitie_records(entity_upload_parameters, tmp_file, progress)\n else:\n return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)", "def bulk_index(data):\n\n def bulk_api_string(item):\n return f\"{{\\\"index\\\":{{}}\\n{json.dumps(item)}\"\n\n body = '\\n'.join([bulk_api_string(item) for item in data]) + '\\n'\n\n return make_request(\n requests.post,\n url=f\"{connection.hostname}:{connection.port}/{connection.index}/_bulk\",\n headers={'Content-Type': 'application/json'},\n auth=auth,\n data=body\n )", "def save_posts(self, posts):\n return self.collection.insert_many(map(lambda post: post.serialize(), posts))", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def upload(self, documents: List[ElasticDocument], vectorise_func, index: str = None) -> None:\n if not index:\n index = self._index\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n for batch in batches:\n payload = []\n # Calculate vectors\n vectorise_func(batch, self)\n\n for document in batch:\n # JSON representation of document\n doc_json = document.to_elastic()\n\n # Add correct index\n doc_json[\"_index\"] = index\n\n # Rename id key\n doc_json[\"_id\"] = doc_json[\"id\"]\n del doc_json[\"id\"]\n\n payload.append(doc_json)\n\n # Bulk upload to elasticsearch\n helpers.bulk(self._client, payload)\n\n # Update index\n self._client.indices.refresh(index=self._index)", "def upload_pon(pon_df, pon_name, namespace, workspace):\n os.system('mkdir -p PoNs')\n filename = './PoNs/fc_upload_PoN_%s.txt' % (pon_name)\n pon_df.to_csv(filename, '\\t', index=False)\n res = upload_entities_from_tsv(namespace, workspace, 'PoNs/fc_upload_PoN_%s.txt'%pon_name)\n return res", "def create_multiple(\n self,\n service_name,\n entries=None,\n additional_data=None\n ):\n return self.page(\n service_name=service_name,\n function=CreateMultiple,\n entries=entries,\n additional_data=additional_data,\n )", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, 
status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()", "def batch(self, requests):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/indexes/*/batch\", self.timeout, {\"requests\": requests})", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()", "def bulk_insert(cls, path=\"data.json\"):\n from json import load\n from codecs import open\n \n lists = load(open(path, \"r\", \"utf8\"))\n for lst in lists:\n ing = cls(content = lst)\n ing.put()", "def processed_bulk(self, pipeline):\n docs = [Document([], text=t) for t in EN_DOCS]\n return pipeline(docs)", "def create_indices(self) -> None:\n self.client.indices.create(\n index=\"business\",\n body=BUSINESS_MAPPINGS\n )\n self.client.indices.create(\n index=\"review\",\n body=REVIEW_MAPPINGS\n )\n self.client.indices.create(\n index=\"tip\",\n body=TIP_MAPPINGS\n )", "def process_es_bulk(pub_list, es):\n bulk_response = es.bulk(\n body=''.join(pub_list),\n refresh='wait_for',\n request_timeout=3600,\n )\n if bulk_response.get('errors'):\n logger.error('failed on bulk indexing:\\n%s',\n bulk_response)\n raise IndexingErrorException()\n return len(pub_list)", "def write(cls, templates, values, *args):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n rv = super(Template, cls).write(templates, values, *args)\n\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return rv", "def bulk_entries_create(self, feed_id: int, parsed_entries: List) -> None:\n entries_to_create = []\n for entry in parsed_entries:\n entries_to_create.append(\n Item(\n feed_id=feed_id,\n title=entry[\"title\"],\n link=entry[\"link\"],\n description=entry[\"summary\"],\n published_at=datetime.fromtimestamp(\n mktime(entry[\"published_parsed\"])\n ),\n )\n )\n self.bulk_create(entries_to_create)", "def push_bulk(self, obj_list, doc_type=None, refresh=True):\n assert isinstance(obj_list, collections.Sequence)\n assert len(obj_list) > 0\n\n es_obj_list = []\n for obj in obj_list:\n if obj is None:\n logger.warning(\"None object in input list\")\n continue\n\n doc_type, es_repr = self._validate_doc_and_get_type_and_repr(obj, doc_type)\n metadata = {\n '_op_type': 'index',\n \"_index\": self.index_name,\n \"_type\": doc_type,\n }\n es_repr.update(**metadata)\n\n es_obj_list.append(es_repr)\n\n helpers.bulk(client=self.conn.elastic_search_client, actions=es_obj_list,\n stats_only=True, refresh=u'true' if refresh else u'false')", "def create(self, dictionaries):\n \n return self.ep.post(self.endpoint, params=dictionaries)", "def import_documents(self, index, documents, **kwargs):\n self._logger.info('%s documents to index into %s', len(documents), index)\n response = None\n if 'pipeline' in kwargs:\n pipeline_name = 
kwargs.get(\"pipeline\")\n response = helpers.bulk(self.es, documents, index=index, doc_type=self.doc_type, pipeline=pipeline_name)\n else:\n response = helpers.bulk(self.es, documents, index=index, doc_type=self.doc_type)\n\n # It returns a tuple with summary information - \n # number of successfully executed actions and either list of errors or number of errors if stats_only is set to True.\n return response", "def to_db(self):\n bulk = conn_db().initialize_ordered_bulk_op()\n for fiction in self.fictions:\n bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})\n bulk.execute()", "def create_multiple_people(sqla, n):\n person_schema = PersonSchema()\n new_people = []\n for i in range(n):\n valid_person = person_schema.load(person_object_factory())\n new_people.append(Person(**valid_person))\n sqla.add_all(new_people)\n sqla.commit()", "def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")", "def build_create_or_update_entities_request(\n *,\n json: Any = None,\n content: Any = None,\n **kwargs: Any\n) -> HttpRequest:\n content_type = kwargs.pop(\"content_type\", None)\n accept = \"application/json\"\n\n # Construct URL\n url = kwargs.pop(\"template_url\", '/atlas/v2/entity/bulk')\n\n # Construct headers\n header_parameters = kwargs.pop(\"headers\", {}) # type: Dict[str, Any]\n if content_type is not None:\n header_parameters['Content-Type'] = _SERIALIZER.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = _SERIALIZER.header(\"accept\", accept, 'str')\n\n return HttpRequest(\n method=\"POST\",\n url=url,\n headers=header_parameters,\n json=json,\n content=content,\n **kwargs\n )", "def create(cls, vlist):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n templates = super(Template, cls).create(vlist)\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return templates", "def set_many(self, mapping, timeout=None):\n values = [self._get_doc(key, value, timeout) for key, value in mapping.iteritems()]\n self.collection.insert_many(values)\n return True", "def save_posts(self):\n logger.info(\"Savings posts to database\")\n records = self.df.to_dict(\"records\")\n\n for record in records:\n Company.objects.get_or_create(name=record[\"company\"])\n\n Post.objects.get_or_create(\n title=record[\"title\"],\n company_id=record[\"company\"],\n defaults={\n \"date_posted\": record[\"date_posted\"],\n \"description\": record[\"description\"],\n \"location\": record[\"location\"],\n \"is_sponsored\": False,\n \"date_added_db\": record[\"date_added_db\"],\n \"source_id\": record[\"source\"],\n \"link\": record[\"link\"],\n },\n )", "def post_document_bulk_annotation(document_id: int, annotation_list, session=konfuzio_session()):\n url = post_project_api_document_annotations_url(document_id)\n r = session.post(url, json=annotation_list)\n r.raise_for_status()\n return 
r", "def test_get_people_list(self):\n person_1 = Person(\n first_name='Emilia',\n last_name='Clarke',\n aliases='Emi'\n )\n person_2 = Person(\n first_name='Peter',\n last_name='Dinklage',\n )\n person_3 = Person(\n first_name='Thomas',\n last_name='McCarthy',\n aliases='Thom'\n )\n\n Person.objects.bulk_create([person_1, person_2, person_3])\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('count'), Person.objects.count())", "def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' 
%\n added_counter)", "def insert(self, index, documents, batch_size=100):\n actions = []\n latest_index_id, begin_timestamp = self.__get_latest_index(index)\n\n for idx, doc in enumerate(documents):\n index_id = latest_index_id\n\n if doc[\"request_time\"] <= begin_timestamp:\n index_id = self.get_query_index(index, doc[\"request_time\"])\n\n action = {\n \"_index\": index + \"_\" + str(index_id),\n \"_type\": \"docs\",\n \"_source\": doc,\n }\n actions.append(action)\n\n if len(actions) == batch_size or idx == len(documents) - 1:\n print(\"Bulk ingesting started...\")\n\n try:\n bulk(self.client, actions, raise_on_error=True, request_timeout=200)\n except:\n print(\"Could not write the data.\")\n raise\n \n actions.clear()\n print(\"Bulk ingesting done\")\n if self.__get_index_size(index, latest_index_id) >= self.THRESHOLD:\n begin_timestamp = self.__update_index_timerange(\n index, latest_index_id\n )\n latest_index_id = self.__create_new_index(\n index, latest_index_id + 1, begin_timestamp\n )", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def bulk_upsert(self, data, parameters=None, many_to_many_clear: bool = True):\n elements = []\n\n # Skip if no identifier is set\n if not self.unique_identifier or not data:\n return\n\n for element in data:\n if isinstance(parameters, dict):\n element.update(parameters)\n\n element, created = self.upsert(data=element, many_to_many_clear=many_to_many_clear)\n elements.append(element)\n\n return elements", "def publish_impl(self) -> None:\n\n LOGGER.warn('ElasticsearchPublisher is being deprecated in favor of using SearchMetadatatoElasticasearchTask\\\n which publishes ES metadata with mappings compatible with amundsensearch >= 4.0.0')\n\n actions = [json.loads(line) for line in self.file_handler.readlines()]\n # ensure new data exists\n if not actions:\n LOGGER.warning(\"received no data to upload to Elasticsearch!\")\n return\n\n # Convert object to json for elasticsearch bulk upload\n # Bulk load JSON format is defined here:\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.2/docs-bulk.html\n bulk_actions = []\n cnt = 0\n\n # create new index with mapping\n self.elasticsearch_client.indices.create(index=self.elasticsearch_new_index, body=self.elasticsearch_mapping)\n\n for action in actions:\n index_row = dict(index=dict(_index=self.elasticsearch_new_index))\n action['resource_type'] = self.elasticsearch_type\n\n bulk_actions.append(index_row)\n bulk_actions.append(action)\n cnt += 1\n if cnt == self.elasticsearch_batch_size:\n self.elasticsearch_client.bulk(bulk_actions)\n LOGGER.info('Publish %i of records to ES', cnt)\n cnt = 0\n bulk_actions = []\n\n # Do the final bulk actions\n if bulk_actions:\n self.elasticsearch_client.bulk(bulk_actions)\n\n # fetch indices that have {elasticsearch_alias} as alias\n elasticsearch_old_indices = self._fetch_old_index()\n\n # update alias to point to the new index\n actions = [{\"add\": {\"index\": self.elasticsearch_new_index, \"alias\": self.elasticsearch_alias}}]\n\n # 
delete old indices\n delete_actions = [{\"remove_index\": {\"index\": index}} for index in elasticsearch_old_indices]\n actions.extend(delete_actions)\n\n update_action = {\"actions\": actions}\n\n # perform alias update and index delete in single atomic operation\n self.elasticsearch_client.indices.update_aliases(update_action)", "def bulkCreateTasks(request, *args, **kwargs):\n import settings\n\n # keep track of our own timelimit (20 seconds)\n timelimit = 20000\n timekeeper = Timekeeper(timelimit)\n\n post_dict = request.POST\n\n bulk_create_key = post_dict.get('bulk_create_key')\n if not bulk_create_key:\n return error_handler.logErrorAndReturnOK(\n 'Not all POST data specified in: %s' % post_dict)\n\n bulk_data = bulk_create_model.GCIBulkCreateData.get(bulk_create_key)\n if not bulk_data:\n return error_handler.logErrorAndReturnOK(\n 'No valid data found for key: %s' % bulk_create_key)\n\n # note that we only query for the quota once\n org_admin = bulk_data.created_by\n task_quota = org_logic.getRemainingTaskQuota(org_admin.scope)\n\n tasks = bulk_data.tasks\n while len(tasks) > 0:\n try:\n # check if we have time\n timekeeper.ping()\n\n if settings.GCI_TASK_QUOTA_LIMIT_ENABLED and task_quota <= 0:\n return error_handler.logErrorAndReturnOK(\n 'Task quota reached for %s' %(org_admin.scope.name))\n\n # remove the first task\n task_as_string = tasks.pop(0)\n\n loaded_task = simplejson.loads(task_as_string)\n task = {}\n for key, value in loaded_task.iteritems():\n # If we don't do this python will complain about kwargs not being\n # strings when we try to save the new task.\n task[key.encode('UTF-8')] = value\n\n logging.info('Uncleaned task: %s' %task)\n # clean the data\n errors = _cleanTask(task, org_admin)\n\n if errors:\n logging.warning(\n 'Invalid task data uploaded, the following errors occurred: %s'\n %errors)\n bulk_data.errors.append(db.Text(\n 'The task in row %i contains the following errors.\\n %s' \\\n %(bulk_data.tasksRemoved(), '\\n'.join(errors))))\n\n # at-most-once semantics for creating tasks\n bulk_data.put()\n\n if errors:\n # do the next task\n continue\n\n # set other properties\n task['link_id'] = 't%i' % (int(time.time()*100))\n task['scope'] = org_admin.scope\n task['scope_path'] = org_admin.scope_path\n task['program'] = org_admin.program\n task['status'] = 'Unpublished'\n task['created_by'] = org_admin\n task['modified_by'] = org_admin\n\n # create the new task\n logging.info('Creating new task with fields: %s' %task)\n task_logic.updateOrCreateFromFields(task)\n task_quota = task_quota - 1\n except DeadlineExceededError:\n # time to bail out\n pass\n\n if len(tasks) == 0:\n # send out a message\n notifications.sendBulkCreationCompleted(bulk_data)\n bulk_data.delete()\n else:\n # there is still work to be done, do a non 500 response and requeue\n task_params = {\n 'bulk_create_key': bulk_data.key().id_or_name()\n }\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')\n\n # we're done here\n return http.HttpResponse('OK')", "def setup(session: 'Session') -> None:\n\n create_many(session, LEVEL_NAMES, commit=False)", "def multipart():\n with commit():\n link_and_create_multipart_volumes()\n reindex_pidtype('docid')\n reindex_pidtype('serid')", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def bulkInsert(self, url, values):\n pass", "def create_streams(streams):\n for stream in streams:\n Stream.objects.create(**stream)", "def 
build_es_bulk(line):\n action = '{\"index\": {\"_index\": \"%s\"}}' % EPMC_METADATA_INDEX\n data = line + '\\n'\n return '\\n'.join([action, data])", "def spawnBulkCreateTasks(data, org_admin):\n data = StringIO.StringIO(data.encode('UTF-8'))\n tasks = csv.DictReader(data, fieldnames=DATA_HEADERS, restval=\"\")\n\n task_list = []\n for task in tasks:\n # pop any extra columns\n task.pop(None,None)\n task_list.append(db.Text(simplejson.dumps(task)))\n\n bulk_data = bulk_create_model.GCIBulkCreateData(\n tasks=task_list, created_by=org_admin, total_tasks=len(task_list))\n bulk_data.put()\n\n task_params = {\n 'bulk_create_key': bulk_data.key()\n }\n\n logging.info('Enqueued bulk_create with: %s' %task_params)\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')", "def add_elasticsearch_records(self, data_list):\n actions = [self.create_data_record(data_dict) for data_dict in data_list]\n self.actions_buffer.extend(actions)", "def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch", "def upload_multiple():\n gene_list = request.json['gene_list']\n project_id = request.json['project_id']\n all_message = ''\n html = ''\n added_list = []\n button_list = ''\n\n for gene in sorted(gene_list):\n if gene == \"\" or gene in added_list:\n continue\n dct = create_panel_get_tx(gene, project_id)\n if dct[\"message\"] == \"added\":\n added_list.append(gene)\n else:\n all_message += dct[\"message\"]\n try:\n html += dct[\"html\"]\n except KeyError:\n pass\n try:\n button_list += dct[\"button_list\"]\n except KeyError:\n pass\n\n if len(added_list) > 0:\n added_message = render_template(\"added_list.html\", added_list=enumerate(added_list), length=len(added_list))\n all_message += added_message\n\n return jsonify({'message': all_message, 'html': html, 'button_list': button_list})", "def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. 
All documents must be dictionaries.\"\n )\n self._dump()", "def upload(self, durations, skill_name, skill_args):\n for dur in durations:\n doc = {\"outcome\": 1, \"error\": \"\", \"name\": skill_name, \"duration\": dur}\n args_dict = dict()\n if skill_args != None:\n for arg in skill_args:\n if len(arg) == 1:\n args_dict[arg[0]] = \".*\"\n else:\n args_dict[arg[0]] = np.random.choice(arg[1:])\n doc[\"args\"] = args_dict\n print(doc)\n if not self.dry_run:\n self.lookup_col.insert_one(doc)", "def create(requests, user=None):\n return rest.post_multi(resource=_resource, entities=requests, user=user)", "def post_samples(self, text, entities, *args):\n params = {'v': WIT_API_VERSION}\n data = []\n if text and entities:\n data.append({'text': text,'entities': entities})\n if len(args)>1:\n x=0\n while x<len(args):\n data.append({'text': args[x],'entities': args[x+1]})\n x+=2\n return req(self.logger, self.access_token, 'POST', '/samples', params,data=json.dumps(data))", "def createParticles(self, type, style, *args):\n if not self.rank:\n logging.info('Creating particles {} with args'.format(type) + (' {}' * len(args)).format(*args))\n\n self.lmp.command('create_atoms {} {}'.format(type, style) + (' {}' * len(args)).format(*args))", "def create(cls, vlist):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n\n products = super(Product, cls).create(vlist)\n IndexBacklog.create_from_records(products)\n return products", "def post(self):\n datastore_hooks.SetPrivilegedRequest()\n urlsafe_keys = self.request.get('keys').split(',')\n keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]\n results = ndb.get_multi(keys)\n\n tests = []\n entities = []\n\n for e in results:\n if e.key.kind() == 'TestMetadata':\n tests.append(e)\n else:\n entities.append(e)\n\n for t in tests:\n t.UpdateSheriff()\n t.put()\n\n ndb.put_multi(entities)", "def index_bulk_from_files(self, files):\r\n\r\n docs = self._mailextractor.extract_jsons(files) # Generator-Iterable\r\n actions = self.convert_docstrs_to_bulk_actions(docs) # Generator-Iterable\r\n\r\n self._cur_print = 0\r\n actions_for_chunk = self.print_chunk_progress(actions) # Generator-Iterable\r\n (cnt_success, errors_index) = es_helpers.bulk(\r\n self._es, actions_for_chunk, chunk_size=constants.ES_BULK_CHUNK_SIZE)\r\n\r\n cnt_total = self._mailextractor.cnt_total\r\n errors_convert = self._mailextractor.errors_convert\r\n cnt_error = len(errors_convert) + len(errors_index)\r\n return Summary(cnt_total=cnt_total, cnt_success=cnt_success, cnt_error=cnt_error,\r\n errors_convert=errors_convert, errors_index=errors_index)", "def test_bulk_actors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actor)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.actor_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)", "def bulk_indice(self, records: List[dict], index_name: str, pipeline: str) -> None:\n events = []\n for record in records:\n event = {\"_id\": self.calc_hash(record), \"_index\": index_name, \"_source\": record}\n if pipeline != \"\":\n event[\"pipeline\"] = pipeline\n events.append(event)\n bulk(self.es, events, raise_on_error=False)", "def Create(self, domainsList) :\n\t\t...", "def create(server,idx):\n \n uaModel.__create_type(server,idx)\n uaModel.__create_objects(server,idx)", "def _create_articles(articles: List[ArticleDto]):\n if articles:\n existing = 
ArticleRepo.get_existing_objects(\n id_list=[article.id for article in articles]\n )\n articles = [article for article in articles if article.id not in existing]\n ArticleRepo.create_articles(articles=articles)", "def createobj(self, firstname='', middlename='', lasttname='', email='', phno='', gender='', address='',\n peronjson={}, notes='', tenantid=''):\n personobj = {'firstname': firstname, 'middlename': middlename, 'lasttname': lasttname, 'email': email, 'phno':\n phno, 'gender': gender, 'address': address, 'peronjson': peronjson, 'notes': notes, 'tenantid': tenantid}\n self.persontdetailscoll.insert(tenantobj, safe=True)", "def insertMasivo(self, objects):\n retorno = 1\n \n try:\n n = len(objects)\n for i in xrange(n):\n self.session.add(objects[i])\n if i % 100 == 0:\n self.session.flush()\n #for object in objects:\n # self.session.add(object)\n self.session.commit()\n except sqlalchemy.exc.DBAPIError, e:\n if self.session is not None:\n self.session.rollback()\n retorno = -1\n print(\"Error!\", e)\n finally:\n if self._DAOAlchemy__cerrarSesion:\n self.session.close()\n return retorno", "def create_dataset(request):\n body = json.loads(request.body)\n try:\n org = Organization.objects.get(pk=body['organization_id'])\n except Organization.DoesNotExist:\n return {\"status\": 'error',\n 'message': 'organization_id not provided'}\n record = ImportRecord.objects.create(\n name=body['name'],\n app=\"seed\",\n start_time=datetime.datetime.now(),\n created_at=datetime.datetime.now(),\n last_modified_by=request.user,\n super_organization=org,\n owner=request.user,\n )\n\n return {\n 'status': 'success',\n 'id': record.pk,\n 'name': record.name,\n }", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def test_bulk(es_bulk, mock_es_client):\n actions = []\n chunk_size = 10\n elasticsearch.bulk(actions=actions, chunk_size=chunk_size)\n\n es_bulk.assert_called_with(\n mock_es_client.return_value,\n actions=actions,\n chunk_size=chunk_size,\n max_chunk_bytes=settings.ES_BULK_MAX_CHUNK_BYTES,\n )", "async def insert_many(self, models: t.Sequence[Model]):\n\n for model in models:\n if not isinstance(model, Model):\n raise ValueError('insert_many method expects Model instances.')\n\n data = []\n\n for model in models:\n model_as_dict = model.as_dict\n\n if not model_as_dict.get('_id'):\n model_as_dict.pop('_id')\n\n data.append(model_as_dict)\n\n return await self.collection.insert_many(data)", "def BatchCreate(self, request, global_params=None):\n config = self.GetMethodConfig('BatchCreate')\n return self._RunMethod(\n config, request, global_params=global_params)" ]
[ "0.60707426", "0.6044942", "0.6015414", "0.5868079", "0.58612776", "0.58422595", "0.57517433", "0.5748778", "0.5713514", "0.56829447", "0.56696784", "0.5647391", "0.5640021", "0.56258446", "0.5564406", "0.5555156", "0.5547321", "0.5522877", "0.5503289", "0.5487169", "0.5446748", "0.5438601", "0.54330784", "0.54143333", "0.5403617", "0.53948283", "0.5386578", "0.53577405", "0.53415215", "0.53323394", "0.53317887", "0.53307897", "0.53077257", "0.53074455", "0.5292724", "0.5281651", "0.52430445", "0.52377725", "0.5234141", "0.5226357", "0.522452", "0.52168167", "0.51752573", "0.5157943", "0.51574814", "0.51542753", "0.5106583", "0.50939727", "0.5087876", "0.50590205", "0.50474036", "0.5044962", "0.5029526", "0.50212836", "0.5020642", "0.5017058", "0.50074375", "0.5002954", "0.4995208", "0.4988046", "0.4986841", "0.496571", "0.49629536", "0.49546656", "0.49480656", "0.49377048", "0.49300596", "0.49238423", "0.49135062", "0.49078763", "0.49050105", "0.48982006", "0.4891108", "0.48909172", "0.48791447", "0.4875368", "0.48713124", "0.48555833", "0.48414445", "0.48372746", "0.48363447", "0.48363215", "0.48342556", "0.4830828", "0.48286915", "0.48200876", "0.48190466", "0.48179471", "0.48171675", "0.481597", "0.4806129", "0.48052022", "0.4803582", "0.47973174", "0.47851214", "0.47840393", "0.47826895", "0.47820047", "0.47805396", "0.4772079" ]
0.79547685
0
This will update the persuasion and will return it. API: /persuasion/refresh?push_to_es=true&push_to_inflow=false {
def refresh_persuasion(): try: request_data = json.loads(request.data) meta = request.args persuasion = PersuasionServices.refresh(request_data, meta) except Exception as e: error_msg = "Getting persuasion details - " + repr(e) logger.error(error_msg) return jsonify(dict(status="failure", error=error_msg)) return jsonify(dict(status="success", data=persuasion if persuasion else dict()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schedule_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n args = request.args\n if not args.get(\"type\") or not args.get(\"sub_type\"):\n return jsonify(\n dict(status=\"failure\", message=\"Invalid type/sub_type\"))\n\n persuasions_request = PersuasionServices.get_persuasions_request_from_es(args)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in persuasions_request}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def bulk_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n args = request.args\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def fullRefresh(self, p):\n p = self.generalisedRefresh(p)\n return p", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)", "def refresh_details(self) -> 'outputs.RefreshDetailsResponse':\n return pulumi.get(self, \"refresh_details\")", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def refresh(request):\n \n t, created = LastUpdate.objects.get_or_create(id=1)\n if created:\n t.save()\n t = t.unix_time()\n else:\n t = t.unix_time()\n latest = int(t) - 2\n return JsonResponse({'latest':int(latest)})", "def refresh(self):\n return self._refresh", "def refresh(self): \n return self._config.refreshObj(self)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def updates(self, 
request, version):\n task = EtlTask.objects.get_for_model(self.queryset.model)\n if task.last_changes:\n offset = task.last_changes.strftime(\"%Y-%m-%d %H:%M\")\n queryset = self.queryset.filter(last_modify_date__gte=offset)\n else:\n offset = \"none\"\n queryset = self.queryset.all()\n queryset = self.paginate_queryset(queryset)\n serializer = self.get_serializer(queryset, many=True)\n return self.get_paginated_response(serializer.data)\n # return Response(serializer.data,\n # headers={'update-date': offset})", "def refresh_parcel_repos(self):\n if self.api_client.api_version < 'v16':\n logger.warning('Detected API version without support '\n 'for refreshParcelRepos (%s). Sleeping instead ...',\n self.api_client.api_version)\n sleep(30)\n else:\n return self.api_client.refresh_parcel_repos()", "def refresh_counterfactual_json(request):\t\n\n\n \"\"\"\n the following json are for compound exposures\n \"\"\"\n #json_file = '/home/zhou/Downloads/jsons/compound/json_9.json'\n\tjson_file = '/Users/jiaozi/Downloads/jsons/compound/json_9.json'\n\n exposure_sequence = read_json(json_file)#list of exposures{mean,sd,non_rate}\n\n\tprimeCoordinator = PrimeCoordinator()\n\tprimeCoordinator.get_counterfactual_compound_exposures(exposure_sequence)\n\t\n\t# get the data in the interface\n\tb_output_mortality \t= primeCoordinator.output_baseline_mortality # baseline mortality list for all outcomes\n\tb_output_mortality_num \t= primeCoordinator.output_baseline_mortality_num # baseline mortality sum up for each outcome\n\tb_total_mortality \t= primeCoordinator.output_baseline_mortality_total# baseline mortality sum up for all outcomes\n\tc_output_mortality \t= primeCoordinator.output_counterfactual_mortality# counterfactual mortality for all outcomes\n\tc_output_mortality_num \t= primeCoordinator.output_counterfactual_mortality_num# counterfactual mortality for each outcome\n\tc_total_mortality \t= primeCoordinator.output_counterfactual_mortality_total# counterfactual mortality sum up for all outcomes\n\ttotal_population\t= primeCoordinator.output_total_population\n\tall_mortality_exposure\t= primeCoordinator.output_all_mortality_exposure\n\ttotal_death_averted\t= str(round(primeCoordinator.output_total_death_averted,0))\n\ttotal_death_baseline\t= str(primeCoordinator.output_total_death_baseline)\n\n\t#transmit the parameters\n\ttemplate = loader.get_template('primemodel/index.html')\n\tpara_view = {\n\t\t\t'b_output_mortality_num':\tb_output_mortality_num,\n\t\t\t'b_total_mortality':\t\tb_total_mortality,\n\t\t\t'c_output_mortality_num':\tc_output_mortality_num,\n\t\t\t'c_total_mortality':\t\tc_total_mortality,\n\t\t\t'total_population':\t\ttotal_population,\n\t\t\t'total_death_averted':\t\ttotal_death_averted,\n\t\t\t'total_death_baseline':\t\ttotal_death_baseline,\n\t\t\t'all_mortality_exposure':\tall_mortality_exposure\n\t\t\t}\n\n\t#context to transmit the parameters to show\n\tcontext = Context(para_view)\n\tresponse = template.render(context)\n\treturn HttpResponse(response)", "def refresh(self) -> object:\n requestor = Requestor(local_api_key=self._api_key)\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=self._retrieve_params)\n self.refresh_from(values=response, api_key=api_key)\n return self", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def _refresh(self):\n self._value = self._service.get_value(self._entry_point, self._path)", "def refresh(self):\n raise 
NotImplementedError(\"To be implemented\")", "def _refresh(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)\n resp = self._cb.get_object(url)\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _update_from_rest_data(self) -> None:", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def _Refresh(self):\n raise NotImplementedError", "def refresh(self, new_content):\n pass", "def Refresh(self):\n pass", "def refresh(self, id):\n exports.execute_export.delay(id)\n return render({\"id\": id})", "def refresh(modeladmin, request, queryset):\n if not queryset.exists():\n return # 404\n opts = queryset[0].content_object._meta\n module = '%s.%s' % (opts.app_label, opts.object_name)\n ids = queryset.values_list('object_id', flat=True)\n # Execute get_state isolated on a process to avoid gevent polluting the stack\n # and preventing this silly complain \"gevent is only usable from a single thread\"\n # ids listqueryset is converted in a list in order to be properly serialized\n result = get_state.delay(module, ids=list(ids), lock=False)\n try:\n # Block until finish\n result.get()\n except OperationLocked:\n msg = 'This operation is currently being executed by another process.'\n messages.error(request, msg)\n else:\n msg = 'The state of %d %ss has been updated.' % (queryset.count(), opts.object_name)\n modeladmin.message_user(request, msg)", "def refresh():\n DB.drop_all()\n DB.create_all()\n df_meas = open_api.measurements(city='Los Angeles', parameter='pm25', df=True)\n df_meas['date.utc'] = df_meas['date.utc'].astype(str)\n create_DB_records(df_meas)\n DB.session.commit()\n message = 'Data refreshed on: ' + str(datetime.datetime.now())\n over9s = Record.query.filter(Record.value > 9)\n recs = Record.query.filter(Record.id < 20)\n over5s = Record.query.filter(Record.value > 5)\n return render_template('base.html', message=message, over9s=over9s, over5s=over5s, recs=recs)", "def refresh(self):\n self._refresh_method()", "def refresh():\n\tsocketio.emit('refresh')\n\treturn status()", "async def poll_refresh(self) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.poll_refresh))", "def refresh():\n current_user = get_jwt_identity()\n\n user = get_user_by_username(current_user)\n\n if not user:\n return make_response(CONST_LOGIN_MSG, 401, {\n 'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})\n\n if user.is_admin:\n claims = {'is_admin': True}\n else:\n claims = {'is_admin': False}\n\n now = datetime.datetime.now(datetime.timezone.utc)\n access_expires = (now + jwt_config.access_expires).timestamp()\n refresh_expires = (now + jwt_config.refresh_expires).timestamp()\n\n response = {\n 'access_token': create_access_token(identity=current_user,\n user_claims=claims),\n 'access_expires': access_expires,\n 'refresh_expires': refresh_expires,\n 'refresh_token': create_refresh_token(identity=current_user),\n 'user': get_user_details(user)\n\n }\n return jsonify(response), 200", "async def async_update(self):\n await self._coordinator.async_request_refresh()", "async def async_update(self):\n await self._coordinator.async_request_refresh()", "async def async_update(self):\n await self._coordinator.async_request_refresh()", "def _refresh_query(session, query_id):\n resp = 
session.post('{}/api/queries/{}/refresh'.format(REDASH_HOST, query_id))\n return resp", "def update(self):\n return self._process('update')", "def refreshPage(self):\n cmdId = self.executeCommand(Command.REFRESH)\n return cmdId", "def refresh_view():\n pass", "def refresh(self):\n\n metadata = project_scrape(self.url)\n if metadata:\n if not self.done:\n self.progress.set_fraction(metadata['percent_raised'])\n self.progress.set_text(metadata['pretty_percent'])\n self.progress.set_show_text(True)\n self.pledged.set_text(metadata['pledged'])\n self.backers.set_text(metadata['backers'])\n self.updates.set_label(metadata['updates'])\n\n return True", "def refresh(self):\n self.__refresh()", "async def async_update(self):\n await self.coordinator.async_request_refresh()", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def refresh(self):\n yield from self._db._update_series(self.provider, self._dbattr(self.provider.IDATTR), dirty=self.providers)", "def refresh(self):\n resp = self._imgur._send_request(self._INFO_URL)\n self._populate(resp)\n self._has_fetched = True\n # NOTE: What if the object has been deleted in the meantime? That might\n # give a pretty cryptic error.", "def update():", "def update():", "def refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200", "def refresh():\n return __apf_cmd(\"-e\")", "def next_update(self):\r\n request = http.Request('GET', '/metadata/next_update.json')\r\n return request, parsers.parse_json", "def refresh_list(self):\n if self._dominfo_lock.acquire(False):\n try:\n return self._refresh_list()\n finally:\n self._dominfo_lock.release()\n else:\n # wait until the refresh done by the other party is complete\n with self._dominfo_lock:\n pass", "async def update(self):\n\t\tstate = await self._client.get_state('command.cgi?cmd=getObject&oid={0}&ot={1}'.format(self._oid,self._ot))\n\t\tif state is not None:\n\t\t\tself._raw_result = state", "def refresh(self):\n if self.refresh_token is None:\n raise InvalidInvocation('refresh token not provided')\n self._request_token(grant_type='refresh_token',\n refresh_token=self.refresh_token)", "def update(self):\n # TO DO for updating urls if changed\n pass", "def refresh():\n print(\"refresh request\")\n old_token = request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "async def async_update(self):\n await self.wrapper.async_request_refresh()", "async def refresh(ctx):\n await update_tournament_list()\n res = await refresh_algorithm()\n if res == True:\n await ctx.send(\"Successfully refreshed data from sheet.\")\n else:\n await ctx.send(\":warning: Unsuccessfully refreshed data from sheet.\")", "def update( ):\r\n pass", "def refresh(self):\n self.details = self.workspace.get_job(self.id).details", "def refresh(self):\n self.lease = self.blazar.lease.get(self.id)", "def _refresh(self):\n self._value = (self._service.get_value(self._entry_point, self._path)=='true')", "def refresh(self):\n raise UncodedError", "def _refresh(self):\n self._value = int(self._service.get_value(self._entry_point, self._path))", "async def refresh(self):\n while True:\n await asyncio.sleep(5/6 * self.lifetime)\n\n request = stun.Message(message_method=stun.Method.REFRESH,\n message_class=stun.Class.REQUEST)\n request.attributes['LIFETIME'] = self.lifetime\n 
self.__add_authentication(request)\n await self.request(request, self.server)", "def refresh(self) -> None:\n self._itempage.get()", "def update(self):\n self.arest.update()", "def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()", "def post(self, request):\n refreshed_data = refresh_token(request, request.DATA['token'])\n if refreshed_data:\n data = {}\n auth_data = string_to_dict(refreshed_data.replace(\"\\\"\", \"\"))\n data['access_token'] = auth_data['access_token']\n data['refresh_token'] = auth_data[' refresh_token']\n response_data = get_response_json(\n uri=request._request.path,\n lang=request.DATA['lang'],\n region=request.DATA['region'],\n created=False,\n responsecode=200,\n start=0,\n count=0,\n total=1,\n data=data)\n return Response(data=response_data, status=status.HTTP_200_OK)\n response_data = get_error_json(\n uri=request._request.path,\n lang=request.query_params.get(\"lang\"),\n region=request.query_params.get(\"region\"),\n description='Bad request',\n detail=\"Bad request\",\n responsecode=400,\n )\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)", "def update_positions(request):\n raw_data = request.raw_post_data\n logging.info(\"isrproxy update_positions: %s\" % raw_data)\n update_positions_data = simplejson.loads(raw_data)\n\n # redirect update to dev server in production environment\n if not settings.DEV:\n dev_positions = filter(lambda p: p.get(\"Operator_ID\") == WB_DEV_OPERATOR_ID, update_positions_data)\n if dev_positions:\n deferred.defer(safe_fetch, url=\"http://dev.latest.waybetter-app.appspot.com/fleet/isrproxy/update/positions/\", payload=simplejson.dumps(dev_positions), method=POST, notify=False)\n update_positions_data = filter(lambda p: p.get(\"Operator_ID\") != WB_DEV_OPERATOR_ID, update_positions_data)\n\n\n ride_positions = []\n for rp_data in update_positions_data:\n station_id = rp_data.get(\"Operator_ID\")\n taxi_id = rp_data.get(\"Vehicle_ID\")\n lat = rp_data.get(\"Lat\")\n lon = rp_data.get(\"Lon\")\n timestamp = rp_data.get(\"Timestamp\")\n\n ride_uuid = ISRProxy.get_taxi_assignment(taxi_id, station_id)\n\n if all([station_id, ride_uuid, taxi_id, lat, lon, timestamp]):\n timestamp = dateutil_parser.parse(timestamp)\n timestamp = normalize_naive_israel_dt(timestamp)\n ride_positions.append(TaxiRidePosition(station_id, taxi_id, ride_uuid, lat, lon, timestamp))\n\n fleet_manager.update_positions(ride_positions)\n return HttpResponse(\"OK\")", "def editPublication():\n # Get _id and path from url\n id = request.args.get(\"_id\")\n path = request.args.get(\"path\")\n\n # Execute database query\n active = True\n result = \"\"\n if active == True:\n client = MongoClient(db_config)\n db = client['Publications']\n publications = db['Publications']\n result = publications.find_one({\"_id\": id, \"path\": path})\n\n # Pre-processing\n del result[\"htmlResult\"] # Remove HTML output\n result[\"path\"].replace(\",\", \"/\") # Place slashes in path\n\n # Handle dates\n if len(result[\"date\"]) == 10:\n date = result[\"date\"]\n result[\"date\"] = {\"toggledaterange\": False, \"sdate\": date, \"start\": date, \"end\": date}\n else:\n # Format: {'start': '2014-01-01', 'end': '2015-10-01'}\n print(\"Result Date\")\n date = str(result[\"date\"][0]).replace(\"'\", \"\\\"\")\n dates = json.loads(date)\n start = dates[\"start\"]\n end = dates[\"end\"]\n result[\"date\"] = {\"toggledaterange\": True, \"sdate\": start, \"start\": start, \"end\": end}\n\n # 
Convert to json stringØ\n jsonStr = json.dumps(result)\n\n return render_template(\"editPublication.html\", results=jsonStr)", "def updates(self):\n return self._get_page('updates').json()", "def refresh_rate_quote(mortgage_profile):\n api = MortechApi(mortgage_profile)\n api.get_response()", "def __refresh(self):\n\n\t\tself.status_code = self.__response.status_code\n\n\t\t# Handle errors. Sometimes there may not be a response body (which is\n\t\t# why ValueError) must be caught.\n\t\ttry:\n\t\t\tif (self.__response.status_code not in [200,204]) or \"error\" in self.__response.json():\n\t\t\t\terror = self.__response.json()\n\t\t\t\traise ResponseError(error[\"error\"][\"message\"], error[\"error\"][\"code\"])\n\t\texcept ValueError:\n\t\t\t# Sometimes DELETE returns nothing. When this is the case, it will\n\t\t\t# have a status code 204\n\t\t\tif self.__response.request.method is not \"DELETE\" and self.__response.status_code is not 204:\n\t\t\t\traise ResponseError(\"Unknown error occured.\", self.__response.status_code)\n\n\t\t# Get information on paging if response is paginated\n\t\tif \"X-Resultset-Total\" in self.__response.headers and self.__response.headers[\"X-Resultset-Total\"] > self.__response.headers[\"X-Resultset-Limit\"]:\n\t\t\tself.is_paginated = True\n\t\t\tself.records = int(self.__response.headers.get(\"X-Resultset-Total\"))\n\t\t\tself.page_size = int(self.__response.headers[\"X-Resultset-Limit\"])\n\t\t\tself.current_page = int(self.__response.headers[\"X-Resultset-Page\"])\n\t\t\tself.max_page = int(math.ceil(float(self.records)/int(self.page_size)))\n\n\t\t# Save the content of the request\n\t\ttry:\n\t\t\tself.json = self.__response.json()\n\t\texcept ValueError:\n\t\t\t# Sometimes DELETE returns nothing. When this is the case, it will have a status code 204\n\t\t\tif self.__response.request.method == \"DELETE\" and self.__response.status_code is 204:\n\t\t\t\tself.json = {\"success\":True}\n\t\t\telse:\n\t\t\t\traise ValueError(\"No JSON object could be decoded\" + self.__response.request.method)", "def refresh(self, check_mode=True):\n query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name\n return self.__exec_sql(query, check_mode=check_mode)", "def refresh():\n DB.drop_all()\n DB.create_all()\n results = get_results()\n for i, result in enumerate(results):\n record = Record(id=i, datetime=result[0], value=result[1])\n DB.session.add(record)\n DB.session.commit()\n return redirect(\"/\")", "def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def executeForRefresh(self):\n value = random.randint(1, 10000000)\n calcEngine = CalcEngine.factory(self.client_session)\n calcEngine.setNodeProperties(\n \"pyplan_refresh\", [{\"name\": \"definition\", \"value\": \"result = \" + str(value)}])\n return calcEngine.setNodeProperties(\"cub_refresh\", [{\"name\": \"definition\", \"value\": \"result = \" + str(value)}])", "def refresh():\n print(\"refresh request\")\n old_token = flask.request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "def Refresh(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for 
item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"refresh\", payload=payload, response_object=None)", "def update(self):\n\n pass", "def update(self, params):", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass" ]
[ "0.68004", "0.65762883", "0.6169109", "0.6017237", "0.58676904", "0.58676904", "0.5842514", "0.58078766", "0.571597", "0.5683472", "0.5669192", "0.56609315", "0.56607497", "0.56477195", "0.56477195", "0.5646708", "0.5581315", "0.5581315", "0.55265003", "0.5518151", "0.55140364", "0.5512183", "0.5511529", "0.5511529", "0.5511529", "0.5502183", "0.5484659", "0.5458461", "0.5452334", "0.5433787", "0.540576", "0.5401757", "0.53775686", "0.53750736", "0.5372829", "0.53406304", "0.5339116", "0.533241", "0.5325804", "0.5324081", "0.531488", "0.5304105", "0.5304105", "0.5304105", "0.52984613", "0.52977306", "0.5275688", "0.5269246", "0.52646273", "0.5250659", "0.5245556", "0.5244829", "0.5219243", "0.521019", "0.5200389", "0.5200389", "0.5197647", "0.51947063", "0.5192644", "0.51918447", "0.5171495", "0.51704365", "0.51685524", "0.516745", "0.51617914", "0.51509035", "0.51432097", "0.51386577", "0.51327163", "0.5130856", "0.5124792", "0.51208574", "0.5120799", "0.51051015", "0.5100365", "0.50916725", "0.508727", "0.5079207", "0.50754106", "0.5060492", "0.5059199", "0.50579566", "0.50553346", "0.5051255", "0.5048644", "0.50448", "0.5035971", "0.50201863", "0.5012335", "0.50107104", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523", "0.50056523" ]
0.8352617
0
This will update persuasions in bulk and will push them to ES. API: /persuasion/bulk/refresh?push_to_es=true&push_to_inflow=false [ {
def bulk_refresh(): logger.info("Refreshing/Updating persuasions in bulk") try: request_data = json.loads(request.data) args = request.args with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor: {executor.submit(PersuasionServices.refresh, data, args): data for data in request_data} return jsonify( dict(status="success", message="Your request is in the queue, persuasion will be updated shortly")) except Exception as e: logger.error("Exception while creating persuasions in bulk - " + repr(e)) return jsonify(dict(status="failure", error=repr(e)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schedule_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n args = request.args\n if not args.get(\"type\") or not args.get(\"sub_type\"):\n return jsonify(\n dict(status=\"failure\", message=\"Invalid type/sub_type\"))\n\n persuasions_request = PersuasionServices.get_persuasions_request_from_es(args)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in persuasions_request}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def refresh_persuasion():\n try:\n request_data = json.loads(request.data)\n meta = request.args\n persuasion = PersuasionServices.refresh(request_data, meta)\n except Exception as e:\n error_msg = \"Getting persuasion details - \" + repr(e)\n logger.error(error_msg)\n return jsonify(dict(status=\"failure\", error=error_msg))\n return jsonify(dict(status=\"success\", data=persuasion if persuasion else dict()))", "def bulk_update(self, request):\n serializer = MasterySerializer(\n data=request.data,\n many=True,\n )\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def update_embeddings(self, retriever):\n\n docs = self.get_all_documents()\n passages = [d.text for d in docs]\n logger.info(f\"Updating embeddings for {len(passages)} docs ...\")\n embeddings = retriever.embed_passages(passages)\n\n assert len(docs) == len(embeddings)\n\n doc_updates = []\n for doc, emb in zip(docs, embeddings):\n update = {\"_op_type\": \"update\",\n \"_index\": self.index,\n \"_id\": doc.id,\n \"doc\": {self.embedding_field: emb.tolist()},\n }\n doc_updates.append(update)\n\n bulk(self.client, doc_updates, request_timeout=300)", "def _batch_update(self, query, mutation):\n logger.info(\"Performing batch update on %s. Mutation: %s\", query, mutation)\n modified = 0\n for doc in self.instances.find(query):\n with lock_instance(doc['_id']):\n pre_update_doc = self.instances.find_one({'_id' : doc['_id']})\n result = self.instances.update_one({'_id': doc['_id']}, mutation)\n assert result.modified_count == 1\n modified += 1\n updated_doc = self.instances.find_one({'_id': doc['_id']})\n instance = FixtureInstance.deserialize_mongodoc(updated_doc)\n try:\n self.axdb_client.update_fixture_instance(instance.axdbdoc())\n except Exception:\n logger.exception(\"Failed to persist updates for %s. 
Undoing cache update\", instance)\n self.instances.replace_one({'_id' : instance.id}, pre_update_doc)\n raise\n logger.info(\"%s fixture instances modified\", modified)", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def update_batch(self, *args, **kwargs):\n pass", "def update_entities(self):\n raise NotImplementedError()", "def set_many(self, update_dict):\n for key, value in update_dict.items():\n # We just call `set` directly here, because this is an in-memory representation\n # thus we don't concern ourselves with bulk writes.\n self.set(key, value)", "def update_all():\n req_data = request.get_json()\n jobs = JobModel.get_one_job(job_id)\n if not jobs:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n data, error = job_schema.load(req_data, partial=True)\n if error:\n return custom_response(error, 400)\n\n for job in jobs:\n job.update(data)\n job_message = job_schema.dump(job)\n\n return custom_response(job_message, 200)", "def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)", "def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)", "def bulk_create():\n logger.info(\"Creating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.create, data): data for data in 
request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will create shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def update_targets(self, items):\n items = list(filter(None, chain(*items)))\n items = list(filter(None, items))\n\n if len(items) > 0:\n self.logger.info(\"Updating {} thermo documents\".format(len(items)))\n bulk = self.thermo().initialize_ordered_bulk_op()\n\n for m in items:\n m[self.thermo.lu_field] = datetime.utcnow()\n bulk.find({\"material_id\": m[\"material_id\"]}).upsert().replace_one(m)\n bulk.execute()\n else:\n self.logger.info(\"No items to update\")", "def update(self, es, **kwargs):\n pass", "def commit(self):\n with self.lock:\n try:\n action_buffer = self.BulkBuffer.get_buffer()\n if action_buffer:\n successes, errors = bulk(self.elastic, action_buffer)\n except Exception as e:\n # Handle errors from bulk indexing request\n raise\n \n retry_until_ok(self.elastic.indices.refresh, index=\"\")", "def push_bulk(self, obj_list, doc_type=None, refresh=True):\n assert isinstance(obj_list, collections.Sequence)\n assert len(obj_list) > 0\n\n es_obj_list = []\n for obj in obj_list:\n if obj is None:\n logger.warning(\"None object in input list\")\n continue\n\n doc_type, es_repr = self._validate_doc_and_get_type_and_repr(obj, doc_type)\n metadata = {\n '_op_type': 'index',\n \"_index\": self.index_name,\n \"_type\": doc_type,\n }\n es_repr.update(**metadata)\n\n es_obj_list.append(es_repr)\n\n helpers.bulk(client=self.conn.elastic_search_client, actions=es_obj_list,\n stats_only=True, refresh=u'true' if refresh else u'false')", "def refresh(self):\n yield from self._db._update_series(self.provider, self._dbattr(self.provider.IDATTR), dirty=self.providers)", "def set_many(self, update_dict):\n for key, value in update_dict.items():\n self.set(key, value)", "def updateAll(data):\n if (data.updatePositions):\n data.groups.player.update(data)\n data.groups.projectiles.update(data)\n data.groups.monsters.update(data)\n data.groups.spawners.update(data)", "def update_many(\n cls,\n *,\n pks: List[Union[str, int]],\n update: Dict[str, Any],\n sychronize_session: bool = False,\n ) -> None:\n if pks:\n db.session.query(cls).filter(\n getattr(cls, cls.get_primary_key()).in_(pks)\n ).update(update, synchronize_session=sychronize_session)\n db.session.commit()\n cache.delete_many(*(cls.create_cache_key(pk) for pk in pks))", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def entityUpdates(self, *args):\n\t\tfor entity in self.members.values():\n\t\t\tentity.update(*args)", "def process_es_bulk(pub_list, es):\n bulk_response = es.bulk(\n body=''.join(pub_list),\n refresh='wait_for',\n request_timeout=3600,\n )\n if bulk_response.get('errors'):\n logger.error('failed on bulk indexing:\\n%s',\n bulk_response)\n raise IndexingErrorException()\n return len(pub_list)", "def gbf_pub_update():\r\n LOG.info(\"Start: 
Update datasets in RLIDGeo warehouse.\")\r\n month_stamps = [\r\n datetime.date.today().strftime(\"%Y_%m\"),\r\n (\r\n datetime.date.today().replace(day=1)\r\n - datetime.timedelta(days=1)\r\n ).strftime(\"%Y_%m\"),\r\n ]\r\n for month_stamp in month_stamps:\r\n snapshot_db_path = SNAPSHOT_DB_PATH.format(month_stamp)\r\n if not os.path.exists(snapshot_db_path):\r\n LOG.warning(\"Snapshot database %s does not exist.\", snapshot_db_path)\r\n continue\r\n\r\n for _dataset in DATASETS:\r\n arcetl.features.update_from_dicts(\r\n dataset_path=_dataset.path(\"pub\"),\r\n update_features=source_rows(snapshot_db_path, _dataset.path(\"source\")),\r\n id_field_names=_dataset.id_field_names,\r\n field_names=_dataset.field_names,\r\n delete_missing_features=False,\r\n use_edit_session=False,\r\n )\r\n LOG.info(\"End: Update.\")", "def updateList(self):\n self._recreateJobs()", "def update_DB(self, iterable, entry_columns, update):\n conn = self.conn\n bulk = []\n old_bulk = []\n list_of_id_db = list()\n list_of_id_atuais = self.lista_atual()\n list_of_id_afastados = self.lista_afastados()\n\n if update:\n list_of_id_db = conn.execute('SELECT id_parlamentar FROM {}.{}'.format(self.schema, self.table))\n list_of_id_db = [tup[0] for tup in list_of_id_db]\n id_row_historic = list(conn.execute('SELECT MAX(id) FROM {}.{}_historic'.format(self.schema, self.table)))[0][0]\n if not id_row_historic:\n id_row_historic = 0\n\n for senador in tqdm(iterable):\n entry = self.fill_entry_senador(senador,entry_columns)\n id_parlamentar = entry['id_parlamentar']\n\n if id_parlamentar in list_of_id_atuais:\n entry['situacao_parlamentar'] = 'atual'\n elif id_parlamentar in list_of_id_afastados:\n entry['situacao_parlamentar'] = 'afastado'\n\n if id_parlamentar in list_of_id_db:\n compare_columns = 'id_parlamentar, nome_completo, nome_parlamentar_atual, forma_tratamento, sexo_parlamentar, data_nascimento, data_falecimento, sigla_uf_origem, endereco_origem, nome_cidade_origem, codigo_estado_civil, endereco_congresso, fone, fax, website, email, profissao, id_camara, id_senado, cpf, titulo_de_eleitor, descricao_participacao'\n\n old_row = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(compare_columns,self.schema, self.table,id_parlamentar))\n old_row = list(old_row)[0]\n new_row = tuple([entry[column] for column in compare_columns.split(', ')])\n\n if old_row != new_row:\n old_entry = copy.deepcopy(entry_columns)\n\n for key in old_entry.keys():\n old_date = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(key,self.schema, self.table,id_parlamentar))\n old_entry[key] = list(old_date)[0][0]\n old_entry['change_date'] = datetime.datetime.today() #capture of change date\n id_row_historic += 1\n old_entry['id'] = id_row_historic\n\n old_bulk.append(old_entry)\n conn.execute(\"DELETE FROM {}.{} WHERE id_parlamentar='{}'\".format(self.schema, self.table,id_parlamentar))\n\n bulk.append(entry)\n else:\n bulk.append(entry)\n\n if len(bulk) > 0:\n df = pd.DataFrame(bulk)\n df.set_index('id_parlamentar', inplace=True)\n print('Adding {} entries to SQL table {}.{}.'.format(len(df),self.schema, self.table))\n df.to_sql(self.table, con=self.conn, schema=self.schema, if_exists='append')\n\n if len(old_bulk) > 0:\n df2 = pd.DataFrame(old_bulk)\n df2.set_index('id_parlamentar', inplace=True)\n historic_table_name = self.table + '_historic'\n print('Adding {} entries to SQL table {}.{}.'.format(len(df2),self.schema, historic_table_name))\n df2.to_sql(historic_table_name, con=self.conn, 
schema=self.schema, if_exists='append')", "def update_metadata(api_token, base_url, records, updates):\n\n update_url = f\"{base_url}data_files/api_update?auth_token={api_token}\"\n\n counter = 0\n for record in tqdm.tqdm(records):\n # copy in the original ID of the search record into the file_id field of the updates\n updates['file_id'] = record['file_id']\n\n response = requests.post(update_url, data=updates)\n\n # Tally the number of successful updates\n if response.status_code == 200:\n counter += 1\n\n print(f\"{counter} records of {len(records)} successfully updated\")", "def update_mds(self, mds):\n submitted,failed = [],[]\n guids = list(mds)\n total = len(guids)\n count = 0\n for guid in guids:\n count+=1\n print(\"\\n\\tPosting '{}' to metadata service\".format(guid))\n mds_api = \"{}/mds/metadata/{}\".format(self._endpoint, guid)\n res = requests.put(mds_api, json=mds[guid], auth=self._auth_provider)\n\n if res.status_code > 199 and res.status_code < 300:\n submitted.append(guid)\n print(\"({}/{}) Submitted '{}' to MDS.\".format(count, total, guid))\n else:\n failed.append(guid)\n print(\"({}/{}) FAILED to submit '{}' to MDS.\".format(count, total, guid))\n print(\"\\n\\t\\t{}\".format(res.text))\n\n return {\"submitted\":submitted, \"failed\":failed}", "def update_all_metadata(self, metadata):\n return self.manager.update_all_metadata(self, metadata)", "def insert_to_elastic(elastic, paper_authors, papers, authors, index_name):\n helpers.bulk(elastic, merge_to_elastic(paper_authors, papers, authors, index_name))", "def update_particles(self):\n for particle in self.particles:\n particle.update_coordinates(self.bounds)", "def refresh():\n DB.drop_all()\n DB.create_all()\n samples = pull_pm('Los Angeles', 'pm25')\n for sample in samples:\n measure = Record(datetime = str(sample[0]), value = sample[1])\n DB.session.add(measure)\n DB.session.commit()\n return 'Data refreshed!'", "def update_sources(self):\n documents = self.get_docs_sources()\n \n for index,each_doc in enumerate(documents['docs']):\n if each_doc['found']:\n _,update_spec,action_buffer_index = self.doc_to_get[index]\n \n # maybe source already has been taken from elasticsearch\n # and updated. 
In that case get source from sources\n source = self.get_from_sources(each_doc['_index'], each_doc['_type'], each_doc['_id'])\n if not source:\n source = each_doc['_source']\n updated = self.docman.apply_update(source, update_spec)\n \n #Remove _id field from source\n if '_id' in updated: del updated['_id']\n # Everytime update source to keep it up-to-date\n self.add_to_sources(each_doc,updated)\n \n self.action_buffer[action_buffer_index]['_source'] = self.docman._formatter.format_document(updated)\n else:\n # Document not found in elasticsearch,\n # Seems like something went wrong during replication\n # or you tried to update document which while was inserting\n # didn't contain any field mapped in mongo-connector configuration\n self.doc_to_get = []\n raise errors.OperationFailed(\n \"mGET: Document id: {} has not been found\".format(each_doc['_id']))\n self.doc_to_get = []", "async def update_series(self):\n log.info('Updating all series')\n await self.updater.update_series()\n log.info('Done updating all series')", "def updateAllRealtime():\n for stockName in db.STOCK_MAP.keys():\n getStock(stockName, \"all\")\n\n db.UPDATING_REALTIME = False", "def update_many(\n self,\n *args: Union[dict, Mapping],\n session: Optional[ClientSession] = None\n ) -> UpdateMany:\n return self.update(*args, session=session)", "def updateAllEntities():\n entityKeys=list(registeredEntities.keys())\n for currKey in entityKeys:\n try:\n currEntity=registeredEntities[currKey]\n currEntity.update()\n except KeyError:\n #this should only be called if an entity is deleted (like if a rock got hit by a bullet)\n continue", "def update(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.update, params)", "def refresh():\n DB.drop_all()\n DB.create_all()\n results = get_results()\n for i, result in enumerate(results):\n record = Record(id=i, datetime=result[0], value=result[1])\n DB.session.add(record)\n DB.session.commit()\n return redirect(\"/\")", "def refresh_index(self):\n synchronize()\n # TODO: add logger call here\n self._compute_embeddings()", "def bulk_update_mappings(\n self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]]\n ) -> None:\n self._bulk_save_mappings(\n mapper, mappings, True, False, False, False, False\n )", "def batch_process(self, delete_list=[], update_list=[]):\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(\n self.request_url,\n data=json.dumps(payload),\n headers=self.default_headers,\n timeout=30\n )\n\n return r.status_code, r.json()", "def update_documents(self, engine_name, documents):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(documents)\n\n return self.swiftype_session.request('patch', endpoint, data=data)", "def do_bulk(self, args):\n pass", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def bulk_modify(self, file, lp, storage_id, remote_path=''):\n client, _ = self._get_storage(remote_path, storage_id=storage_id)\n return client.bulk_modify(lp, file)", "def refresh(self):\n if self.is_server_process and self.cache_manager.is_refreshing():\n raise RefreshInProgressError()\n catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()\n for catalog in catalogs:\n self._insert_request(self.refresh_queue, catalog, \"modify\")", "def store(self):\n articles = []\n for entry in self.feed():\n key = self.datastore_client.key(self.DATASTORE_KIND, 
entry['id'])\n article = datastore.Entity(key=key)\n article.update(entry)\n articles.append(article)\n self.datastore_client.put_multi(articles)", "def execute(self):\n for coll in list(self.__bulks):\n try:\n bulkOp = self.__bulks[coll]\n curr_result = Counter(bulkOp.execute())\n self.update_results(coll, curr_result)\n except BulkWriteError as bwe:\n sys.stderr.write(str(bwe.details))", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def _update_entities(*entities):\n futures = []\n for entity in entities:\n futures.append(entity.put_async())\n Future.wait_all(futures)\n results = []\n for future in futures:\n results.append(future.get_result())\n return results", "def handle_wps_update(self, data):\n\n self.jobs = data", "def update_entities(self, entities: List[str]) -> None:\n self._entities = entities or []", "def updateAll(self):\n \tself.idToUpdate=''\n \tself.newState=''\n \tself.save()", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)", "def update_project_documents(self, manifest_info):\n\n for proj_name, proj_info in manifest_info.projects.items():\n # See if project document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n key_name = f'project:{proj_name}'\n\n try:\n project_data = self.db.get_document(key_name)\n except cbdatabase_db.NotFoundError:\n project_data = dict(\n type='project', key_=key_name, name=proj_name\n )\n\n remote, repo_url = \\\n manifest_info.get_project_remote_info(proj_name)\n\n if 'remotes' in project_data:\n if remote in project_data['remotes']:\n if repo_url not in project_data['remotes'][remote]:\n project_data['remotes'][remote].append(repo_url)\n else:\n project_data['remotes'][remote] = [repo_url]\n else:\n project_data['remotes'] = {remote: [repo_url]}\n\n self.db.upsert_documents({key_name: project_data})", "def repopall(ctx):\n c = ctx.obj['client']\n if not c.login:\n return False\n\n r = requests.request(\"GET\", urljoin(c.BASE_URL, '/apiproxy/JobService.js'), params={'accesskey': c.login, 'method': 'PopulateAllSearches'})\n print(r.status_code, r.text)\n\n if r.status_code == 200:\n return True\n else:\n return False", "def bulk_upsert(self, data, parameters=None, many_to_many_clear: bool = True):\n elements = []\n\n # Skip if no identifier is set\n if not self.unique_identifier or not data:\n return\n\n for element in data:\n if isinstance(parameters, dict):\n element.update(parameters)\n\n element, created = self.upsert(data=element, many_to_many_clear=many_to_many_clear)\n elements.append(element)\n\n return elements", "def do_refresh_revisions():\n\n rows_by_type = _get_revisions_by_type()\n\n for type_, rows in 
rows_by_type.iteritems():\n logger.info(\"Updating revisions for: %s\", type_)\n _fix_type_revisions(type_, rows)", "def update_archive(self, population, offsprings, generation):\n # Store agents\n did_store = False\n for agent in offsprings:\n agent['stored'] = generation\n if self.archive.store(agent):\n self.emitters_pop[self.emitter_idx].stored += 1\n did_store = True\n\n if did_store:\n # Update emitter distribution (This one needs to be changed if wanting to use improvement emitters)\n solutions = [(genome, -value) for genome, value in zip(offsprings['genome'], offsprings['reward'])]\n self.emitters_pop[self.emitter_idx].tell(solutions)\n else:\n # If the emitter did not store anything, we restart it from another elite\n elite_idx = np.random.choice(self.archive.size)\n self.emitters_pop[self.emitter_idx] = OptimizingEmitter(self.archive['genome'][elite_idx],\n self.archive['id'][elite_idx],\n mutation_rate=0.5,\n bounds=self.bounds,\n parameters=self.params)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def update(self):\n self._sync_ranges()\n self._update_params()", "def salesforce_collection_update(self, objects):\n for obj in objects:\n assert obj[\n \"id\"\n ], \"Should be a list of objects with Ids returned by Salesforce Collection Insert\"\n if STATUS_KEY in obj:\n del obj[STATUS_KEY]\n\n assert len(objects) <= SF_COLLECTION_INSERTION_LIMIT, (\n \"Cannot update more than %s objects with this keyword\"\n % SF_COLLECTION_INSERTION_LIMIT\n )\n\n records = self.cumulusci.sf.restful(\n \"composite/sobjects\",\n method=\"PATCH\",\n json={\"allOrNone\": True, \"records\": objects},\n )\n\n for record, obj in zip(records, objects):\n obj[STATUS_KEY] = record", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def to_db(self):\n bulk = conn_db().initialize_ordered_bulk_op()\n for fiction in self.fictions:\n bulk.find({'id': fiction.id}).upsert().update({'$set': fiction.__dict__})\n bulk.execute()", "def batch_process_async(self, delete_list=[], update_list=[]):\n headers = update_dict(self.default_headers, {self.API_VERSION_HEADER: self.API_VERSIONS[\"v2\"]})\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(self.request_url, data=json.dumps(payload), headers=headers, timeout=30)\n\n return r.status_code, r.json()", "def do_update(services):\n\n global running_update\n\n for service in services:\n feed = registry[service.name][0]\n try:\n if type(feed) is list:\n entries = []\n for f in feed:\n entries.extend(f(service))\n else:\n entries = feed(service)\n # TODO should be in a transaction\n for entry in entries:\n entry.save()\n service.updated = datetime.utcnow()\n service.save()\n except:\n logging.exception(msg='updater exception for service %s' %service.name, exception=True)\n running_update = False", "def update_state(self, slate_documents, responses):", "def updateMasivo(self, objects):\n retorno = 1\n try:\n for object in objects:\n self.session.merge(object)\n self.session.commit()\n \n except sqlalchemy.exc.DBAPIError, e:\n if self.session is not None:\n self.session.rollback()\n retorno = -1\n print(\"Error!\", e)\n finally:\n if self._DAOAlchemy__cerrarSesion:\n self.session.close()\n return retorno", "def refresh(self, parameters = {}):\n\n self.__enforce_connected()\n self.collection.refresh(self, parameters = parameters)", "def update():\n query = 
db.session.query(db.Series).filter_by(following=True).all()\n output.series('Updating {} series'.format(len(query)))\n for follow in query:\n series = series_by_url(follow.url)\n series.update()\n list_new()", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def update_list(self):\n Asset.update_list(self, uri_keys=('ems_sys', 'list'))", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def flush_batch(self, batch):\n inserts = []\n replacements = []\n\n for action_type, data in batch:\n if action_type == processor.INSERT:\n inserts.append(data)\n elif action_type == processor.REPLACE:\n replacements.append(data)\n\n if inserts:\n write_rows(\n self.clickhouse,\n self.dist_table_name,\n inserts\n )\n\n if self.metrics:\n self.metrics.timing('inserts', len(inserts))\n\n if replacements:\n for key, replacement in replacements:\n self.producer.produce(\n self.replacements_topic,\n key=six.text_type(key).encode('utf-8'),\n value=json.dumps(replacement).encode('utf-8'),\n on_delivery=self.delivery_callback,\n )\n\n self.producer.flush()", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def update_users_bulk(self, users):\n # type: (list) -> dict\n payload = {'updated': users}\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n return self.__create_request(payload=payload, request_type=self.REQUEST_POST, version=\"v1\")", "def updates(self, request, version):\n task = EtlTask.objects.get_for_model(self.queryset.model)\n if task.last_changes:\n offset = task.last_changes.strftime(\"%Y-%m-%d %H:%M\")\n queryset = self.queryset.filter(last_modify_date__gte=offset)\n else:\n offset = \"none\"\n queryset = self.queryset.all()\n queryset = self.paginate_queryset(queryset)\n serializer = self.get_serializer(queryset, many=True)\n return self.get_paginated_response(serializer.data)\n # return Response(serializer.data,\n # headers={'update-date': offset})", "def bulk_write(collection, iterable, job_id=None, unsafe=False):\n namespace = Namespace(collection.foreign_id)\n stage = get_stage(collection, OP_INDEX, job_id=job_id)\n entities = []\n for item in iterable:\n if not is_mapping(item):\n raise InvalidData(\"Failed to read input data\", errors=item)\n entity = model.get_proxy(item)\n entity = namespace.apply(entity)\n if not unsafe:\n entity = remove_checksums(entity)\n entities.append(entity)\n index_entities(stage, collection, entities)", "async def reload_all(ctx):\n await ext_manager.reload_all()\n await ctx.send(\"Successfully reloaded.\")", "def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'", "def update(self, values):\n return self.manager.update(self, values)", "def apply_updates(self, updates):\n raise NotImplementedError()", "def upload_entities(self, batch):\n # TODO Include a Do Not Overwrite call\n results = None\n atlas_endpoint = self.endpoint_url + \"/entity/bulk\"\n\n payload = AtlasClient._prepare_entity_upload(batch)\n\n postBulkEntities = requests.post(\n atlas_endpoint,\n json=payload,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(postBulkEntities)\n\n return results", "def bulkSave(self, 
objList: List[PermissionContext], tokenData: TokenData):", "def apply(self):\n\n data = self._to_dict()\n\n # read only attributes\n del data['Provider']\n del data['MarkedForDelOn']\n del data['IsDeleted']\n del data['Database']\n\n data = {k: v for k, v in data.items() if v is not None}\n\n # remove empty arrays. Note: pylint will complain about this\n # but zeep will complain even more :)\n if len(data['Attributes']) == 0:\n del data['Attributes']\n\n if len(data['SecondaryDomains']) == 0:\n del data['SecondaryDomains']\n\n updateshopobj = self.sc.get_updateshop_obj(data)\n\n self.sc.update(updateshopobj)\n\n self.refresh()", "def flush(self):\n self.save()\n for obs in self.observation_set.all():\n obs.flush();", "def merge_to_elastic(paper_authors, papers, authors, index_name):\n columns = list(papers.columns) + ['authors']\n for index, paper in papers.iterrows():\n merger = paper_authors.loc[paper_authors['paper_id'] == index]\n author_ids = merger['author_id'].values\n author_names = [authors.loc[authors['id'] == x, 'name'].values[0] for x in author_ids]\n paper['authors'] = author_names\n yield {\n \"_index\": index_name,\n \"_type\": \"_doc\",\n \"_id\" : f\"{index}\",\n \"_source\": filterKeys(paper, columns),\n }", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def pools_refresh(self):\n path = \"%s/commands/poolsRefresh\" % self.__base_path\n response = self.__session.post(path)\n self.__check_status_code(response.status_code)\n return response.json()" ]
[ "0.76656324", "0.6743843", "0.6214206", "0.59055984", "0.58515877", "0.57441527", "0.5547801", "0.54766494", "0.54745275", "0.54507333", "0.5444725", "0.542627", "0.53595006", "0.5348997", "0.5331371", "0.52764916", "0.527101", "0.52681786", "0.5237418", "0.5237225", "0.5230986", "0.5213352", "0.5195876", "0.5189852", "0.5159918", "0.515815", "0.5137841", "0.51265115", "0.5119004", "0.5090844", "0.5079043", "0.5068937", "0.5054568", "0.50518733", "0.504478", "0.5031268", "0.5028038", "0.5025309", "0.50041467", "0.5000888", "0.49982923", "0.4989702", "0.49872592", "0.49785417", "0.49601278", "0.49572235", "0.49557477", "0.49524382", "0.4941155", "0.49405015", "0.4930987", "0.49290067", "0.49239975", "0.49214068", "0.49100196", "0.4908053", "0.4908053", "0.4908053", "0.4883649", "0.487883", "0.48743615", "0.48625854", "0.48596442", "0.48577398", "0.48516792", "0.48330256", "0.48314434", "0.48264906", "0.48247454", "0.48179412", "0.4809989", "0.48075843", "0.47975373", "0.47900942", "0.47773397", "0.47736093", "0.4756942", "0.4745793", "0.47438946", "0.4733899", "0.47282013", "0.47276178", "0.47270626", "0.47268492", "0.4726616", "0.47229195", "0.4710116", "0.47005188", "0.46971714", "0.4695512", "0.4693281", "0.46905023", "0.46902966", "0.46890056", "0.46859133", "0.4685558", "0.46780232", "0.4671876", "0.46715236", "0.46710223" ]
0.8021315
0
This will also update persuasions in bulk based on type subtype and will push it to ES API /persuasion/schedule/refresh?push_to_es=true&push_to_inflow=false&type=quality_score&sub_type=inventory_depth
def schedule_refresh():
    logger.info("Refreshing/Updating persuasions in bulk")
    try:
        args = request.args
        if not args.get("type") or not args.get("sub_type"):
            return jsonify(
                dict(status="failure", message="Invalid type/sub_type"))

        persuasions_request = PersuasionServices.get_persuasions_request_from_es(args)

        with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:
            {executor.submit(PersuasionServices.refresh, data, args): data for data in persuasions_request}

        return jsonify(
            dict(status="success", message="Your request is in the queue, persuasion will be updated shortly"))
    except Exception as e:
        logger.error("Exception while creating persuasions in bulk - " + repr(e))
        return jsonify(dict(status="failure", error=repr(e)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bulk_refresh():\n logger.info(\"Refreshing/Updating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n args = request.args\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.refresh, data, args): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will be updated shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def do_refresh_revisions():\n\n rows_by_type = _get_revisions_by_type()\n\n for type_, rows in rows_by_type.iteritems():\n logger.info(\"Updating revisions for: %s\", type_)\n _fix_type_revisions(type_, rows)", "def updateAll(data):\n if (data.updatePositions):\n data.groups.player.update(data)\n data.groups.projectiles.update(data)\n data.groups.monsters.update(data)\n data.groups.spawners.update(data)", "def mark_processed_entities(entity_type, max_date):\r\n\r\n try:\r\n \r\n # get a connection and cursor\r\n conn = ecommerce.db.getConnection()\r\n cursor = conn.cursor()\r\n \r\n # execute the query\r\n cursor.execute(\"\"\"\r\n UPDATE Stage0_Delta\r\n SET FlagUpdated = 0\r\n WHERE EntityType = ? AND\r\n FlagUpdated = 1 AND\r\n LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS')\r\n \"\"\", (entity_type, max_date) )\r\n \r\n # commit changes\r\n conn.commit()\r\n except:\r\n conn.rollback()\r\n pass", "def update_position():\n submissions = get_submissions_effective_date_today()\n\n if submissions:\n for submission in submissions:\n submission_changes = get_submission_changes(submission['id'])\n\n for change in submission_changes:\n execute_statement(\n f\"UPDATE position SET {change['field']} = '{change['change']}' WHERE number = '{submission['position_id']}' and isBudget = 0\"\n )\n\n # create audit trail in db\n create_audit_log('127.0.0.1', 'Server', '127.0.0.1', action='Successfully updated position {} on submission effective date'.format(\n submission['position_id']), table='position', function='UPDATE')\n else:\n # create audit trail in db\n create_audit_log('127.0.0.1', 'Server', '127.0.0.1',\n action='No positions to update today', table='position', function='')", "def _batch_update(self, query, mutation):\n logger.info(\"Performing batch update on %s. Mutation: %s\", query, mutation)\n modified = 0\n for doc in self.instances.find(query):\n with lock_instance(doc['_id']):\n pre_update_doc = self.instances.find_one({'_id' : doc['_id']})\n result = self.instances.update_one({'_id': doc['_id']}, mutation)\n assert result.modified_count == 1\n modified += 1\n updated_doc = self.instances.find_one({'_id': doc['_id']})\n instance = FixtureInstance.deserialize_mongodoc(updated_doc)\n try:\n self.axdb_client.update_fixture_instance(instance.axdbdoc())\n except Exception:\n logger.exception(\"Failed to persist updates for %s. 
Undoing cache update\", instance)\n self.instances.replace_one({'_id' : instance.id}, pre_update_doc)\n raise\n logger.info(\"%s fixture instances modified\", modified)", "def bulk_update(self, request):\n serializer = MasterySerializer(\n data=request.data,\n many=True,\n )\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def train(self):\n if self.check_if_necessary() is False:\n return\n # open a transaction\n with db as session:\n # Reset popularity score (delete and re-add column for score)\n session.execute(\n text('ALTER TABLE \"%s\" DROP COLUMN popularity_score' % self.__media__[0].tablename))\n session.execute(\n text('ALTER TABLE \"%s\" ADD COLUMN popularity_score DOUBLE PRECISION' % self.__media__[0].tablename))\n for media in self.__media__:\n st_time = datetime.utcnow()\n\n m = media(logger=self.logger)\n q_df = m.get_populars(size=1000)\n\n # open a transaction\n with db as session:\n # Set new popularity score\n for index, row in q_df.iterrows():\n session.execute(\n text(\"UPDATE %s SET popularity_score = %s WHERE %s = %s\" % (m.tablename, row[\"popularity_score\"], m.id, row[m.id])))\n self.logger.debug(\"%s popularity reloading performed in %s (%s lines)\" %\n (str(m.content_type) or \"ALL CONTENT\", datetime.utcnow()-st_time, q_df.shape[0]))\n self.store_date(m.content_type)", "def update_performance_reports(update_all=True):\n from mspray.apps.main.utils import performance_report\n\n time_within = UPDATE_VISITED_MINUTES\n time_since = timezone.now() - timedelta(minutes=time_within + 1)\n\n if update_all:\n submissions = SprayDay.objects.all()\n else:\n submissions = SprayDay.objects.filter(\n Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)\n )\n\n sop_queryset = (\n SprayDay.objects.filter(\n Q(created_on__gte=time_since) | Q(modified_on__gte=time_since)\n )\n .filter(spray_operator__isnull=False)\n .only(\"spray_operator\")\n .distinct(\"spray_operator\")\n )\n\n for record in sop_queryset:\n performance_report(\n record.spray_operator,\n submissions.filter(spray_operator=record.spray_operator),\n )", "def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)", "def update_particles(self):\n for particle in self.particles:\n particle.update_coordinates(self.bounds)", "def update(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.update, params)", "def update_monomer_population(self,monomer_type,update_type='remove'):\n if update_type=='remove':\n self.monomer_population[monomer_type] += -1\n self.added_set[monomer_type] += 1\n \n self.total_monomers += -1\n self.network_size += 1\n elif update_type=='add':\n self.monomer_population[monomer_type] += 1\n self.added_set[monomer_type] += -1\n \n self.total_monomers += 1\n \n self.free_monomer_fraction = self.total_monomers/float(self.total_sites)\n\n 
self.construct_prob_dist()\n self.compute_mean_eps_self()", "def _update_coverage(self, msg, subtype, by):\n try:\n coverage = self.get_local(msg, \"coverage\")\n except KeyError:\n coverage = defaultdict(int)\n coverage[\"all\"] += by\n coverage[subtype] += by\n self.set_local(msg, \"coverage\", coverage)", "def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def update(self, es, **kwargs):\n pass", "def update_members_from_preferences(self, **parameters):\n super(Sequence, self).update_members_from_preferences(**parameters)\n\n for i, item in enumerate(self.items):\n para = parameters['item_{}'.format(i)]\n item.update_members_from_preferences(**para)", "def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})", "def refresh_persuasion():\n try:\n request_data = json.loads(request.data)\n meta = request.args\n persuasion = PersuasionServices.refresh(request_data, meta)\n except Exception as e:\n error_msg = \"Getting persuasion details - \" + repr(e)\n logger.error(error_msg)\n return jsonify(dict(status=\"failure\", error=error_msg))\n return jsonify(dict(status=\"success\", data=persuasion if persuasion else dict()))", "def process(self):\n\t\tif self.update_check():\n\t\t\tself.ingest_all()\n\t\t\tself.update_totals()\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')", "async def actualize(self):\r\n for guild, settings in self.bot.settings.items():\r\n # Grab the roles and their requirements\r\n guild = self.bot.get_guild(guild)\r\n base_member = settings.get(\"rank_basic_member_role_id\")\r\n base_member = guild.get_role(base_member)\r\n active_member = settings.get(\"rank_active_member_role_id\")\r\n active_member = guild.get_role(active_member)\r\n active_member_days = settings.get(\"rank_active_member_required_days\")\r\n active_member_activity = settings.get(\"rank_active_member_required_activity\")\r\n junior_mod = settings.get(\"rank_junior_mod_role_id\")\r\n junior_mod = guild.get_role(junior_mod)\r\n junior_mod_days = settings.get(\"rank_junior_mod_required_days\")\r\n junior_mod_activity = settings.get(\"rank_junior_mod_required_activity\")\r\n senior_mod = settings.get(\"rank_senior_mod_role_id\")\r\n senior_mod = guild.get_role(senior_mod)\r\n admin = settings.get(\"rank_admin_role_id\")\r\n admin = guild.get_role(admin)\r\n # Fetch the list of members eligible for each rank by their activity\r\n active_member_eligible = await util_users.fetch_users_by_days_and_activity(\r\n self.bot, guild, active_member_days, active_member_activity\r\n )\r\n active_member_eligible = [i[0] for i in active_member_eligible]\r\n junior_mod_eligible = await util_users.fetch_users_by_days_and_activity(\r\n self.bot, guild, junior_mod_days, junior_mod_activity\r\n )\r\n junior_mod_eligible = [i[0] for i in junior_mod_eligible]\r\n # Iterate over all members to edit their roles\r\n for member in [i for i in guild.members if not i.bot]:\r\n # Admin check\r\n if admin in member.roles:\r\n continue\r\n # Senior mod check\r\n elif senior_mod in member.roles:\r\n continue\r\n # Junior mod check\r\n elif junior_mod in 
member.roles:\r\n if member.id not in junior_mod_eligible:\r\n await member.remove_roles(junior_mod)\r\n await member.add_roles(active_member)\r\n # Active member check\r\n elif active_member in member.roles:\r\n if member.id not in active_member_eligible:\r\n await member.remove_roles(active_member)\r\n await member.add_roles(base_member)\r\n # Base member check\r\n elif base_member in member.roles:\r\n if member.id in active_member_eligible:\r\n await member.add_roles(active_member)", "def update(self, events, **kwargs):\n self.collisions.update(self._collidables, self._projectiles)\n for o in list(self._updateables):\n o.update(gm=self, events=events, **kwargs)", "def refresh_counterfactual_url_type(request, compound_type):\t\n\n\tnow = datetime.datetime.now()\n\tprint \"Start Time %s\" % now\n\n\tc_type \t\t= compound_type \t# compound type, o to be only and c to be compound\n\tprimeCoordinator= PrimeCoordinator() \t# initiate an PrimeCoordinator object to manipulate inputed parameters to get a counterfactual results\n\n\tnow = datetime.datetime.now()\n\tprint \"Coordinator initialised %s\" % now\n\n\t# Get variables from URL and create array\n\texposure_name \t\t= request.GET.get('exposure', '')\n\te_id \t\t\t= DBHelper.get_exposure_id_from_name(exposure_name)\n\tmean \t\t\t= request.GET.get('mean', '')\n\tsd \t\t\t= request.GET.get('standard-deviation', '')\n\tnon_rate\t\t= request.GET.get('non-rate', '')\n\texposure_sequence \t= [{'non_rate':non_rate,'mean':mean,'e_id':e_id,'sd':sd}]\n\n\tprimeCoordinator.get_counterfactual_compound_exposures(exposure_sequence)\n\n\tnow = datetime.datetime.now()\n\tprint \"Coordinator given exposures %s\" % now\n\n\t# get the data in the interface\n\tb_output_mortality \t= primeCoordinator.output_baseline_mortality # baseline mortality list for all outcomes\n\tb_output_mortality_num \t= primeCoordinator.output_baseline_mortality_num # baseline mortality sum up for each outcome\n\tb_total_mortality \t= primeCoordinator.output_baseline_mortality_total# baseline mortality sum up for all outcomes\n\n\tnow = datetime.datetime.now()\n\tprint \"Total baseline mortality calculated %s\" % now\n\n\tc_output_mortality \t= primeCoordinator.output_counterfactual_mortality# counterfactual mortality for all outcomes\n\tc_output_mortality_num \t= primeCoordinator.output_counterfactual_mortality_num# counterfactual mortality for each outcome\n\tc_total_mortality \t= primeCoordinator.output_counterfactual_mortality_total# counterfactual mortality sum up for all outcomes\n\n\tnow = datetime.datetime.now()\n\tprint \"Total counterfactual mortality calculated %s\" % now\n\n\ttotal_population\t= primeCoordinator.output_total_population\n\tall_mortality_exposure\t= primeCoordinator.output_all_mortality_exposure_outcome\n\n\tnow = datetime.datetime.now()\n\tprint \"Total deaths averted calculated %s\" % now\n\n\ttotal_death_averted\t= str(round(primeCoordinator.output_total_death_averted,0))\n\ttotal_death_baseline\t= str(primeCoordinator.output_total_death_baseline)\n\n\n\tnow = datetime.datetime.now()\n\tprint \"End Time: %s\" % now\n\n\t#transmit the parameters\n\ttemplate = loader.get_template('primemodel/index.html')\n\tpara_view = 
{\n\t\t\t'b_output_mortality_num':\tb_output_mortality_num,\n\t\t\t'b_total_mortality':\t\tb_total_mortality,\n\t\t\t'c_output_mortality_num':\tc_output_mortality_num,\n\t\t\t'c_total_mortality':\t\tc_total_mortality,\n\t\t\t'total_population':\t\t\ttotal_population,\n\t\t\t'total_death_averted':\t\ttotal_death_averted,\n\t\t\t'total_death_baseline':\t\ttotal_death_baseline,\n\t\t\t'all_mortality_exposure':\tall_mortality_exposure\n\t\t\t}\n\n\t#context to transmit the parameters to show\n\tcontext = Context(para_view)\n\tresponse = template.render(context)\n\treturn HttpResponse(response)", "def _process(self, start_key, batch_size):\n\n query = self.MODEL.all()\n if start_key:\n query.filter('__key__ > ', start_key)\n\n try:\n entities = query.fetch(batch_size)\n\n if not entities:\n # all entities has already been processed\n return\n\n for entity in entities:\n for field in self.FIELDS_TO_UPDATE:\n old_reference = getattr(entity, field)\n\n if not old_reference:\n continue\n\n # check if the field has not been updated\n if isinstance(old_reference, self.PROFILE_MODEL):\n continue\n\n profile = _getProfileForRole(old_reference, self.PROFILE_MODEL)\n setattr(entity, field, profile)\n\n for list_property in self.LISTS_TO_UPDATE:\n l = getattr(entity, list_property)\n new_l = []\n for key in l:\n new_l.append(_getProfileKeyForRoleKey(key, self.PROFILE_MODEL))\n setattr(entity, list_property, new_l)\n\n db.put(entities)\n start_key = entities[-1].key()\n deferred.defer(self._process, start_key, batch_size)\n except DeadlineExceededError:\n # here we should probably be more careful\n deferred.defer(self._process, start_key, batch_size)", "def update_entities(self):\n raise NotImplementedError()", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def update_quality():\n global items, converted_items\n if not converted_items:\n items = convert_items(items)\n converted_items = True\n for item in items:\n item.update_q()", "def setEmpireStats(self):\n totalEmpires = len(self.empires.keys())\n stats = {'Research':[], 'Fleet Size':[], 'Army Size':[], 'CR Production':[],\n 'AL Production':[],'EC Production':[],'IA Production':[]}\n \n # Calculate Research Stats\n d = {}\n for empireID, myEmpire in self.empires.iteritems():\n if empireID <> '1':\n num = 0\n for techID, myTech in myEmpire.techTree.iteritems():\n if myTech.complete == 1:\n num += 1\n d[empireID] = num\n stats['Research'] = 
anwp.func.funcs.sortDictByValue(d, True)\n \n # Calculate Fleet Stats\n d = {}\n for shipID, myShip in self.ships.iteritems():\n if myShip.empireID <> '1':\n (BV,CR,AL,EC,IA) = myShip.getMyValue()\n if myShip.empireID in d.keys():\n d[myShip.empireID] += BV\n else:\n d[myShip.empireID] = BV\n stats['Fleet Size'] = anwp.func.funcs.sortDictByValue(d, True)\n \n # Calculate Army Stats\n d = {}\n for regimentID, myRegiment in self.regiments.iteritems():\n if myRegiment.empireID <> '1':\n (BV,CR,AL,EC,IA) = myRegiment.getMyValue()\n if myRegiment.empireID in d.keys():\n d[myRegiment.empireID] += BV\n else:\n d[myRegiment.empireID] = BV\n stats['Army Size'] = anwp.func.funcs.sortDictByValue(d, True)\n\n # Calculate Production Stats\n for res in ['CR','AL','EC','IA']:\n d = {}\n for systemID, mySystem in self.systems.iteritems():\n if mySystem.myEmpireID <> '1':\n myValue = getattr(mySystem, 'prod%s' % res)\n if mySystem.myEmpireID in d.keys():\n d[mySystem.myEmpireID] += myValue\n else:\n d[mySystem.myEmpireID] = myValue\n myEmpire = self.empires[mySystem.myEmpireID]\n myEmpireValue = getattr(myEmpire, 'totalProd%s' % res)\n setattr(myEmpire, 'totalProd%s' % res, myEmpireValue+myValue)\n \n stats['%s Production' % res] = anwp.func.funcs.sortDictByValue(d, True)\n \n # calculate top captains\n d = {}\n for captainID, myCaptain in self.captains.iteritems():\n if myCaptain.myEmpire.id <> '1':\n myCaptain.resetData()\n d[myCaptain.id] = myCaptain.experience\n topCaptains = anwp.func.funcs.sortDictByValue(d, True)\n topCaptains = topCaptains[:2*len(self.empires.keys())]\n \n # Send out Stats to each Empire\n for empireID, myEmpire in self.empires.iteritems():\n if empireID <> '1':\n title = 'Round:%d Statistics' % self.currentRound\n body = ['%s ROUND %d STATS:' % (myEmpire.name, self.currentRound)]\n body.append('====================================================')\n for item in ['Research','Fleet Size', 'Army Size', 'CR Production',\n 'AL Production', 'EC Production', 'IA Production']:\n if empireID in stats[item]:\n body.append('You are %s in %s' % (anwp.func.funcs.getNiceNumber(stats[item].index(empireID)+1), item))\n \n # total production\n body.append('')\n body.append('TOTAL EMPIRE PRODUCTION OVER %d ROUNDS:' % self.currentRound)\n body.append('====================================================')\n for res in ['CR','AL','EC','IA']:\n body.append('Total %s Production: %d' % (res, getattr(myEmpire, 'totalProd%s' % res)))\n\n # legendary captains\n body.append('')\n body.append('TOP %d STARSHIP CAPTAINS in ROUND %d:' % ((2*len(self.empires.keys()), self.currentRound)))\n body.append('====================================================')\n for captainID in topCaptains:\n myCaptain = self.captains[captainID]\n myCaptain.promoteMe()\n body.append('%s ---> RANK:%s -- EXP:%d -- %s' % (string.upper(myCaptain.name), myCaptain.rank, myCaptain.experience, string.upper(myCaptain.myEmpire.name)))\n \n myEmpire.genMail({'fromEmpire':empireID, 'round':self.currentRound,\n 'messageType':'general', 'subject':title, 'body':body})", "def update(self, data):\n self.data = data\n # extract data\n subject = data.identifier\n self.id = _uri_to_oai(subject)\n self.modified = datetime.utcnow()\n self.deleted = False\n\n itemtype, subtype = _get_itemtype(data, subject)\n\n self.metadata = {}\n\n # fixed fields:\n self.metadata['rif_key'] = _uri_to_key(subject)\n self.metadata['rif_group'] = self.provider.groupDescription\n self.metadata['rif_originatingSource'] = self.provider.originatingSource\n 
self.metadata['rif_object'] = {'value': itemtype,\n 'type': subtype,\n #'dateModified': '',\n }\n\n if itemtype == 'collection':\n self.updateCollection(data, subject)\n elif itemtype == 'party':\n if subtype == 'person':\n self.updatePartyPerson(data, subject)\n else:\n self.updateParty(data, subject)\n elif itemtype == 'activity':\n self.updateActivity(data, subject)\n elif itemtype == 'service':\n self.updateService(data, subject)", "def _postprocessing(self):\n # (in)validate\n if len(self._var_names) == 0:\n self.invalidate()\n else:\n self.put_param('is_valid', True)\n \n # set type\n self.put_param('type', 'Generic')", "def periodicUpdate(self):\n try:\n logging.info(f'{self.cn} periodicUpdate = Start')\n isHaz = JsonSettings.parseJson('settings.json','isHazelcast')\n if self.db.isDb():\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n else:\n self.db.initDb()\n self.insertSys()\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n except Exception as e:\n logging.critical(f'{self.cn} Exception: {e}')\n logging.critical(f'{self.cn} StackTrace: \\n', exc_info=1)\n finally:\n logging.info(f'{self.cn} periodicUpdate = End')", "def _fix_type_revisions(type_, rows):\n model = getattr(all_models, type_, None)\n revisions_table = all_models.Revision.__table__\n if not model:\n logger.warning(\"Failed to update revisions for invalid model: %s\", type_)\n return\n\n ids = [row.resource_id for row in rows]\n objects = model.eager_query().filter(model.id.in_(ids))\n\n obj_content_map = {obj.id: obj.log_json() for obj in objects}\n\n for row in rows:\n # This if statement checks that we only update content for objects that\n # exist. If an object has been deleted via import or in some other way that\n # its delete revision was not created, this if statement will prevent a\n # false error.\n # Note: there will be other migrations that deal with adding missing\n # revisions for those deleted objects.\n if row.resource_id in obj_content_map:\n db.session.execute(\n revisions_table.update()\n .where(revisions_table.c.id == row.id)\n .values(content=obj_content_map[row.resource_id])\n )\n db.session.commit()", "def task_refresh_all_stats_score(request):\n start = time.time()\n cls_name = request.POST.get('cls') or 'Day'\n destroy = int(request.POST.get('destroy', '0'))\n cursor = datastore_query.Cursor(urlsafe=request.POST.get('cursor'))\n task_count = int(request.POST.get('task_count', '0'))\n assert cls_name in ('Day', 'Multi'), cls_name\n cls = (\n models.AccountStatsDay\n if cls_name == 'Day' else models.AccountStatsMulti)\n\n # Task queues are given 10 minutes. 
Do it in 9 minutes chunks to protect\n # against most timeout conditions.\n timeout = 540\n updated = 0\n skipped = 0\n try:\n futures = []\n chunk_size = 10\n items = []\n more = True\n if destroy:\n options = ndb.QueryOptions(keys_only=True)\n else:\n options = ndb.QueryOptions()\n while more:\n batch, cursor, more = cls.query(default_options=options).fetch_page(\n 20, start_cursor=cursor)\n if destroy:\n futures.extend(ndb.delete_multi_async(batch))\n updated += len(batch)\n else:\n for i in batch:\n score = models.compute_score(i)\n if i.score != score:\n items.append(i)\n if len(items) == chunk_size:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n items = []\n futures = [f for f in futures if not f.done()]\n else:\n skipped += 1\n if time.time() - start >= timeout:\n break\n if items:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n ndb.Future.wait_all(futures)\n if not more and cls_name == 'Day':\n # Move to the Multi instances.\n more = True\n cls_name = 'Multi'\n cursor = datastore_query.Cursor()\n if more:\n taskqueue.add(\n url=reverse(task_refresh_all_stats_score),\n params={\n 'cls': cls_name,\n 'cursor': cursor.urlsafe() if cursor else '',\n 'destroy': str(destroy),\n 'task_count': str(task_count+1),\n },\n queue_name='refresh-all-stats-score')\n result = 200\n except (db.Timeout, DeadlineExceededError):\n result = 500\n out = 'Index: %d\\nType = %s\\nStored %d items\\nSkipped %d\\nIn %.1fs\\n' % (\n task_count, cls.__name__, updated, skipped, time.time() - start)\n if result == 200:\n logging.info(out)\n else:\n logging.error(out)\n return HttpTextResponse(out, status=result)", "def process(self):\n\t\tif self.update_check() or self.force_update:\n\t\t\tself.district_check() #pull all local data and regions\n\t\t\tself.fix() #fix data anomalies - e.g add in Bucks.\n\t\t\tself.save_all() #store a copy of the data\n\t\t\tself.ingest() #add data to models\n\t\t\tself.update_totals() #calculate weekly data\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')", "def entityUpdates(self, *args):\n\t\tfor entity in self.members.values():\n\t\t\tentity.update(*args)", "def _save_stats(self, msg, payload, subtype):\n\n image_id = md5(payload).hexdigest()\n\n try:\n sizes = self.get_local(msg, \"sizes\")\n except KeyError:\n sizes = defaultdict(dict)\n\n if image_id in sizes['all']:\n img = sizes['all'][image_id]\n area = img['width'] * img['height']\n self._update_coverage(msg, subtype, area)\n\n else:\n try:\n img = self._get_image_sizes(payload)\n except BadImageFile:\n self._update_invalid_counts(msg, subtype, 1)\n else:\n sizes['all'][image_id] = img\n sizes[subtype][image_id] = img\n self.set_local(msg, \"sizes\", sizes)\n area = img['width'] * img['height']\n self._update_coverage(msg, subtype, area)", "def put(self, request):\n\n data = request.data\n test_sub_type_id = data['test_sub_type_id']\n data.pop(\"test_sub_type_id\")\n test_sub_type = TestSubType.objects.filter(id=test_sub_type_id)\n\n try:\n test_sub_type.update(**data)\n LOGGER.info(\"Test sub type data updated successfully\")\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record updated successfully\"})\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})", "def population_update_multi(self, parents, children, timeline=None, genUpdate=0, adoptedParents=[], mhFrac=0.0, randomParents=False):\n numFunctions = self.numObjectiveFunctions\n if self.isFunctionList == 0:\n if not 
hasattr(self.objective.func, '__call__'):\n raise AssertionError('Invalid \\tion handle.')\n assert self.dID != [] and np.sum(self.dID + self.xID) == len(self.discreteVals), ('A map must exist for each discrete variable. {} discrete variables, and {} maps provided.').format(np.sum(self.dID), len(self.discreteVals))\n if sum(self.dID) + sum(self.xID) != 0:\n for c in range(0, len(children)):\n children[c] = self.map_to_discretes(children[c])\n\n for p in parents:\n p.variables = self.map_to_discretes(p.variables)\n\n replace = 0\n numFunctions = self.numObjectiveFunctions\n if numFunctions == 1:\n for i in range(0, len(children), 1):\n fnew = self.objective.func(children[i])\n if fnew > self.penalty:\n self.penalty = fnew\n\n feval = 0\n for i in range(0, len(children), 1):\n if randomParents:\n j = int(rand() * len(parents))\n elif len(adoptedParents) == len(children):\n j = adoptedParents[i]\n else:\n j = i\n fnew = self.objective.func(children[i])\n for con in self.constraints:\n fnew += con.func(children[i])\n\n feval += 1\n if fnew < parents[j].fitness:\n parents[j].fitness = fnew\n parents[j].variables = cp.copy(children[i])\n parents[j].changeCount += 1\n parents[j].stallCount = 0\n replace += 1\n if parents[j].changeCount >= 25 and j >= self.population * self.fracElite:\n parents[j].variables = self.initialize(1, 'random').flatten()\n parents[j].variables = self.map_to_discretes(parents[j].variables)\n fnew = self.objective.func(parents[j].variables)\n for con in self.constraints:\n fnew += con.func(parents[j].variables)\n\n parents[j].fitness = fnew\n parents[j].changeCount = 0\n else:\n parents[j].stallCount += 1\n if parents[j].stallCount > 50000 and j != 0:\n parents[j].variables = self.initialize(1, 'random').flatten()\n parents[j].variables = self.map_to_discretes(parents[j].variables)\n fnew = self.objective.func(parents[j].variables)\n for con in self.constraints:\n fnew += con.func(parents[j].variables)\n\n parents[j].fitness = fnew\n parents[j].changeCount = 0\n parents[j].stallCount = 0\n r = int(rand() * len(parents))\n if r <= mhFrac:\n r = int(rand() * len(parents))\n if fnew < parents[r].fitness:\n parents[r].fitness = fnew\n parents[r].variables = cp.copy(children[i])\n parents[r].changeCount += 1\n parents[r].stallCount += 1\n replace += 1\n\n parents.sort(key=lambda x: x.fitness)\n else:\n numVariables = len(self.varType)\n objectivelist = self.objective\n num_cont_int_bin_variables = len(self.lb)\n var_range = []\n for k in range(0, num_cont_int_bin_variables):\n var_range += (self.lb[k], self.ub[k])\n\n num_Features = num_cont_int_bin_variables\n problem = Problem(num_of_variables=num_Features, objectives=objectivelist, variables_range=var_range)\n num_adopted_parents = len(adoptedParents)\n num_parents = len(parents)\n num_children = len(children)\n populationSize = num_parents\n num_Features = num_cont_int_bin_variables\n if timeline != None:\n if len(timeline) < 2:\n timeline.append(Event(1, feval, parents[0].fitness, parents[0].variables))\n elif parents[0].fitness < timeline[(-1)].fitness and abs((timeline[(-1)].fitness - parents[0].fitness) / parents[0].fitness) > self.convTol:\n timeline.append(Event(timeline[(-1)].generation, timeline[(-1)].evaluations + feval, parents[0].fitness, parents[0].variables))\n else:\n timeline[(-1)].generation += genUpdate\n timeline[(-1)].evaluations += feval\n if sum(self.dID) + sum(self.xID) != 0:\n for p in parents:\n p.variables = self.map_from_discretes(p.variables)\n\n if timeline != None:\n return (parents, replace, 
timeline)\n else:\n return (\n parents, replace)\n return", "def update_all(self, request):\n\n schema = self.session.info['schema']\n\n for item in self.query().filter_by(schema=schema):\n self.session.delete(item)\n\n for item in ElectionCollection(self.session).query():\n self.update(item, request)\n\n for item in ElectionCompoundCollection(self.session).query():\n self.update(item, request)\n\n for item in VoteCollection(self.session).query():\n self.update(item, request)", "def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError", "def update_progress(self, argv):\n data = {}\n arg_types = [{\n \"type\": \"cognitive\",\n \"rating\": argv.c,\n \"description\": argv.cd\n },\n {\n \"type\": \"emotional\",\n \"rating\": argv.e,\n \"description\": argv.ed\n }\n , {\n \"type\": \"physical\",\n \"rating\": argv.p,\n \"description\": argv.pd\n }\n\n ]\n\n for data_type in arg_types:\n if data_type[\"rating\"]:\n if 100 >= int(data_type[\"rating\"]) > 0:\n data[data_type[\"type\"]] = {\n \"rating\": data_type[\"rating\"],\n \"description\": data_type[\"description\"],\n \"sentiment\": self.get_sentiment(data_type[\"description\"])\n }\n else:\n logging.error(\"Invalid Rating, must be 1-10\")\n exit(1)\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n insert_data = {today: {\"data\": data, \"weather\": Weather.get_weather()}}\n logging.info(\"Updating Progress\")\n self.prog_logs.insert(insert_data)", "def bulk_upsert(self, request):\n data = request.data\n if not isinstance(data, list):\n data = [data]\n\n serializer = FacilityUpsertSerializer(data=data, many=True)\n serializer.is_valid(raise_exception=True)\n\n district_id = request.data[0][\"district\"]\n facilities = (\n Facility.objects.filter(district_id=district_id)\n .select_related(\"local_body\", \"district\", \"state\", \"created_by__district\", \"created_by__state\")\n .prefetch_related(\"facilitycapacity_set\")\n )\n\n facility_map = {f.name.lower(): f for f in facilities}\n facilities_to_update = []\n facilities_to_create = []\n\n for f in serializer.validated_data:\n f[\"district_id\"] = f.pop(\"district\")\n if f[\"name\"].lower() in facility_map:\n facilities_to_update.append(f)\n else:\n f[\"created_by_id\"] = request.user.id\n facilities_to_create.append(f)\n\n with transaction.atomic():\n capacity_create_objs = []\n for f in facilities_to_create:\n capacity = f.pop(\"facilitycapacity_set\")\n f_obj = Facility.objects.create(**f)\n for c in capacity:\n capacity_create_objs.append(FacilityCapacity(facility=f_obj, **c))\n for f in facilities_to_update:\n capacity = f.pop(\"facilitycapacity_set\")\n f_obj = facility_map.get(f[\"name\"].lower())\n changed = False\n for k, v in f.items():\n if getattr(f_obj, k) != v:\n setattr(f_obj, k, v)\n changed = True\n if changed:\n f_obj.save()\n capacity_map = {c.room_type: c for c in f_obj.facilitycapacity_set.all()}\n for c in capacity:\n changed = False\n if c[\"room_type\"] in capacity_map:\n c_obj = capacity_map.get(c[\"room_type\"])\n for k, v in c.items():\n if getattr(c_obj, k) != v:\n setattr(c_obj, k, v)\n changed = True\n if changed:\n c_obj.save()\n else:\n capacity_create_objs.append(FacilityCapacity(facility=f_obj, **c))\n\n bulk_create_with_history(capacity_create_objs, FacilityCapacity, batch_size=500)\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def update_batch(self, *args, **kwargs):\n pass", "def setupParticles(self):\n\n for ss in self.pargs['species']:\n\n # Make sure we are setting up particles, not walls (so we 
check for id existence)\n if 'id' in ss and 'wall' not in ss:\n if not self.rank:\n logging.info('Setting up particles for group{id}'.format(**ss))\n\n randName = np.random.randint(10**5,10**8)\n pddName = 'pdd' + '{}'.format(np.random.randint(10**5,10**8))\n\n if 'vol_lim' not in ss:\n ss['vol_lim'] = 1e-20\n\n id = ss['id'] - 1\n self.lmp.command('group group{} type {}'.format(id, ss['id']))\n\n if 'args'in ss:\n args = ss['args']\n else:\n args = ()\n\n if 'radius' in ss:\n radius = ss['radius']\n\n if not isinstance(radius, tuple):\n radius = ('constant', radius)\n\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density} radius'.format(**ss) + (' {}' * len(radius)).format(*radius) \\\n + (' {}' * len(args)).format(*args))\n else:\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density}'.format(**ss) + (' {}' * len(args)).format(*args))\n \n self.lmp.command('fix {} '.format(pddName) + 'group{}'.format(id) + ' particledistribution/discrete 67867967 1'.format(**ss) + ' {} 1.0'.format(randName))\n\n if ss['style'] is 'multisphere':\n itype = ss['style']\n else:\n itype = 'nve/{style}'.format(**ss)\n\n #Do NOT unfix randName! Will cause a memory corruption error\n self.pddName.append(pddName)", "def update(cls, project_id, resource_type, resource_id, data):\n\n for key, value in data.items():\n cls.edit(\n resource_type=resource_type,\n resource_id=resource_id,\n project_id=project_id,\n key=key,\n value=value\n )", "def _update_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"counts\", counts)", "def updateAllRealtime():\n for stockName in db.STOCK_MAP.keys():\n getStock(stockName, \"all\")\n\n db.UPDATING_REALTIME = False", "def push_bulk(self, obj_list, doc_type=None, refresh=True):\n assert isinstance(obj_list, collections.Sequence)\n assert len(obj_list) > 0\n\n es_obj_list = []\n for obj in obj_list:\n if obj is None:\n logger.warning(\"None object in input list\")\n continue\n\n doc_type, es_repr = self._validate_doc_and_get_type_and_repr(obj, doc_type)\n metadata = {\n '_op_type': 'index',\n \"_index\": self.index_name,\n \"_type\": doc_type,\n }\n es_repr.update(**metadata)\n\n es_obj_list.append(es_repr)\n\n helpers.bulk(client=self.conn.elastic_search_client, actions=es_obj_list,\n stats_only=True, refresh=u'true' if refresh else u'false')", "def _update_(self, data):\n self.heroes = dict((hero, Hero(hero, data[hero])) for hero in data)", "def update(self):\n for zone in self.zones:\n self.update_sensor(\"zone\", zone[\"id\"])\n for device in self.devices:\n self.update_sensor(\"device\", device[\"id\"])", "def _update(self):\n self.parametrize_beam()\n self.update_ranks()\n self._points = tf.reshape(self._endpoint, (1, 2)) * tf.reshape(self._ranks, (-1, 1))", "def update_targets(self, items):\n items = list(filter(None, chain(*items)))\n items = list(filter(None, items))\n\n if len(items) > 0:\n self.logger.info(\"Updating {} thermo documents\".format(len(items)))\n bulk = self.thermo().initialize_ordered_bulk_op()\n\n for m in items:\n m[self.thermo.lu_field] = datetime.utcnow()\n bulk.find({\"material_id\": m[\"material_id\"]}).upsert().replace_one(m)\n bulk.execute()\n else:\n self.logger.info(\"No 
items to update\")", "def _enchance_assignment(self, doc):\n\n results = self.get_archive_items_for_assignment(doc)\n if results.count() > 0:\n doc['item_ids'] = [str(item.get(config.ID_FIELD)) for item in results]\n\n self.set_type(doc, doc)", "def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n\n # count the genes for the person\n geneCount = 0\n if person in one_gene:\n geneCount = 1\n elif person in two_genes:\n geneCount = 2\n\n # check the trait\n hasTrait = False\n if person in have_trait:\n hasTrait = True\n\n # update the probabilities\n probabilities[person][\"gene\"][geneCount] += p\n probabilities[person][\"trait\"][hasTrait] += p", "def update_subcategory(self, control_type, *args):\n\n\t\tif control_type is 'intField':\n\t\t\tself.log('query intField and update others')\n\t\t\tintField_value = cmds.intField(self.grade_intField, query = True, value = True)\n\t\t\tself.log('intField is %s' % intField_value)\n\n\t\t\tself.current_grade_value = intField_value\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit=True, value = -intField_value)\n\t\t\tself.update_radios_default_comments(intField_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'slider':\n\n\t\t\tself.log('query slider and update others')\n\t\t\tslider_value = abs(cmds.intSlider(self.grade_slider, query = True, value = True))\n\t\t\tself.log('intSlider is %s' % slider_value)\n\n\t\t\tself.current_grade_value = slider_value\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = slider_value)\n\t\t\tself.update_radios_default_comments(slider_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'radioButton':\n\t\t\tself.log('query radio collection and update others')\n\t\t\tselected = cmds.radioCollection(self.grade_radio_collection, query = True, select = True)\n\t\t\tselected_letter = cmds.radioButton(selected, query = True, label = True)\n\t\t\tselected_letter = re.sub('\\\\+', 'plus', selected_letter)\n\t\t\tself.log('selected radioButton: %s' % selected_letter)\n\n\t\t\tself.current_grade_value = int(self.grade_values.find(selected_letter).text)\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit = True, value = -self.current_grade_value)\n\t\t\tself.log('selected_letter: %s' % selected_letter)\n\t\t\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = self.subcatXML.find('gradeComment').find(selected_letter).text)\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'default_comments_text':\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\n\t\telif control_type is 'example_comments_text':\n\t\t\tself.current_example_comment_text = cmds.scrollField(self.example_comments, query = True, 
text = True)\n\t\t\tself.log('examples updated')\n\t\t\tself.log(self.current_example_comment_text)\n\n\t\telse:\n\t\t\tself.current_comment_text = cmds.scrollField(self.comments_text_field, query = True, text = True)\n\t\t\tself.log('comments updated')\n\t\t\tself.log(self.current_comment_text)", "def update(self, new_data):\n if type(new_data) is not dict:\n raise TypeError(\"Input parameter must be a dict\")\n # Update parameters\n self._type = new_data.get(\"_type\", self._type)\n self.time_units = new_data.get(\"time_units\", self.time_units)\n self.len_units = new_data.get(\"len_units\", self.len_units)\n self.pump_units = new_data.get(\"pump_units\", self.pump_units)\n self.parameters = new_data.get(\"parameters\", self.parameters)\n # Update pumping rate\n self.pumprate.update(new_data.get(\"pumprate\", self.pumprate.to_dict()))\n # Update data\n if \"wells\" in new_data:\n n = len(new_data[\"wells\"])\n if n > 1:\n self.delete_all_wells()\n for i in range(n):\n self.add_well(0, 0, new_data[\"wells\"][i][\"_type\"] - 2)\n self.wells[i].update(new_data[\"wells\"][i])\n # End Function", "def updateAllEntities():\n entityKeys=list(registeredEntities.keys())\n for currKey in entityKeys:\n try:\n currEntity=registeredEntities[currKey]\n currEntity.update()\n except KeyError:\n #this should only be called if an entity is deleted (like if a rock got hit by a bullet)\n continue", "def update_profit(self):\n # Acessing Redis can cause greenlet switches because new jobs. We don't\n # want to potentially switch jobs multiple times quickly, so we update\n # the profitability information all at once after the loop to avoid\n # multiple network switches\n new_price_data = {}\n for manager in self.jobmanagers.itervalues():\n currency = manager.config['currency']\n pscore = self.redis.get(\"{}_profit\".format(currency))\n\n # Deserialize\n if pscore:\n try:\n pscore = simplejson.loads(pscore, use_decimal=True)\n except Exception:\n self.logger.warn(\n \"Error parsing profit score for {}! Setting it to 0..\"\n .format(currency))\n pscore = 0\n pass\n # If no score was grabbed, pass a 0 value score\n else:\n self.logger.warn(\"Unable to grab profit info for {}!\"\n .format(currency))\n pscore = 0\n\n ratio = self.redis.get(\"{}_ratio\".format(currency)) or 1.0\n ratio = float(ratio)\n\n # Only set updated if it actually changed\n if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:\n new_price_data[currency] = (pscore, ratio, time.time())\n\n # If we have some new information, adjust accordingly\n if new_price_data:\n self.logger.info(\"Updated price information for {}\"\n .format(new_price_data.keys()))\n # Atomic update in gevent\n self.price_data.update(new_price_data)\n\n # Update all the profit info. 
No preemption, just maths\n for currency in self.jobmanagers.iterkeys():\n self.update_profitability(currency)\n\n self.logger.debug(\n \"Re-checking best network after new price data for {}\"\n .format(new_price_data.keys()))\n self.check_best()", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def beam_update(self, idx, positions, beam_size):\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(\n sentStates.data.index_select(1, positions))", "def update2(self, es, **kwargs):\n self._update_ps(es)\n raise NotImplementedError('must be implemented in a derived class')", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def update_es_index():\n\n for job in scheduler.get_jobs():\n if 'task_type' in job.meta and job.meta['task_type'] == \"update_index\":\n scheduler.cancel(job)\n\n scheduler.schedule(\n scheduled_time=datetime.now(),\n func='haystack.management.commands.update_index.Command().handle()',\n interval=60 * 60,\n repeat=None,\n )\n\n for job in scheduler.get_jobs():\n index_job = job\n if index_job.func_name == 'haystack.management.commands.update_index.Command().handle()':\n break\n\n index_job.meta['task_type'] = \"update_index\"\n index_job.save()", "def put(self):\n type_model = request.json\n\n type_model = namedtuple(\"Type\", type_model.keys())(*type_model.values())\n repository = TypeRepository(\n FLASK_APP.config[\"DBUSER\"],\n FLASK_APP.config[\"DBPASS\"],\n FLASK_APP.config[\"DBHOST\"],\n FLASK_APP.config[\"DBPORT\"],\n FLASK_APP.config[\"DBNAME\"])\n try:\n type_model = repository.update(type_model)\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Type sucessfuly updated',\n 'put()',\n str(type.__dict__),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=type_model,\n message=\"Type sucessfuly updated.\",\n status=204), 200\n except Exception as err:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Error',\n 'Internal server error',\n 'put()',\n str(err),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=err,\n message=\"Internal server error: \" + str(err),\n status=500)", "def update_scores(read_scores, result):\n\n\t# check that there are some results for this read\n\tassert len(result) > 0\n\t# write this read to output\n\tfor sim_match in result.keys():\n\n\t\t# get type (discordant or chimeric)\n\t\tjunc_type = sim_match.split('_')[2]\n\n\t\tfor analysis_match in result[sim_match]:\n\t\t\t\n\t\t\t# get each score type\n\t\t\tfor score_type in read_scores:\n\t\t\t\tscore = analysis_match[score_type]\n\t\t\t\tread_scores[score_type][junc_type][score] += 1", "def _after_postgeneration(cls, instance, create, results=None):\n super()._after_postgeneration(instance, create, results=results)\n if results.get(\"should_publish\", False):\n for language in instance.extended_object.get_languages():\n instance.extended_object.publish(language)\n instance.refresh_from_db()", "def update(self):\r\n debug.write(\"[SourceRPG] Updating all ranked positions\", 1)\r\n database.execute(\"SELECT steamid FROM Player ORDER BY level DESC,xp DESC\")\r\n results = database.cursor.fetchall()\r\n self.ranks = []\r\n for index, steamid in enumerate(results):\r\n debug.write(\"Rank: %s Steamid: %s\" % (index, steamid), 5)\r\n self.ranks.append(steamid[0])\r\n 
debug.write(\"[SourceRPG] All ranked positions updated\", 1)", "def update_pollutant_emissions(self, ds):\n\n # Update biosphere exchanges according to GAINS emission values\n for exc in ws.biosphere(\n ds, ws.either(*[ws.contains(\"name\", x) for x in self.emissions_map])\n ):\n iam_emission_label = self.emissions_map[exc[\"name\"]]\n\n try:\n iam_emission = self.iam_data.cement_emissions.loc[\n dict(\n region=ds[\"location\"],\n pollutant=iam_emission_label\n )\n ].values.item(0)\n except KeyError:\n # TODO: fix this.\n # GAINS does not have a 'World' region, hence we use Europe as a temporary fix\n iam_emission = self.iam_data.cement_emissions.loc[\n dict(\n region=self.geo.iam_to_GAINS_region(\"World\"),\n pollutant=iam_emission_label\n )\n ].values.item(0)\n\n\n if exc[\"amount\"] == 0:\n wurst.rescale_exchange(\n exc, iam_emission / 1, remove_uncertainty=True\n )\n else:\n wurst.rescale_exchange(exc, iam_emission / exc[\"amount\"])\n return ds", "def update_param_all_cells(self, landscape, params):\n cell_type = self.dict_cells[landscape]\n cell_type.update_parameters(params)", "def reload(self):\n data = self.api.api_request(\"GET\", self.url)\n for t in self.ace_types:\n self[t].actors = data[t][\"actors\"]\n self[t].groups = data[t][\"groups\"]", "def refresh_from_padron(self, resource_type):\n data = {'impuestos': 'afip.tax', 'actividades': 'afip.activity', 'conceptos': 'afip.concept'}\n resource_name = resource_type.capitalize().replace('tos', 'to').replace('des', 'd')\n\n model = data.get(resource_type)\n if not model:\n raise UserError(_('Resource Type %s not implemented!') % (resource_type))\n\n url = \"https://soa.afip.gob.ar/parametros/v1/%s/\" % resource_type\n res = requests.get(url=url)\n data = res.json().get('data')\n if res.status_code != 200:\n raise UserError(_('Error getting %s params from AFIP!') % resource_type)\n\n codes = []\n for item in data:\n code = item.get(\"id\" + resource_name)\n values = {'code': code,\n 'name': item.get(\"desc\" + resource_name),\n 'active': True}\n record = self.env[model].search([('code', '=', code)], limit=1)\n codes.append(code)\n if record:\n record.write(values)\n else:\n record.create(values)\n\n # deactivate the ones that are not in afip\n self.env[model].search([('code', 'not in', codes)]).write({'active': False})", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def _update_historical_ratings(self, pid, obs_time, rating, variance, r_type):\n\n\t\tself.historical_player_ratings[pid][obs_time][r_type + '_rating'] = rating\n\t\tself.historical_player_ratings[pid][obs_time][r_type + '_variance'] = variance", "def scale_up(self):\n self.load *= 10\n for pp in self.powerplants:\n if pp[\"type\"] != \"windturbine\":\n pp[\"pmin\"] *= 10\n pp[\"pmax\"] *= 10", "def update(\n self,\n filter_freq=FILTER_FREQ_DEFAULT,\n filter_stages=None,\n url_stats=\"result/matrix?show_stage_details=true&show_item_details=true\",\n url_rules=\"formula\",\n path_stats=\"data/matrix.json\",\n 
path_rules=\"data/formula.json\",\n gamedata_path=\"https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/{}/gamedata/excel/item_table.json\",\n dont_save_data=False,\n ):\n material_probs, convertion_rules = request_data(\n penguin_url + url_stats,\n penguin_url + url_rules,\n path_stats,\n path_rules,\n gamedata_path,\n dont_save_data,\n )\n self.itemdata = request_itemdata(gamedata_path)\n self.itemdata_rv = {\n lang: {v: k for k, v in dct.items()} for lang, dct in self.itemdata.items()\n }\n\n if filter_freq:\n if filter_stages is None:\n filter_stages = []\n filtered_probs = []\n for dct in material_probs[\"matrix\"]:\n if (\n dct[\"times\"] >= filter_freq\n and dct[\"stage\"][\"code\"] not in filter_stages\n ):\n filtered_probs.append(dct)\n material_probs[\"matrix\"] = filtered_probs\n\n self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))", "def update(self, context):\n session = Session()\n\n for sub in self.cart:\n offers = session.search(sub.query, self.lat, self.lon, self.radius)\n for offer in sub.handle_offers(offers):\n context.bot.send_message(self.chat_id, text=offer_text(offer))\n\n updates = sub.check_offers()\n for offer in updates['expired']:\n context.bot.send_message(self.chat_id,\n text=offer_text_expired(offer))\n for offer in updates['expiring']:\n context.bot_send_message(self.chat_id,\n text=offer_text_expiring(offer))\n self.config_updated()", "async def async_update(self):\n node = self.gateway.sensors[self.node_id]\n child = node.children[self.child_id]\n set_req = self.gateway.const.SetReq\n for value_type, value in child.values.items():\n _LOGGER.debug(\n \"Entity update: %s: value_type %s, value = %s\",\n self._name,\n value_type,\n value,\n )\n if value_type in (\n set_req.V_ARMED,\n set_req.V_LIGHT,\n set_req.V_LOCK_STATUS,\n set_req.V_TRIPPED,\n ):\n self._values[value_type] = STATE_ON if int(value) == 1 else STATE_OFF\n elif value_type == set_req.V_DIMMER:\n self._values[value_type] = int(value)\n else:\n self._values[value_type] = value", "def set_mass(self, type, mass):\n assert type < self.num_atom_types + 1\n assert type > 0\n self.masses[type] = mass", "def handle_wps_update(self, data):\n\n self.jobs = data", "def update_data(self):\n staff = Staff.objects.all()\n orgs = Organization.objects.all()\n depts = Department.objects.all()\n\n existing = self.all()\n if existing.count():\n existing.delete()\n\n if staff.count():\n for s in staff:\n record = CombinedTeledata(\n id=s.id,\n alpha=s.alpha,\n name=s.name,\n first_name=s.first_name,\n last_name=s.last_name,\n sort_name=s.sort_name,\n email=s.email,\n phone=s.phone,\n postal=s.postal,\n job_position=s.job_position,\n department=s.dept.name,\n dept_id=s.dept.id,\n organization=s.dept.org.name,\n org_id=s.dept.org.id,\n building=s.bldg.name,\n bldg_id=s.bldg.import_id,\n room=s.room,\n from_table='staff'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(s.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if orgs.count():\n for o in orgs:\n record = CombinedTeledata(\n id=o.id,\n name=o.name,\n sort_name=o.name,\n phone=o.phone,\n fax=o.fax,\n building=o.bldg.name,\n bldg_id=o.bldg.import_id,\n room=o.room,\n from_table='organizations'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(o.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if depts.count():\n for d in depts:\n record = CombinedTeledata(\n id=d.id,\n name=d.name,\n sort_name=d.name,\n phone=d.phone,\n 
fax=d.fax,\n organization=d.org.name,\n org_id=d.org.id,\n building=d.bldg.name,\n bldg_id=d.bldg.import_id,\n room=d.room,\n from_table='departments'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(d.keywords.all())\n except Exception as e:\n logger.error(str(e))", "def test_update_multiple_preferences_type(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def updateDerivedTypes(self):\n self.hasConditionals = False\n self.derivedDict = {}\n for format in self.values():\n if format.genericType:\n format.updateFromGeneric()\n generic = self[format.genericType]\n if format.genericType in self.derivedDict:\n self.derivedDict[format.genericType].append(format)\n else:\n self.derivedDict[format.genericType] = [generic, format]\n if format.conditional:\n self.hasConditionals = True\n format.conditional.setupFields(format)", "def _process(proc_data: List[Dict]) -> List[Dict]:\n for item in proc_data:\n for key, val in item.items():\n if key != 'type':\n item[key] = int(val)\n\n return proc_data", "def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []", "def _update_positional_allele_dictionary(self, read_id, pos, allele, type, mapping_quality):\n if pos not in self.positional_allele_dictionary:\n self.positional_allele_dictionary[pos] = {}\n if (allele, type) not in self.positional_allele_dictionary[pos]:\n self.positional_allele_dictionary[pos][(allele, type)] = 0\n\n # increase the allele frequency of the allele at that position\n self.positional_allele_dictionary[pos][(allele, type)] += 1\n self.allele_dictionary[read_id][pos].append((allele, type))", "def _postprocess_staging_data(self):\n super()._postprocess_staging_data()\n with self._conn, self._conn.cursor() as cursor:\n cursor.execute(sql.SQL(\"\"\"UPDATE {0} SET rat_bitmask = translate_bands_to_rat_bitmask(bands)\"\"\")\n .format(self._staging_tbl_identifier))", "def updateSubSystems(self, data):\r\n current_subsystems_files = {}\r\n\r\n for subsystems_filename in self.versions[\"subsystems\"]:\r\n current_subsystems_files[subsystems_filename] = self.versions[\"subsystems\"][subsystems_filename]\r\n\r\n for subsystems_filename in data[\"subsystems\"]:\r\n subsystems_file = data[\"subsystems\"][subsystems_filename]\r\n if subsystems_filename == \"main\":\r\n continue\r\n try:\r\n if subsystems_filename not in current_subsystems_files.keys() or self.mission_name + \"_\" + subsystems_file[\"version\"] != current_subsystems_files[subsystems_filename][\"version\"]:\r\n downloadAndReplaceFile(subsystems_filename + \".py\", subsystems_file[\"python\"])\r\n self.versions[\"subsystems\"][subsystems_filename] = {}\r\n self.versions[\"subsystems\"][subsystems_filename][\"version\"] = self.mission_name + \"_\" + subsystems_file[\"version\"]\r\n self._logger.info(\"subsystems file \" + subsystems_filename + \" updated to version: \" + subsystems_file[\"version\"])\r\n except Exception as e:\r\n self._logger.error(\"Failed to update subsystems file \" + subsystems_filename 
+ \" due to an exception: \" + str(e))", "def execute(self):\n for coll in list(self.__bulks):\n try:\n bulkOp = self.__bulks[coll]\n curr_result = Counter(bulkOp.execute())\n self.update_results(coll, curr_result)\n except BulkWriteError as bwe:\n sys.stderr.write(str(bwe.details))", "def write(self, values):\n STAGE = self.env['anytracker.stage']\n RATING = self.env['anytracker.rating']\n PRIORITY = self.env['anytracker.priority']\n IMPORTANCE = self.env['anytracker.importance']\n COMPLEXITY = self.env['anytracker.complexity']\n oldmethods = {t.id: t.method_id for t in self}\n res = super(Ticket, self).write(values)\n # change the method on subtickets as well\n if 'method_id' not in values:\n return res\n\n for ticket in self:\n children = self.search([('id', 'child_of', ticket.id),\n ('id', '!=', ticket.id)])\n new_meth_id = values['method_id']\n super(Ticket, children).write({'method_id': new_meth_id})\n\n # point subtickets to the equivalent stage in the new method\n for oldstage in oldmethods[ticket.id].stage_ids:\n # find children with this stage\n children = self.search([('id', 'child_of', ticket.id),\n ('stage_id', '=', oldstage.id)])\n if not children:\n continue\n # find a stage with the same code in the new method\n equ_stage = STAGE.search([('state', '=', oldstage.state),\n ('method_id', '=', new_meth_id)])\n # if no stage found, reset to no stage at all\n # (should display tickets on the left in the kanban)\n equ_stage = equ_stage[0].id if equ_stage else False\n self.env.cr.execute(\n 'update anytracker_ticket '\n 'set stage_id=%s where id in %s',\n (equ_stage or None, tuple(children.ids)))\n\n # point subtickets to the equivalent priority in the new method\n for oldprio in oldmethods[ticket.id].priority_ids:\n # find children with this priority\n children = self.search([('id', 'child_of', ticket.id),\n ('priority_id', '=', oldprio.id)])\n if not children:\n continue\n # find a priority with the same seq in the new method\n equ_prio = PRIORITY.search([('seq', '=', oldprio.seq),\n ('method_id', '=', new_meth_id)])\n # if no priority found, reset to no priority at all\n # (should display tickets on the left in the kanban)\n equ_prio = equ_prio[0].id if equ_prio else False\n self.env.cr.execute('update anytracker_ticket '\n 'set priority_id=%s where id in %s',\n (equ_prio or None, tuple(children.ids)))\n\n # point subtickets to the equivalent importance in the new method\n for oldimp in oldmethods[ticket.id].importance_ids:\n # find children with this importance\n children = self.search([('id', 'child_of', ticket.id),\n ('importance_id', '=', oldimp.id)])\n if not children:\n continue\n # find a importance with the same code in the new method\n equ_imp = IMPORTANCE.search([('seq', '=', oldimp.seq),\n ('method_id', '=', new_meth_id)])\n # if no importance found, reset to no importance at all\n # (should display tickets on the left in the kanban)\n equ_imp = equ_imp[0].id if equ_imp else False\n self.env.cr.execute(\n 'update anytracker_ticket set importance_id=%s'\n ' where id in %s',\n (equ_imp or None, tuple(children.ids)))\n\n # point subtickets to the equiv complexity in the new method\n for oldcplx in oldmethods[ticket.id].complexity_ids:\n # find ratings with this complexity and related to children\n ratings = RATING.search([\n ('ticket_id', 'in',\n self.search([('id', 'child_of', ticket.id)]).ids),\n ('complexity_id', '=', oldcplx.id)])\n if not ratings:\n continue\n # find a complexity with the same code in the new method\n equ_cmplx = COMPLEXITY.search(\n [('value', '=', 
oldcplx.value),\n ('method_id', '=', new_meth_id)])\n # if no complexity found, reset to no complexity at all\n # (should display tickets on the left in the kanban)\n equ_cmplx = equ_cmplx[0].id if equ_cmplx else False\n self.env.cr.execute(\n 'update anytracker_rating set complexity_id=%s '\n 'where id in %s',\n (equ_cmplx or None, tuple(ratings.ids)))\n\n # recompute risk and ratings\n ticket.recompute_subtickets()\n\n return res", "def gbf_pub_update():\r\n LOG.info(\"Start: Update datasets in RLIDGeo warehouse.\")\r\n month_stamps = [\r\n datetime.date.today().strftime(\"%Y_%m\"),\r\n (\r\n datetime.date.today().replace(day=1)\r\n - datetime.timedelta(days=1)\r\n ).strftime(\"%Y_%m\"),\r\n ]\r\n for month_stamp in month_stamps:\r\n snapshot_db_path = SNAPSHOT_DB_PATH.format(month_stamp)\r\n if not os.path.exists(snapshot_db_path):\r\n LOG.warning(\"Snapshot database %s does not exist.\", snapshot_db_path)\r\n continue\r\n\r\n for _dataset in DATASETS:\r\n arcetl.features.update_from_dicts(\r\n dataset_path=_dataset.path(\"pub\"),\r\n update_features=source_rows(snapshot_db_path, _dataset.path(\"source\")),\r\n id_field_names=_dataset.id_field_names,\r\n field_names=_dataset.field_names,\r\n delete_missing_features=False,\r\n use_edit_session=False,\r\n )\r\n LOG.info(\"End: Update.\")", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def update(self):\n self._sync_ranges()\n self._update_params()", "def update(self, **kwargs):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"update {}\".format(item))\n item.update(**kwargs)", "def reindex_pidtype(pid_type):\n click.echo('Indexing pid type \"{}\"...'.format(pid_type))\n cli = create_cli()\n runner = current_app.test_cli_runner()\n runner.invoke(\n cli,\n 'index reindex --pid-type {} --yes-i-know'.format(pid_type),\n catch_exceptions=False\n )\n runner.invoke(cli, 'index run', catch_exceptions=False)\n click.echo('Indexing completed!')", "def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). 
Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' %\n added_counter)" ]
[ "0.5894394", "0.552447", "0.5422689", "0.51684684", "0.51551306", "0.50729585", "0.50314814", "0.495626", "0.49350718", "0.48938638", "0.48739257", "0.48735148", "0.48624405", "0.48345262", "0.48125353", "0.47565222", "0.47560182", "0.47403848", "0.47322214", "0.46917525", "0.46881023", "0.46878457", "0.46480522", "0.4646425", "0.46456882", "0.46377173", "0.46361303", "0.4632519", "0.46324039", "0.4630771", "0.46228954", "0.4617129", "0.46135354", "0.46124178", "0.4610281", "0.46080405", "0.46075884", "0.45996284", "0.4593866", "0.4573534", "0.456476", "0.45544326", "0.454592", "0.45411822", "0.45402166", "0.4531714", "0.45267612", "0.45242944", "0.45142242", "0.45120132", "0.4510825", "0.45032087", "0.4492668", "0.44897243", "0.44885406", "0.4479692", "0.44752496", "0.4472957", "0.44685373", "0.4467156", "0.44625393", "0.44494084", "0.44449922", "0.44304776", "0.44287166", "0.4424979", "0.4424572", "0.44236204", "0.44182158", "0.44144586", "0.44134998", "0.4410143", "0.4408557", "0.4408334", "0.44070426", "0.44062364", "0.44043562", "0.44023845", "0.44013608", "0.439806", "0.43941742", "0.43911985", "0.4389549", "0.4389439", "0.43865544", "0.43856043", "0.43853626", "0.43826875", "0.43805185", "0.43791974", "0.4375671", "0.43733066", "0.43666795", "0.43644658", "0.43629995", "0.43529445", "0.43522617", "0.4351256", "0.43486255", "0.4342766" ]
0.6947471
0
Convert any text to a fernet key for encryption.
def any_text_to_fernet_key(self, text):
        md5 = fingerprint.fingerprint.of_text(text)
        fernet_key = base64.b64encode(md5.encode("utf-8"))
        return fernet_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text2PublicKey(text:str):\n return RSA.importKey(b58decode(text))", "def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def caesar_encryption(text):\n result = ''\n for char in text:\n if char.isdigit():\n i = (num_key.index(char) - 4) % 10\n result += num_key[i]\n elif not char.isdigit() and char.lower() in alpha_key:\n i = (alpha_key.index(char.lower()) - 4) % 26\n result += alpha_key[i]\n else:\n result += char\n return result", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def gen_Fernet_key():\n\tkey = Fernet.generate_key()\n\treturn key", "def FtEncrypt(self,text):\n \n self.text = text\n EncryptText = []\n characters = \"abcdefghijklmnopqrstuvwxyz \"\n\n #attempt to append my_list and update my_dict\n #using a random set of alphabet and a random made_key \n try:\n for char in random.sample(characters,27):\n Code_Fouad_Teniou.my_list.append(char)\n \n Code_Fouad_Teniou.my_dict.update(zip(Code_Fouad_Teniou.my_key,Code_Fouad_Teniou.my_list))\n\n for item in text.lower():\n for i in Code_Fouad_Teniou.my_dict.items():\n if item == i[1]:\n EncryptText.append(i[0])\n \n return EncryptText\n \n #Raise AttributeError if text is not a string \n except AttributeError:\n raise AttributeError, \"\\n<Please re-enter your text as a 'string'\"", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def encode(text, key):\n encrypted = []\n for i in text:\n encrypted.append(key[i])\n return encrypted", "def encipher(self,string): \n string = self.remove_punctuation(string)\n ret = ''\n for (i,c) in enumerate(string):\n if i<len(self.key): offset = self.a2i(self.key[i])\n else: offset = self.a2i(string[i-len(self.key)]) \n ret += self.i2a(self.a2i(c)+offset)\n return ret", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def encryptstring(text, password):\n\n enc = []\n for i in enumerate(text):\n key_c = password[i[0] % len(password)]\n enc_c = chr((ord(i[1]) + ord(key_c)) % 256)\n enc.append(enc_c)\n return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n # PUT YOUR CODE HERE\n\n key_lenght = len(keyword)\n text_lenght = len(plaintext)\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_A = ord('A')\n ord_a = ord('a')\n\n if plaintext.islower():\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_a)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n ciphertext += \" \"\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_a\n ciphertext += chr(value)\n else:\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n 
code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_A)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n value = ord(\" \")\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_A\n ciphertext += chr(value)\n return ciphertext", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def load_Fernet_key(filename):\n\tfich = open(str(filename) +'.key', 'rb')\n\tkey = fich.read() # The key will be type bytes\n\tfich.close()\n\treturn key", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def vigenere(inputString, mode, key):\n\n\t# Sanitize inputs\n\tkey = key.lower()\n\tkey.replace(\" \", \"\")\n\tif mode not in ['e', 'd']:\n\t\traise Exception(\"'mode' must be either 'e' for encryption or 'd' for decryption\")\n\n\t# Initialize local variables\n\tkeystreamCounter = 0\n\toutString = \"\"\n\n\t# Iterate on each character of the input string.\n\tfor inputChar in inputString:\n\t\t# If this character is in the alphabet, encode it\n\t\tif inputChar.lower() in alphabet:\n\t\t\t# Find the number of positions to move and the starting position\n\t\t\tkeyChar = key[keystreamCounter % len(key)]\n\t\t\tnumMoves = alphabet.index(keyChar)\n\t\t\tstartPosition = alphabet.index(inputChar.lower())\n\n\t\t\t# Encode or decode the input character by shifting the alphabet\n\t\t\tif mode == 'e': # encode\n\t\t\t\toutputChar = alphabet[(startPosition + numMoves) % len(alphabet)]\n\t\t\telif mode == 'd': # decode\n\t\t\t\toutputChar = alphabet[(startPosition - numMoves) % len(alphabet)]\n\n\t\t\t# Preserve capitalization\n\t\t\tif inputChar.isupper():\n\t\t\t\toutputChar = outputChar.upper()\n\n\t\t\t# Increment counter on each loop.\n\t\t\tkeystreamCounter += 1\n\n\t\t# If character is not in alphabet, preserve punctutation\n\t\telse:\n\t\t\toutputChar = inputChar\n\n\t\t# Append to output string\n\t\toutString += outputChar\n\n\t# Write to file\n\treturn(outString)", "def MakeKey(self, string, string_1, string_2):\n ...", "def run():\n key = input(\"Enter a 26 letter key: \")\n if not isValidKey(key):\n print(\"Invalid key.\")\n return\n plainText = input(\"Plain Text: \")\n cipherText = substitution(plainText, key)\n print(f\"Cipher Text: {cipherText}\")\n return", "def encode(keyFile: str, string: str=\"\", inputFile: str=\"\", outputFile:str=\"\") -> str:\n print(\"Encoding message ...\")\n print(\"Is public key file ok ?\", checkKeyFile(keyFile,\"public\"))\n\n if (checkKeyFile(keyFile,\"public\")): \n f = open(keyFile)\n keyData = extractParamsFromKey(f.readlines()[1]) # read the second line of the file and extract the param\n if args.verbose : print(\"keydata (publ) :\", keyData)\n \n #open a file if the string is empty\n if(string == \"\"):\n string = str(readFile(inputFile))\n else:\n string = string\n\n # transform the ascii string into a series of numbers\n asciiToInt = \"\"\n for char in string :\n asciiToInt += str(ord(char)).zfill(3)\n if args.verbose : print(\"ascii to int\", asciiToInt)\n\n # calculate the block length\n blocklen = len(str(keyData[0])) -1\n if args.verbose : print(\"block size is\", blocklen)\n \n # split the string into blocks\n # start bu reversing the string so we can start left to right\n tmp = 
asciiToInt[::-1]\n # cut them\n blocks = wrap(tmp, blocklen)\n # reverse the lsit of cut\n blocks.reverse()\n # inside eecaht cut reserve the characters\n for i in range(len(blocks)):\n blocks[i] = blocks[i][::-1]\n if args.verbose : print(blocks)\n \n # make sur that every block is the corect length, overwise add padding\n for i in range(len(blocks)):\n blocks[i] = blocks[i].zfill(blocklen)\n if args.verbose : print(\"blocks after padding :\", blocks)\n \n # crypt everyblock\n tempCryptString = \"\"\n if args.verbose : print(\"encrypted blocks:\")\n for i in range(len(blocks)): \n blockEncrypted = str(calculateCrypt(blocks[i], keyData[1], keyData[0]))\n if args.verbose : print(blockEncrypted)\n blockEncrypted = blockEncrypted.zfill(blocklen+1)\n if args.verbose : print(blockEncrypted)\n tempCryptString += blockEncrypted\n if args.verbose : print(\"encrypted string :\",tempCryptString)\n \n # write the contentes to a file\n hexstr = intToHexToBase64(tempCryptString)\n if(outputFile == \"\"):\n print(\"Encrypted :\")\n print(hexstr)\n else :\n print(\"writing to file\", outputFile)\n writeToFile(outputFile, hexstr)\n return hexstr\n else: \n print(\"keyfile is incorrect\")\n return", "def encrypt_string(self, raw_string):\n return self.fernet_instance.encrypt(raw_string.encode('utf-8'))", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def get_fernet_key(app: Sanic, passphrase: str) -> bytes:\n salted = (passphrase + app.secret_key).encode()\n key = hashlib.sha256(salted).digest()[:32]\n return base64.urlsafe_b64encode(key)", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def tokey(*args):\n salt = '||'.join([force_text(arg) for arg in args])\n hash_ = hashlib.md5(encode(salt))\n return hash_.hexdigest()", "def makeKey(text):\n key, n = {}, 0\n for i in text:\n key[i] = str(n)\n n += 1\n return key", "def encrypt(string,pub):\r\n string = livingDead.utfE(string)\r\n crypto = rsa.encrypt(string, pub)\r\n return crypto", "def test_encryption(e, c):\n message = input(\"Enter word to encrypt: \")\n ciphered = ''\n\n for i in range(0, len(message)):\n ciphered = f'{ciphered}{chr(endecrypt(ord(message[i]), e, c))}'\n\n print(ciphered + ' is the ciphered text')\n d = key_cracker(e, c)\n print(\"Plain text is:\")\n for i in range(0, len(ciphered)):\n print(chr(endecrypt(ord(ciphered[i]), d, c)), end='')", "def cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>)\"\r\n# Fetching the writing in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# The crypting process, replaces letters in intab1 with outtab1\r\n crypted = (a.translate({ord(x): y for (x, y) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the crypted text within textbox\r\n self.textbox.setPlainText(crypted)", "def FtDecrypt(self,EncryptText):\n \n self.EncryptText = EncryptText\n characters = \"abcdefghijklmnopqrstuvwxyz \"\n DecripText = ''\n\n #attempt 
to decrypt the text using the made_key and EncryptText \n try:\n for item in self.EncryptText:\n DecripText += Code_Fouad_Teniou.my_dict[item]\n\n return DecripText\n \n #Raise KeyError if a different key was used to encrypt the text \n except KeyError:\n print \"\\n<Please use the right code(made_key) to decrypt your text\"", "def test_from_and_to_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n tkeyring = dns.tsigkeyring.to_text(rkeyring)\n self.assertEqual(tkeyring, text_keyring)", "def keyify(text):\n text = text.lower()\n text = text.strip()\n\n text = text.replace('.', '')\n text = re.sub('[,-]', ' ', text)\n text = re.sub('\\s{2,}', ' ', text)\n\n return text", "def encrypt(self,string=\"vrorqjdqgwdqnviruwkhilvk\",key=3):\r\n return \"\".join([chr((ord(ch)-key-ord('a'))%(ord('z')-ord('a')+1)+ord('a')) for ch in string])", "def decrypt(self, text):\n return self.encrypt(text)", "def __encrypt_text_aes__(self, text, password):\n BLOCK_SIZE = 32\n PADDING_CHAR = b'^'\n iv = Random.new().read(16)\n # key must be 32 bytes for AES-256, so the password is hashed with md5 first\n cipher = AES.new(self.__hash_md5__(password), AES.MODE_CBC, iv)\n plaintext = text.encode('utf-8')\n # plaintext must be padded to be a multiple of BLOCK_SIZE\n plaintext_padded = plaintext + (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PADDING_CHAR\n ciphertext = cipher.encrypt(plaintext_padded)\n return (\n base64.b64encode(iv),\n base64.b64encode(ciphertext),\n PADDING_CHAR\n )", "def test_encryption(e, c):\n\n#\te = int(raw_input(\"\\nEnter e from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n string = raw_input(\"\\nEnter word to encrpyt\\n\")\n for i in range(0, len(string)):\n print endecrypt(ord(string[i]), e, c)", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def caesar_encode(self, text, key):\n result_list = []\n for char in text:\n if char.isalpha():\n if char.islower():\n offset = ASCII_LOWER_OFFSET\n else:\n offset = ASCII_UPPER_OFFSET\n char = chr((ord(char) - offset + key) % ALPHABET_SIZE + offset)\n result_list.append(char)\n return ''.join(result_list)", "def recover_encrypt_pass(self):\n with open(self.key_path) as input_file:\n key = input_file.readlines()\n cipher_suite = Fernet(key[0])\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n return ciphered_text", "def Crypt(self, aString):\n kIdx = 0\n cryptStr = \"\" # empty 'crypted string to be returned\n\n # loop through the string and XOR each byte with the keyword\n # to get the 'crypted byte. 
Add the 'crypted byte to the\n # 'crypted string\n for x in range(len(aString)):\n cryptStr = cryptStr + \\\n chr( ord(aString[x]) ^ ord(self.key[kIdx]))\n # use the mod operator - % - to cyclically loop through\n # the keyword\n kIdx = (kIdx + 1) % len(self.key)\n\n return cryptStr", "def encrypt(self, text):\n text = text.upper()\n output = []\n text_list = list(text)\n for letter in text_list:\n output.append(self.atbash_dict.get(letter, letter))\n return ''.join(output)", "def encrypt_data_key(self, dataKey, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKey, str):\n dataKey = dataKey.encode('utf-8')\n cipherText= box.encrypt(dataKey).decode('cp855')\n return cipherText", "def create_key ():", "def convert_key(text):\n if text is None:\n return None\n if text == \"YESTERDAY\":\n return \"today\"\n if text == \"TODAY\":\n return \"today\"\n if text == \"MONTH TO DATE\":\n return \"month\"\n if text.startswith(\"SINCE \"):\n return text.replace(\"SINCE \", \"\").replace(\" \", \"\").lower()\n LOG.warning(\"convert_key() failed for |%s|\", text)\n return \"fail\"", "def encrypt(plaintext: str, key: str) -> str:\n return \"\".join(chr(ord(p) ^ ord(k)) for (p, k) in zip(plaintext, key))", "def encrypt(text, offset):\r\n\r\n return format_text(text, offset)", "def encryptFromString(self, data, keyobj):\n return self.encryptByteArray(bytearray(data, 'utf-8'), keyobj)", "def encrypt(key, text):\n key = _key_array(key)\n text = _text_array(text)\n aes = mxit.aes.AES()\n parts = _split(text, 16)\n encoded = []\n for part in parts:\n encoded += aes.encrypt(part, key, aes.keySize[\"SIZE_128\"])\n return encoded", "def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))", "def encrypt(self, input_file, output_file):\n self.key %= 26\n ciphertext = \"\"\n with open(input_file) as plaintext:\n self.text = plaintext.read()\n for char in self.text:\n if char.isalpha():\n if 65 <= ord(char) <= 90: #char is between A and Z\n if ord(char) + self.key <= 90:\n ciphertext += chr(ord(char) + self.key)\n elif ord(char) + self.key > 90:\n ciphertext += chr(ord(char) + self.key - 26)\n if 97 <= ord(char) <= 122:\n if ord(char) + self.key <= 122:\n ciphertext += chr(ord(char) + self.key)\n elif ord(char) + self.key > 122:\n ciphertext += chr(ord(char) + self.key - 26)\n else:\n ciphertext += char\n encrypted_file = open(output_file, 'w')\n encrypted_file.write(ciphertext)\n print \"Created file: \" + output_file", "def create_key(input, **kwargs):\n input = re.compile(r\"\\d+\").sub(_repl, input)\n input = input.replace(':', 'c')\n input = input.replace(';', 'c')\n input = input.replace('_', 'u')\n return re.sub('[^a-zA-Z]+', '', str(input)).lower()", "def encode(encryption=None):\n\n key_to_encrypt = {'a': 'q', 'b': 'v', 'c': 'x', 'd': 'z', 'e': 'y', 'f': 'w', 'g': 'u', 'h': 't', 'i': 's',\n 'j': 'r',\n 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm', 'o': 'l', 'p': 'k', 'r': 'j', 's': 'i', 't': 'h',\n 'u': 'g', 'w': 'f',\n 
'y': 'e', 'z': 'd', 'x': 'c', 'v': 'b', 'q': 'a',\n 'A': 'Q', 'B': 'V', 'C': 'X', 'D': 'Z', 'E': 'Y', 'F': 'W', 'G': 'U', 'H': 'T', 'I': 'S',\n 'J': 'R', 'K': 'P',\n 'L': 'O', 'M': 'N', 'N': 'M', 'O': 'L', 'P': 'K', 'R': 'J', 'S': 'I', 'T': 'H', 'U': 'G',\n 'W': 'F', 'Y': 'E',\n 'Z': 'D', 'X': 'C', 'V': 'B', 'Q': 'S',\n '1': '5', '2': '9', '3': '8', '4': '7', '5': '6', '6': '4', '7': '3', '8': '2', '9': '1',\n '.': ',', ',': '.', ':': ';', ';': ':', '?': '!', '!': '?', '-': '_', '_': '-', '(': ')',\n ')': '(',\n '%': '$', '$': '%', ' ': '&', '&': ' ', '+': '*', '*': '+'}\n entered_image = input(\"Image name with extension: \")\n img = Image.open(entered_image, 'r')\n\n message = input(\"Message that you want to be encoded: \")\n if (len(message) == 0):\n raise ValueError('Empty message!')\n\n e1 = monoalphabetic_encryption.Encryption(key_to_encrypt, message)\n encrypted_message = e1.encrypt()\n\n new_image = img.copy()\n putPixel(new_image, encrypted_message)\n\n new_image_name = input(\"New image name with extension: \")\n new_image.save(new_image_name, str(new_image_name.split(\".\")[1].upper()))", "def weaksauce_encrypt(text, password):\n\n offset = sum([ord(x) for x in password])\n encoded = ''.join(\n chr(min(ord(x) + offset, 2**20))\n for x in text\n )\n return encoded", "def plain_text(file_key, file_cipher_text):\n with open(file_key, 'r') as f:\n key = f.readline()\n with open(file_cipher_text, 'r') as f:\n cipher_text = f.readline()\n\n cipher_suite = Fernet(key)\n return cipher_suite.decrypt(cipher_text)", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n if len(keyword) < len(plaintext):\n for j in range(len(plaintext) - len(keyword)):\n keyword += keyword[j]\n for i in range(len(plaintext)):\n n = ord(plaintext[i])\n m = ord(keyword[i])\n if (n >= ord('A')) and (n <= ord('Z')):\n if (m - ord('A') + n) <= ord('Z'):\n ciphertext += chr(m - ord('А') + n)\n else:\n ciphertext += chr((m + n) % (1 + ord('Z')))\n else:\n if (m - ord('a') + n) <= ord('z'):\n ciphertext += chr(m - ord('a') + n)\n else:\n ciphertext += chr((m + n) % ord('a'))\n return ciphertext", "def decrypt_vigenere(ciphertext: str, keyword: str) -> str:\n plaintext = \"\"\n # PUT YOUR CODE HERE\n key_lenght = len(keyword)\n text_lenght = len(ciphertext)\n\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_a = ord('a')\n ord_A = ord('A')\n\n if ciphertext.islower():\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_a)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_a\n plaintext += chr(value)\n else:\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_A)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_A\n plaintext += chr(value)\n\n return plaintext", "def _encrypt(self, text, 
**options):\n\n raise CoreNotImplementedError()", "def encrypt(self, text):\n ciphertext = []\n # text = text.upper()\n for char in text:\n try:\n key = (self.a * self.characters.index(char) + self.b) % len(self.characters)\n # If character is not in set for cipher,\n # directly append it without transformation\n except ValueError:\n ciphertext.append(char)\n else:\n ciphertext.append(self.characters[key])\n return ''.join(ciphertext)", "def demonstrate_string_encryption_key_based(plain_text):\n try:\n # GENERATE key\n key = AESGCM.generate_key(bit_length=256)\n\n # GENERATE random nonce (number used once)\n nonce = os.urandom(12)\n\n # ENCRYPTION\n aesgcm = AESGCM(key)\n cipher_text_bytes = aesgcm.encrypt(\n nonce=nonce,\n data=plain_text.encode('utf-8'),\n associated_data=None\n )\n # CONVERSION of raw bytes to BASE64 representation\n cipher_text = base64.urlsafe_b64encode(cipher_text_bytes)\n\n # DECRYPTION\n decrypted_cipher_text_bytes = aesgcm.decrypt(\n nonce=nonce,\n data=base64.urlsafe_b64decode(cipher_text),\n associated_data=None\n )\n decrypted_cipher_text = decrypted_cipher_text_bytes.decode('utf-8')\n\n logger.info(\"Decrypted and original plain text are the same: %s\",\n decrypted_cipher_text == plain_text)\n except InvalidTag:\n logger.exception(\"Symmetric string encryption failed\")", "def encryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[0:index] + s[index+1:] +s[index]\n\n print(\"Encrypted Transformed text : \" )\n return transformedChar", "def HashForText (text):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n return __Hasher(text).hexdigest()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def test_from_text(self):\n rkeyring = dns.tsigkeyring.from_text(text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def enc(text):\n if isinstance(text, str):\n return unicode(text, 'utf-8') # TODO: fix in Python 3\n elif isinstance(text, unicode):\n return text.encode('utf-8')\n else:\n raise Exception(\"Unsupported encode format.\")", "def get_key_plain():\n if len(sys.argv) != 2:\n exit(\"Usage: python vigenere.py k\")\n\n # get plaintext\n user_input = input(\"plaintext: \")\n \n return sys.argv[1], user_input", "def serialize_text(text):\n return serialize_plaintext(text)", "def encrypt(self, text):\n output = []\n text = text.upper()\n\n for char in text:\n try:\n index = self.alpha.index(char)\n except ValueError:\n output.append(char)\n else:\n output.append(self.alpha[(index * 5 + 8) % 26])\n return \"\".join(output)", "def encrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n f = open(myTmpDir + 'pt' + str(identity) + '.bin','wb')\n f.write(msg)\n f.close()\n\n os.popen(\"rsa.exe e \" + myTmpDir + \"pt\" + str(identity) + \".bin \"+ myTmpDir + \"locEnc\" + str(identity) + \".bin\")\n\n locEncFileName = myTmpDir + \"locEnc\" + str(identity) + \".bin\"\n with open(locEncFileName, \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n digest = base64.encodestring(bytes(readFile))\n\n # TODO: overwirite\n outText.insert(tkinter.END, digest)", "def encrypt(self, text):\n\n text = text.lower()\n encrypted_word = []\n for letter in text:\n try:\n index = self.alpha.index(letter)\n except ValueError:\n encrypted_word.append(letter)\n else:\n # Uses Affine encryption function to encrypt the word\n new_index = 
((self.a*index)+self.b) % self.m\n encrypted_word.append(self.alpha[new_index])\n return \"\".join(encrypted_word)", "def string_to_key(string: str):\n try:\n # Removing all characters not in alphabet and making the string lower case\n return re.compile(\"[^A-Za-z]\").sub(\"\", string).lower()\n except TypeError:\n # If string is actually None we return None\n return None", "def vigenere_transform(operation, str):\n key = input(\"Passkey? \").upper()\n\n if operation == 'E':\n print(\"Encrypting {0} using Vigenere cipher with key {1}\".format(str, key))\n print(\"...\")\n return encrypt_vigenere(str, key)\n else:\n print(\"Decrypting {0} using Vigenere cipher with key {1}\".format(str, key))\n print(\"...\")\n return decrypt_vigenere(str, key)", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def __encrypt(string: str) -> str:\n key = 171\n result = b\"\\0\\0\\0\" + chr(len(string)).encode('latin-1')\n for i in string.encode('latin-1'):\n a = key ^ i\n key = a\n result += chr(a).encode('latin-1')\n return result", "def encrypt(self, user_input):\n # extract the message and keyword\n message = user_input[0]\n keyword = user_input[1]\n\n # generate encryption key\n cipher_key = self.__generate_key(keyword, \"encrypt\")\n\n # process and return text\n return self.__process_text(cipher_key, message)", "def encrypt(self, text):\n\t\tclean_text = message2num(text)\n\t\tencrypted = []\n\t\tplug = self._enc_plugboard(clean_text)\n\n\t\tfor letter in plug:\n\t\t\tl = self._forward(letter)\n\t\t\tl = self._reflect(self.reflector, l)\n\t\t\tl = self._backwards(l)\n\t\t\tencrypted.append(l)\n\n\t\tencrypted = self._enc_plugboard(encrypted)\n\t\tencrypted = \"\".join(POS2L[l] for l in encrypted)\n\n\t\treturn encrypted", "def generate_symmetric_key():\n return Fernet.generate_key()", "def store_Fernet_key(key,filename):\n\tfich = open(str(filename) + '.key', 'wb')\n\tfich.write(key) # The key is type bytes still\n\tfich.close()", "def crypt(text, passphrase, which):\n text = scrub_string(text)\n passphrase = scrub_string(passphrase)\n letters = (\n shift_string_by_letter(ch, passphrase[i % len(passphrase)], which)\n for i, ch in enumerate(text)\n )\n return \"\".join(letters)", "def test_to_text(self):\n tkeyring = dns.tsigkeyring.to_text(rich_keyring)\n self.assertEqual(tkeyring, text_keyring)", "def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)", "def _encrypted_user_photo_key_str(self):\r\n face_aes_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"FACE_IMAGE_AES_KEY\"]\r\n face_aes_key = face_aes_key_str.decode(\"hex\")\r\n rsa_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"RSA_PUBLIC_KEY\"]\r\n rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)\r\n\r\n return rsa_encrypted_face_aes_key.encode(\"base64\")", "def encrypt(text, offset):\n encrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n encrypted_character = chr(ord(char))\n elif ord(char) < 90:\n encrypted_character = ord(char) + offset\n if encrypted_character > 90:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n 
else:\n encrypted_character = ord(char) + offset\n if encrypted_character > 122:\n encrypted_character -= 26\n encrypted_character = chr(encrypted_character)\n encrypted_text += encrypted_character\n\n return encrypted_text", "def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def encryptor(text: bytes, IV: bytes, key: bytes) -> bytes:\n \n # Given\n prepend_string = \"comment1=cooking%20MCs;userdata=\"\n append_string = \";comment2=%20like%20a%20pound%20of%20bacon\"\n\t\n plaintext = text.replace(b';', b'\";\"').replace(b'=', b'\"=\"')\n ciphertext = AES_CBC_encrypt(PKCS7_pad(plaintext, len(key)), IV, key)\n return ciphertext", "def encrypt_vigenere(plaintext, keyword):\n ciphertext = ''\n new_keyword = ''\n for i in range(len(plaintext)):\n num_word = ord(plaintext[i])\n while len(plaintext) > len(new_keyword):\n new_keyword += keyword\n num_key = ord(new_keyword[i])\n if num_key <= 90:\n num_key -= 65\n elif num_key >= 97:\n num_key -= 97\n if num_word <= 90:\n if (num_key+num_word) > 90:\n num_word -= 26\n else:\n if (num_key+num_word) > 122:\n num_word -= 26\n ciphertext += chr(num_word+num_key)\n return ciphertext", "def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)", "def encodeVigenere(self, key):\n\n key = key.upper().replace(\" \", \"\")\n encode = Vig(key)\n cipherText = encode.encode(self.planeText)\n \n if (self.verbose == 1):\n print(cipherText)\n \n return(cipherText)", "def key(self, value=None):\n if self.crypt_method == 'C':\n key_type = \"number\"\n else:\n key_type = \"string\"\n\n input_message = f\"Please enter a {key_type} as a \" \\\n f\"{self.crypt_type}ion key\\n>> \"\n if value is None:\n key = input(input_message)\n else:\n key = value\n\n is_valid_key, key = Check.is_valid_key(key, self.crypt_method)\n if is_valid_key:\n self._key = key\n else:\n raise ValueError(f\"Key{key} is invalid\")", "def makekey(function, *args, **kwargs) -> str:\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key", "def encode(text, password):\r\n\tstep_index = 0\r\n\tencoded_text = ''\r\n\tfor letter in text:\r\n\t\tencoded_text += next_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn encoded_text", "def __generate_key(self, keyword, action):\n encryption_key = {}\n\n # get separate modified & unmodified alpha objects\n plain_alpha = self.helpers.alphabet(\"list\")\n cipher_alpha = self.helpers.alphabet(\"list\")\n\n # remove duplicate letters from the keyword\n keyword_stripped = self.helpers.unique(keyword)\n\n # remove keyword letters from cipher alpha\n for i in range(len(keyword_stripped)):\n cipher_alpha.remove(keyword_stripped[i])\n\n # append keyword letters to cipher alpha\n cipher_alpha = keyword_stripped + cipher_alpha\n\n if action == \"encrypt\":\n # generate encryption key\n for i in range(len(plain_alpha)):\n 
encryption_key[plain_alpha[i]] = cipher_alpha[i]\n elif action == \"decrypt\":\n # generate decryption key\n for i in range(len(plain_alpha)):\n encryption_key[cipher_alpha[i]] = plain_alpha[i]\n else:\n raise ValueError(\"Cheatin', uh?\")\n\n return encryption_key", "def get_key(name):\n import os\n salt = os.urandom(16)\n name = name.encode()\n from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.backends import default_backend\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=100000,\n backend=default_backend())\n import base64\n key = base64.urlsafe_b64encode(kdf.derive(name))\n return key", "def encode_email(email, key):\n return", "def find_key(plaintext1, plaintext2):\n\n # if plain1 XOR cipher1 == plain2 XOR cipher2\n xor1 = strings_xor(list(plaintext1.encode()), list(CIPHERTEXT1))\n xor2 = strings_xor(list(plaintext2.encode()), list(CIPHERTEXT2))\n if xor1 == xor2:\n return xor1.encode()\n\n # if plain1 XOR cipher2 == plain2 XOR cipher1\n xor1 = strings_xor(list(plaintext1.encode()), list(CIPHERTEXT2))\n xor2 = strings_xor(list(plaintext2.encode()), list(CIPHERTEXT1))\n if xor1 == xor2:\n return xor1.encode()\n\n return None", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. --> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def encrypt(cls, plaintext, aad, key, iv):" ]
[ "0.6408409", "0.6371177", "0.63681877", "0.6212298", "0.61476415", "0.60205185", "0.60133415", "0.60062563", "0.6004446", "0.5816517", "0.5788318", "0.5727161", "0.56803447", "0.56624347", "0.5623659", "0.56033343", "0.560295", "0.55829906", "0.5578667", "0.5558498", "0.5555277", "0.5529817", "0.55219585", "0.55100524", "0.55099773", "0.5496666", "0.546912", "0.5459327", "0.5457194", "0.54402053", "0.5433752", "0.5416148", "0.54044837", "0.53962094", "0.5384977", "0.5374268", "0.5373688", "0.53684396", "0.53620696", "0.5357686", "0.5350703", "0.5341708", "0.53390324", "0.53321403", "0.53226244", "0.53205097", "0.529828", "0.5294416", "0.5293679", "0.5292488", "0.52849376", "0.52752256", "0.5271905", "0.5270045", "0.5265118", "0.52586555", "0.5254675", "0.5249991", "0.5243344", "0.5235718", "0.52249676", "0.5223514", "0.5194058", "0.5192591", "0.519175", "0.5191739", "0.5179525", "0.51780415", "0.51703864", "0.51647353", "0.51549566", "0.51525", "0.5147639", "0.5145504", "0.51429904", "0.5137442", "0.5135708", "0.5134955", "0.51326174", "0.5124815", "0.51186", "0.5114379", "0.5103466", "0.5097508", "0.50922596", "0.5090383", "0.5085826", "0.5074838", "0.5073431", "0.5072858", "0.5070852", "0.5062184", "0.505437", "0.50396746", "0.50387615", "0.50337076", "0.50261676", "0.50237846", "0.50227135", "0.5017928" ]
0.85045666
0
Manually enter a password for encryption on keyboard.
def input_password(self):  # pragma: no cover
        password = input("Please enter your secret key (case sensitive): ")
        self.set_password(password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enter_password(self):", "def enter_password(self):\n self.password.clear()\n self.password.click()\n self.password.send_keys(TestData.PASSWORD)\n sleep(TestData.DELAY)\n return self.password", "async def password(self, ctx):\n pass", "def prompt_pass():\n msg = \"Enter Password: \"\n password = getpass.getpass(msg)\n return password", "def input_user_pass(self, user_pass):\n self.locate_element_by_css_selector(PASSWORD_SELECTOR).send_keys(user_pass)", "def OnTextCtrlPasswordText(self, event):\r\n\t\tself._password = event.GetString()", "def ask_password(self, prompt: str) -> str:\n raise NotImplementedError", "def password(self) -> str:", "def set_Password(self, value):\n super(DownloadDocumentInputSet, self)._set_input('Password', value)", "def setpassword(self, pwd):\n pass", "def focus_password(self, **kws):\r\n self.password_box.focus()", "def ask_password(db_params: Dict[str, str]) -> None:\r\n db_params[Toml.PASSWORD] = getpass('DB Password: ')", "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "def take_pass(text_to_prompt):\r\n return prompt(text_to_prompt, is_password=True)", "def AskPassword(prompt, default='', id=264, ok=None, cancel=None):\n raise NotImplementedError(\"AskPassword\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(caller, input):\n caller.msg(echo=True)\n input = input.strip()\n text = \"\"\n options = (\n {\n \"key\": \"_default\",\n \"desc\": \"Enter your password.\",\n \"goto\": \"password\",\n },\n )\n\n # Check the password\n player = caller.db._player\n # If the account is locked, the user has to wait (maximum\n # 3 seconds) before retrying\n if player.db._locked:\n text = \"|gPlease wait, you cannot enter your password yet.|n\"\n return text, options\n\n caller.msg(echo=True)\n bans = ServerConfig.objects.conf(\"server_bans\")\n banned = bans and (any(tup[0] == player.name.lower() for tup in bans) \\\n or any(tup[2].match(caller.address) for tup in bans if tup[2]))\n\n if not player.check_password(input):\n caller.msg(echo=False)\n text = dedent(\"\"\"\n |rIncorrect password.|n\n Type |yb|n to go back to the login screen.\n Or wait 3 seconds before trying a new password.\n \"\"\".strip(\"\\n\"))\n\n # Loops on the same node\n player.scripts.add(WrongPassword)\n scripts = player.scripts.get(\"wrong_password\")\n if scripts:\n script = scripts[0]\n script.db.session = caller\n else:\n print \"Cannot retrieve the 'wrong_password' script.\"\n\n 
options = (\n {\n \"key\": \"b\",\n \"desc\": \"Go back to the login screen.\",\n \"goto\": \"start\",\n },\n {\n \"key\": \"_default\",\n \"desc\": \"Enter your password again.\",\n \"goto\": \"password\",\n },\n )\n elif banned:\n # This is a banned IP or name!\n string = dedent(\"\"\"\n |rYou have been banned and cannot continue from here.|n\n If you feel this ban is in error, please email an admin.\n \"\"\".strip(\"\\n\"))\n caller.msg(string)\n caller.sessionhandler.disconnect(\n caller, \"Good bye! Disconnecting...\")\n else:\n # The password is correct, we can log into the player.\n if not player.email:\n # Redirects to the node to set an e-mail address\n text = text_email_address(player)\n options = (\n {\n \"key\": \"_default\",\n \"desc\": \"Enter your e-mail address.\",\n \"goto\": \"email_address\",\n },\n )\n elif not player.db.valid:\n # Redirects to the node for the validation code\n text = \"Enter your received validation code.\"\n options = (\n {\n \"key\": \"_default\",\n \"desc\": \"Enter your validation code.\",\n \"goto\": \"validate_account\",\n },\n )\n else:\n _login(caller, player)\n text = \"\"\n options = _options_choose_characters(player)\n\n return text, options", "def generic_input_password(self, element_id, password):\n self._generic_input(element_id, password, False)", "def prompt_encrypt(self):\r\n print(\"Please copy/paste key and secret from MtGox and\")\r\n print(\"then provide a password to encrypt them.\")\r\n print(\"\")\r\n\r\n\r\n key = input(\" key: \").strip()\r\n secret = input(\" secret: \").strip()\r\n while True:\r\n password1 = getpass.getpass(\" password: \").strip()\r\n if password1 == \"\":\r\n print(\"aborting\")\r\n return\r\n password2 = getpass.getpass(\"password (again): \").strip()\r\n if password1 != password2:\r\n print(\"you had a typo in the password. try again...\")\r\n else:\r\n break\r\n\r\n # pylint: disable=E1101\r\n hashed_pass = hashlib.sha512(password1.encode(\"utf-8\")).digest()\r\n crypt_key = hashed_pass[:32]\r\n crypt_ini = hashed_pass[-16:]\r\n aes = AES.new(crypt_key, AES.MODE_OFB, crypt_ini)\r\n\r\n # since the secret is a base64 string we can just just pad it with\r\n # spaces which can easily be stripped again after decryping\r\n print(len(secret))\r\n secret += \" \" * (16 - len(secret) % 16)\r\n print(len(secret))\r\n secret = base64.b64encode(aes.encrypt(secret)).decode(\"ascii\")\r\n\r\n self.config.set(\"gox\", \"secret_key\", key)\r\n self.config.set(\"gox\", \"secret_secret\", secret)\r\n self.config.save()\r\n\r\n print(\"encrypted secret has been saved in %s\" % self.config.filename)", "def ask_password(ip):\n password_question = [\n {\n 'type': 'password',\n 'message': 'Enter the password of the machine with the IP address ' + ip + ':',\n 'name': 'password'\n }\n ]\n\n password_answer = prompt(password_question, style=style)\n pwd = password_answer[\"password\"]\n return pwd", "def _prompt_for_password(self, args):\n if not args.password:\n args.password = getpass.getpass(\n prompt='\"--password\" not provided! 
Please enter password for host %s and user %s: '\n % (args.host, args.user))\n return args", "def aisappium_input_password(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing password into text field '%s'\" % locator)\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def prompt_for_password(args):\n if not args.password:\n args.password = getpass.getpass(\n prompt='Enter password for host %s and user %s: ' %\n (args.host, args.user))\n return args", "def ask_input(prompt = '', is_password = False):\n\n while True:\n answer = getpass.getpass() if is_password == True else input(prompt)\n if answer is not '':\n return answer", "def set_Password(self, value):\n super(AddressValidationInputSet, self)._set_input('Password', value)", "def change_pwd(self):\r\n if self.field_pwd.text() == \"\":\r\n self.label_chg_pwd.setText(\"Password cannot be empty\")\r\n return None\r\n self.encryptor.set_key_from_password(self.field_pwd.text())\r\n self.label_chg_pwd.setText(\"Password typed\")\r\n self.label_chg_pwd.setStyleSheet(\"color:#01ac2d\")\r\n self.label_chg_key.clear()\r\n self.field_key.clear()\r\n QtWidgets.QMessageBox.information(self, \"Password Change\", \r\n (\"Your password has been successfully changed.\\n\\n\"\r\n \"You can now encrypt / decrypt files.\"))", "def getpass(self, prompt):\r\n return getpass.getpass(prompt)", "def get_user_password(text):\n return getpass.getpass(text)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def store_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__password = aes_cipher.encrypt(clr_passwd)", "def on_encryptionKeyEdit_textChanged(self, txt):\n self.passwordMeter.checkPasswordStrength(txt)\n self.__updateUI()", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def encrypt_password(pass_to_encrypt):\n\n temp_key = get_crypt_key()\n tk = Fernet(temp_key)\n\n pass_to_encrypt = pass_to_encrypt.encode(\"UTF-8\")\n return tk.encrypt(pass_to_encrypt)", "def set_password(self, password):\n self.password = password", "def set_password(self, password):\n from kalon.auth import encrypt_password\n self.document.password = encrypt_password(password)", "def password_error(self, msg):\n print('\\nBad password: %s' % msg, file=self.console)\n import getpass\n self.password = getpass.getpass('Password: ')\n self.verifiedpassword = getpass.getpass('Password again: ')", "def get_password(self,text):\r\n return self.driver.find_element(*SinginPage.password).send_keys(text)", "def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))", "def GetPassword(self):\n pass", "def password_request():\n while True:\n print(\"You must input the 'password' to authorize the update.\")\n print(\"***NOTE: the password is case sensitive.***\\n\")\n\n password = pwinput.pwinput(\"Please enter your password: \\n\")\n\n if validate_password(password):\n print(\"Loading systems...\")\n break", "def set_password(self, 
password):\n self.PASS = password", "def password(self, password: str):\n\n self._password = password", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def set_password(self, password):\n self.PASSWORD = password", "def acceptsPassword(self):\r\n raise NotImplementedError()", "def askPass( user, host ):\n prompt = \"Password for user {} on host {}: \".format( user, host )\n password = getpass.getpass( prompt )\n return password", "def do_masterpassword(self, masterpassword):\n if masterpassword:\n if self.taskstarted == True:\n self.masterpassword = masterpassword\n else:\n if self.taskstarted == False:\n print(self.cl.red(\"[!] <ERROR> You need to start a new KeePass Interaction.\"))\n print(self.cl.red(\"[!] <ERROR> Start this with 'new' from the menu.\"))\n print(\"[!] <ERROR> You need to supply the command for typing\")", "def password(self):\n return self._password()", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "async def handle_password_prompt(self):\n self.applog.info(\"sending password %s\" % self.password)\n self._flush_buffer()\n self.pexpect_child.sendline(self.password)\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT, pexpect.EOF], timeout=10\n )\n if ret == 0:\n return True\n self.applog.info(\"handle_password_prompt failed with %s\" % ret)\n return False", "def user_encrypt_password(data=None, **kw):\n if 'password' in data:\n data['password'] = encrypt_password(data['password'])", "def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))", "def password_listener(target, value, oldvalue, initiator):\n return pwd_context.encrypt(value)", "def fake_login(self):\n self.username = self.console.input('Please type your username here:')\n self.password = self.console.input('Please type your password here:')", "def passphrase(self):\n password = self.entry.password\n if password:\n return self.entry.password.encode('UTF-8')\n else:\n return None", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def 
password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"password\")", "def login(self):\n def twobytwo(s):\n i = 0\n while i < len(s):\n yield s[i:i + 2]\n i += 2\n sha1_pass_bytes = sha1(self._password).hexdigest()\n self.command('PASS: \"%s\"' % ' '.join(twobytwo(sha1_pass_bytes.upper())))", "def set_password() -> \"Function\":\n password = \"\"\n\n def inner():\n nonlocal password\n if password == \"\":\n password = get_password()\n return password\n\n return inner", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def create_pwd_login_internal(password, re_enter_password):\r\n if g.platform == 'android':\r\n 'Enter the password in Create password text box'\r\n flag1 = ui_controls.text_box(get_obj_identifier('login_createPassword_txt'), value=password)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n flag2 = ui_controls.hide_keyboard()\r\n\r\n 'Enter the confirm password in Confirm Password text box'\r\n flag3 = ui_controls.text_box(get_obj_identifier('login_confirmPassword_txt'), value=re_enter_password)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n flag4 = ui_controls.hide_keyboard()\r\n\r\n 'Enter password hint in Password Hint text box'\r\n #flag5 = ui_controls.text_box(get_obj_identifier('login_passwordHint_txt'), value=hint)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n #flag6 = ui_controls.hide_keyboard()\r\n\r\n 'Click on create password login button'\r\n flag7 = ui_controls.button(get_obj_identifier('login_createPasswordLogin_btn'))\r\n\r\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag7) else True\r\n else:\r\n 'Setting values on Create Password text box in ios. 
Using set value as send_keys failing here'\r\n flag1 = ui_controls.setValue(get_obj_identifier('login_createPassword_txt'), password)\r\n sleep(3)\r\n\r\n 'Setting values on Confirm Password text box in ios.'\r\n flag2 = ui_controls.setValue(get_obj_identifier('login_confirmPassword_txt'), value=re_enter_password)\r\n\r\n 'Setting values on Password Hint text box in ios.'\r\n # flag3 = ui_controls.setValue(get_obj_identifier('login_passwordHint_txt'), hint)\r\n sleep(3)\r\n\r\n 'Click on Create Password button in IOS'\r\n flag4 = ui_controls.button(get_obj_identifier('login_createPasswordLogin_btn'))\r\n\r\n status = False if not (flag1 and flag2 and flag4) else True\r\n return status", "def get_password_from_user():\n pwd = ''\n keyboard = xbmc.Keyboard('', ADDON_NAME + ': ' + localise(32022), True)\n keyboard.doModal()\n if keyboard.isConfirmed():\n pwd = keyboard.getText()\n return pwd", "def _password_prompt(question: str, console: io.IO) -> str:\n console.tell(question)\n while True:\n password1 = console.getpass('Password: ')\n try:\n _password_validate(password1)\n except ValueError as e:\n console.error(e)\n continue\n password2 = console.getpass('Password (again): ')\n if password1 != password2:\n console.error('Passwords do not match, please try again')\n continue\n return password1", "def _set_password(self, cr, uid, id, password, context=None):\n encrypted = self._crypt_context(\n cr, uid, id, context=context).encrypt(password)\n print(password)\n print(encrypted)\n self._set_encrypted_password(cr, uid, id, encrypted, context=context)\n self._set_password_again(cr, uid, id, password, context=context)", "def switchPassword(password):\n bits = base64.b64decode(password)\n bits = bytes([x ^ y for (x, y) in zip(settings.BITMASK, bits)])\n return base64.b64encode(bits).decode()", "def islandPassword(self, click, password):\n\t\tpos = mouse.get_pos()\n\t\tdef msg(text, length):\n\t\t\t\"\"\" Render message \"\"\"\n\t\t\tself.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))\n\t\t\tself.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))\n\t\t\tself.treasure.render(True, False, False, False, self.message)\n\t\t\t# Render and pause\n\t\t\tdisplay.flip()\n\t\t\ttime.wait(length)\n\n\t\t# Blit background\n\t\t# self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))\n\n\t\tkeysDict = {\n\t\t\t\"1\": Rect(96, 72, 170, 90),\n\t\t\t\"2\": Rect(315, 72, 170, 90),\n\t\t\t\"3\": Rect(531, 72, 170, 90),\n\t\t\t\"4\": Rect(96, 200, 170, 90),\n\t\t\t\"5\": Rect(315, 200, 170, 90),\n\t\t\t\"6\": Rect(531, 200, 170, 90),\n\t\t\t\"7\": Rect(96, 322, 170, 90),\n\t\t\t\"8\": Rect(315, 322, 170, 90),\n\t\t\t\"9\": Rect(531, 322, 170, 90),\n\t\t\t\"0\": Rect(315, 451, 170, 90), \n\t\t\t\"cancel\": Rect(803, 72, 170, 90),\n\t\t\t\"clear\": Rect(803, 200, 170, 90),\n\t\t\t\"enter\": Rect(803, 322, 170, 90)\n\t\t}\n\t\tif not self.passwordMsgFinished:\n\t\t\tmsg(\"Enter Password\", 1000)\n\t\t\tself.passwordMsgFinished=True\n\t\tcorrect = [\"6\", \"3\", \"8\", \"5\", \"4\", \"6\"]#original password\n\t\tfor key in keysDict:\n\t\t\tif keysDict[key].collidepoint(pos):\n\t\t\t\tif click:\n\t\t\t\t\t# print(\"Pressed\", key)\n\t\t\t\t\tif key!=\"cancel\" and key!=\"clear\" and key!=\"enter\":\n\t\t\t\t\t\tpassword.append(key)\n\t\t\t\t\tif key== \"cancel\":\n\t\t\t\t\t\tself.fade.fadeDark(self.maps.allScenes[\"finalisland\"][0], self.screen, (0, 0))\n\t\t\t\t\t\t# Create new scene\t\n\t\t\t\t\t\tself.maps.newScene(\"finalisland\")\n\t\t\t\t\t\t# 
Set player coordinates\n\t\t\t\t\t\tself.player.x = 510\n\t\t\t\t\t\tself.player.y = 46\n\t\t\t\t\t\t# Reset fade\n\t\t\t\t\t\tself.fade.reset()\n\t\t\t\t\tif key==\"clear\":\n\t\t\t\t\t\tpassword.clear()\n\t\t\t\t\tif key == \"enter\":\n\t\t\t\t\t\tif not (\"key\" in self.treasure.collectedItems):\n\t\t\t\t\t\t\tmsg(\"Key Missing\", 1500)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif password == correct:\n\t\t\t\t\t\t\t\t#print(\"correct password\")\n\t\t\t\t\t\t\t\tself.fade.fadeDark(self.maps.allScenes[\"finalisland\"][0], self.screen, (0, 0))\n\t\t\t\t\t\t\t\t# Create new scene\t\n\t\t\t\t\t\t\t\tself.maps.newScene(\"finalisland\")\n\t\t\t\t\t\t\t\t# Set player coordinates\n\t\t\t\t\t\t\t\tself.player.x = 516\n\t\t\t\t\t\t\t\tself.player.y = 46\n\t\t\t\t\t\t\t\t# Reset fade\n\t\t\t\t\t\t\t\tself.fade.reset()\n\t\t\t\t\t\t\t\tmsg(\"You have succesfully opened the safe.\", 3000)\n\t\t\t\t\t\t\t\tself.gameWon=True\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmsg(\"Wrong password. Press clear and try again.\", 1500)", "def log_in_password(self, password):\n waiter.find_write(self.driver, 'password', password, by=NAME)", "def on_passwordCheckBox_toggled(self, isOn):\n if isOn:\n self.input.setEchoMode(QLineEdit.Password)\n else:\n self.input.setEchoMode(QLineEdit.Normal)", "def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)", "def add_password_input(\n self,\n name: str,\n label: Optional[str] = None,\n placeholder: Optional[str] = None,\n ) -> None:\n self._client.add_element(\n name=name, element=TextField(label=label, value=placeholder, password=True)\n )", "def onPasswordFocusIn(self,event):\n if self.obj2.get() == \"New Password\":\n self.obj2.delete(0,END)", "def text_typing_block(self):\n\t # open the database using the masterpassword\n typing_text = 'Send({})\\n'.format(self.masterpassword)\n\n # add in exit - this is achieved using CTRL + q\n typing_text += 'Sleep(15677)\\n'\n typing_text += \"SendKeepActive('KeePass')\\n\"\n typing_text += 'Send(\"^q\")\\n'\n typing_text += \"; Reset Focus\\n\"\n typing_text += 'SendKeepActive(\"\")'\n\n return textwrap.indent(typing_text, self.indent_space)", "def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. Continuing...\") \n return passy" ]
[ "0.81982946", "0.7473501", "0.7234966", "0.7115085", "0.70761955", "0.7038702", "0.693298", "0.68611926", "0.68122035", "0.67915016", "0.67278564", "0.67168885", "0.6711128", "0.67014295", "0.66786796", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.66681784", "0.6648841", "0.6580645", "0.6552636", "0.65179044", "0.65161204", "0.65135264", "0.6452248", "0.64482844", "0.6428498", "0.63997483", "0.6386326", "0.6382127", "0.63622737", "0.6355577", "0.63357073", "0.632049", "0.632049", "0.632049", "0.632049", "0.6319766", "0.6314133", "0.62859684", "0.62633157", "0.625626", "0.6249111", "0.6247365", "0.62414", "0.6236894", "0.6236136", "0.62324154", "0.6204806", "0.62038016", "0.6200925", "0.61840296", "0.61821663", "0.61785656", "0.6177012", "0.6163865", "0.61490643", "0.6146791", "0.6143447", "0.6140038", "0.6138022", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.61375594", "0.6129404", "0.61287224", "0.61244535", "0.6115429", "0.6109917", "0.61036074", "0.6101498", "0.61009014", "0.60924566", "0.6081051", "0.6072906", "0.6072464", "0.6072282", "0.6066762", "0.60654116", "0.6063046" ]
0.78404224
1
Set a new password for encryption.
def set_password(self, password):
        self.__init__(password=password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setpassword(self, pwd):\n pass", "def set_password(self, value):\n # Salt need to be generated before set password\n m = hashlib.sha256()\n m.update('-'.join([\n str(datetime.now()),\n config.get('security.password_salt')\n ]))\n self.salt = m.hexdigest()\n self.password_pending = False\n self.password = self.__encrypt(value)", "def set_password(self, password):\n self.password = password", "def set_password(self, password):\n self.password = md5crypt(password, gen_salt())", "def _set_password(self, cr, uid, id, password, context=None):\n encrypted = self._crypt_context(\n cr, uid, id, context=context).encrypt(password)\n print(password)\n print(encrypted)\n self._set_encrypted_password(cr, uid, id, encrypted, context=context)\n self._set_password_again(cr, uid, id, password, context=context)", "def set_new_password(self, new_password):\n self.password = new_password", "def set_password(self, password):\n self.PASS = password", "def set_password(self, password):\n from kalon.auth import encrypt_password\n self.document.password = encrypt_password(password)", "def set_password(self, password):\n self.PASSWORD = password", "def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def set_password(self, password):\n self.password = generate_password_hash(password)", "def _set_password(self, password):\n self._password = generate_password_hash(password)", "def set_password(self, password):\n self.password = self.hash_password(password)", "def set_password(self, password):\n self.cloudserver.change_password(password)", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def password(self, password):\n\n self._password = password", "def set_password(self, password):\n self.password_hash = generate_password_hash(str(password))", "def set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def set_password(self, password):\n self.password_hash = generate_password_hash(f\"{password}{self.user_salt}\")", "def set_password(self, value):\n hashed = bcrypt.encode(value)\n self._password = unicode(hashed)", "def set_password(self, password):\n self.authentication.password = password", "def password(self, password):\n self.password_hash = generate_password_hash(password)\n self.password_set = True", "def password(self, password: str):\n\n self._password = password", "def set_password(self, password):\n\n self.password = bcrypt.generate_password_hash(password)", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", 
"def set_password(self, raw_password: str):\n self.new_password = raw_password", "def password(self, password) :\n\t\ttry :\n\t\t\tself._password = password\n\t\texcept Exception as e:\n\t\t\traise e", "def set_encryption_password(session, password, old_password=None,\n return_type=None, **kwargs):\n verify_not_none(password, \"password\")\n\n body_values = {'encryption_pwd': password}\n if old_password:\n body_values['old_encryption_pwd'] = old_password\n\n path = '/api/settings/encryption.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)", "def set_password(self, service, username, password):\n segments = range(0, len(password), self._max_password_size)\n password_parts = [password[i : i + self._max_password_size] for i in segments]\n for i, password_part in enumerate(password_parts):\n curr_username = username\n if i > 0:\n curr_username += '{{part_%d}}' % i\n self._keyring.set_password(service, curr_username, password_part)", "def password(self, password):\n self._password = password\n return self", "def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')", "def set_password(self, password):\n self._java_ref.setPassword(password)", "def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()", "def set_admin_password(self, instance, new_pass):\n pass", "def set_password(self, raw_password):\n self.password = security.generate_password_hash(raw_password, length=12)", "def set_password(self, raw_password):\n self.password = security.generate_password_hash(raw_password, length=12)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password(self, password):\n\n self.password_hash = generate_password_hash(password)", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def set_Password(self, value):\n super(DownloadDocumentInputSet, self)._set_input('Password', value)", "def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value", "def change_password(self, password, newpassword):\n cred = {\"newpasswd\": newpassword, \"passwd\": password}\n return self.put(\"passwd\", cred)", "def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")", "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def password(self, password):\n if password is None:\n self._password = None\n else:\n self._password = generate_password_hash(password)", "def _set_password(self, password):\r\n hashed_password = password\r\n\r\n if isinstance(password, unicode):\r\n password_8bit = password.encode('UTF-8')\r\n else:\r\n password_8bit = password\r\n\r\n # Hash a password for the first time, with a randomly-generated salt\r\n salt = bcrypt.gensalt(10)\r\n hashed_password = bcrypt.hashpw(password_8bit, salt)\r\n\r\n # Make sure the hased password is an UTF-8 object at the end of the\r\n # process because SQLAlchemy _wants_ a unicode object for Unicode\r\n # fields\r\n if 
not isinstance(hashed_password, unicode):\r\n hashed_password = hashed_password.decode('UTF-8')\r\n\r\n self._password = hashed_password", "def set_password(self, raw_password):\n if raw_password is None:\n self.set_unusable_password()\n else:\n import random\n algo = PASSWORD_ALGO\n salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]\n hsh = get_hexdigest(algo, salt, raw_password)\n self.password = '%s$%s$%s' % (algo, salt, hsh)", "def store_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__password = aes_cipher.encrypt(clr_passwd)", "def setPassword(self, unhashPass):\n\t\tself.passHash = generate_password_hash(unhashPass)", "def update_password(self, username, password):\n self.update(('Password', password), username)", "def password(self, password):\n if password is not None and len(password) > 30:\n raise ValueError(\"Invalid value for `password`, length must be less than or equal to `30`\")\n\n self._password = password", "def setpass(self, type, key):\n self.data.passwords[type] = key\n self.save()", "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "def hash_password(self, password):\n self.password = pwd_context.encrypt(password)", "def SetPassword(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def set_Password(self, value):\n super(AddressValidationInputSet, self)._set_input('Password', value)", "def password(self):\n raise RuntimeError(\"Password can not be read, only set\")", "def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)", "def update_password(self, new_password=None):\n\n self.password = generate_password_hash(new_password)\n\n if self.save(verbose=False):\n self.logger.warn('Updated password! 
%s' % self)\n else:\n raise AttributeError('Password update failed!')", "def set_password(username, new_password):\n if not validate_password(new_password):\n return \"salasana on väärää muotoa\"\n new_password_hash = generate_password_hash(new_password)\n sql = \"UPDATE users \" \\\n \"SET password=:new_pw \" \\\n \"WHERE username=:username\"\n db.session.execute(sql, {\"new_pw\": new_password_hash, \"username\": username})\n db.session.commit()\n return \"ok\"", "def update_password(self, pwd):\n self.password = bcrypt.generate_password_hash(pwd).decode('utf8')", "def save_password(self, new_password):\n # 55 iterations takes about 100 ms on a Netgear WNDR3800 or about 8ms on a\n # Core2 Duo at 1200 MHz.\n hashed = pbkdf2.crypt(new_password, iterations=55)\n self.write(self.password_filename, hashed)", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def change_password(change_account):\n change_data(change_account, changed_data='password')", "def set_session_password(self, pwd):\n\n if (self.__rootpwd == None):\n self.__rootpwd = hashlib.md5(pwd).hexdigest()\n else:\n self.__change_password(pwd)", "def wifi_password(self):\n raise RuntimeError(\"Password can not be read, only set\")", "def set_password_hash(self, password):\n salt = bcrypt.gensalt()\n self.password_hash = bcrypt.hashpw(password.encode(), salt)", "def setPassword(self, password, hashed=False):\n if hashed or self.hashed:\n self.hashed = True\n self.password = utils.saltHash(password)\n else:\n self.password = password", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def store_lc_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__lc_password = aes_cipher.encrypt(clr_passwd)", "def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset", "def store_password_in_keyring(username, password):\n return keyring.set_password(KEYRING_SYSTEM, username, password,)", "def hash_password(self):\n self.__password = self.str_to_hash(self.__password)", "def auth_password(self, auth_password):\n\n self._auth_password = auth_password", "def set_password(self,\n\t password,\n\t user='',\n\t shutit_pexpect_child=None,\n\t note=None):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.set_password(password,user=user,note=note)", "def test_password_set(self):\r\n tst = User()\r\n tst.password = self.test_password\r\n\r\n self.assertEqual(\r\n len(tst.password),\r\n 60,\r\n \"Hashed should be 60 char long: \" + tst.password)\r\n self.assertEqual(\r\n '$2a$',\r\n tst.password[:4],\r\n \"Hash should start with the right complexity: \" + tst.password[:4])", "def set_password(self, user, password):\n hashed_password = self.hash_password(password)\n server_name = self.get_server_name()\n hookenv.log(\"Storing hash: {}\".format(hashed_password), hookenv.DEBUG)\n result = self.pgsql_query(\n \"UPDATE users SET password_hash = '{}' WHERE name = 
'@{}:{}';\".format(\n hashed_password, user, server_name\n )\n )\n return result", "def password(self):\n raise AttributeError(\"password: write-only field\")", "def user_encrypt_password(data=None, **kw):\n if 'password' in data:\n data['password'] = encrypt_password(data['password'])", "def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)", "def set_password(name, password):\n s_file = \"/etc/shadow\"\n ret = {}\n if not os.path.isfile(s_file):\n return ret\n lines = []\n with salt.utils.files.fopen(s_file, \"r\") as ifile:\n for line in ifile:\n comps = line.strip().split(\":\")\n if comps[0] != name:\n lines.append(line)\n continue\n comps[1] = password\n line = \":\".join(comps)\n lines.append(\"{}\\n\".format(line))\n with salt.utils.files.fopen(s_file, \"w+\") as ofile:\n lines = [salt.utils.stringutils.to_str(_l) for _l in lines]\n ofile.writelines(lines)\n uinfo = info(name)\n return uinfo[\"passwd\"] == password", "def hash_password(self, original_password):\n self.password = generate_password_hash(original_password)", "async def password(self, ctx):\n pass", "def set_password(self, password: Optional[str]) -> None:\n if password is None:\n self.set_unusable_password()\n return\n\n if not check_password_strength(password):\n raise PasswordTooWeakError\n\n super().set_password(password)", "def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))", "def update_password(self, password):\n self.password = scryptsalsa208sha256_str(password.encode('utf-8')).decode('utf-8')\n return True", "def changePassword(self, loginName, password, newPassword):\n return self.talk(\n 'purchase',\n data=self.__makeLoginDict(loginName, password,\n {'newPassword': newPassword}))", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def encrypt_password(pass_to_encrypt):\n\n temp_key = get_crypt_key()\n tk = Fernet(temp_key)\n\n pass_to_encrypt = pass_to_encrypt.encode(\"UTF-8\")\n return tk.encrypt(pass_to_encrypt)", "def test_setPassword(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.setPassword(u'blahblah')\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)\n d = self._requestAvatarId(UsernamePassword(username, account.passwordHash))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)" ]
[ "0.8377578", "0.8145898", "0.8129737", "0.80971813", "0.8089785", "0.80798155", "0.80655986", "0.8056996", "0.8050682", "0.7988545", "0.7963893", "0.7942522", "0.7855977", "0.7843381", "0.7838417", "0.78155446", "0.78155446", "0.78155446", "0.78155446", "0.7810031", "0.7780505", "0.7780505", "0.7763531", "0.7751662", "0.7748675", "0.773308", "0.772262", "0.76941186", "0.7692444", "0.76771563", "0.7667029", "0.75896794", "0.7550105", "0.75218284", "0.75177276", "0.7505668", "0.7494208", "0.74807173", "0.7468678", "0.7435102", "0.7435102", "0.7417522", "0.73941946", "0.73941946", "0.73941946", "0.73941946", "0.7359088", "0.7324293", "0.7284486", "0.7275708", "0.72734904", "0.72678626", "0.7261777", "0.7204337", "0.71910566", "0.71672606", "0.7155414", "0.7147419", "0.7144916", "0.7068454", "0.705407", "0.7021587", "0.69880855", "0.6953109", "0.69441915", "0.69401765", "0.6939661", "0.69266814", "0.6912347", "0.6911378", "0.6906269", "0.6898913", "0.6897689", "0.68754613", "0.68440706", "0.6798301", "0.6774968", "0.67546016", "0.6744525", "0.67412746", "0.67323625", "0.67296183", "0.6727924", "0.670346", "0.66879374", "0.667256", "0.66658", "0.66365206", "0.66325676", "0.66284204", "0.66277254", "0.66275716", "0.6617518", "0.6613581", "0.66105926", "0.66073674", "0.6600977", "0.65998894", "0.657383", "0.6572627" ]
0.7456274
39
Open the deletion modal for an address row.
def delete_address(self) -> object:
    self.delete_button.click()

    return DeletionModal(self).wait_for_component_to_be_present()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.delete_address()\n\n self.deletion_modal.confirm_address_deletion()", "def DeleteRow(self, entry):\n for a_link in entry.link:\n if a_link.rel == 'edit':\n return self.Delete(a_link.href)", "def open_edit_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.edit_address()", "def _open_delete_dialog():\n import hiero\n hiero.ui.findMenuAction('foundry.project.clearlocalcacheNuke').trigger()", "def delete_record():\n global data_base, table, choice, res, confirmation, output1, place_for_enter, choice_row\n choice_row = choice.get()\n sqlite3_simple_delete_record(data_base, table, choice_row, res)\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def delete(self, request, *args, **kwargs):\n # validate address id and get object\n instance = self.get_object()\n\n # get last transaction save point id\n sid = transaction.savepoint()\n\n try:\n # soft delete address\n instance.delete_address(request.user)\n except Exception as err:\n logger.error(\"Unexpected error occurred : %s.\", err)\n # roll back transaction if any exception occur while delete address\n transaction.savepoint_rollback(sid)\n return APIResponse({\"message\": err.args[0]}, HTTP_400_BAD_REQUEST)\n\n return APIResponse({\"message\": DELETE_ADDRESS}, HTTP_OK)", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def onAddRecord(self, event):\r\n dbItem = getattr(db, self.modelName) ()\r\n dlg = dPerson.Person(self.view, dbItem).view\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def delete():\n add_book_tk = DeleteBookDialog()\n entries_args = [\n (\"Book ID : \", 0.5),\n ]\n add_book_tk.create_components(entries_args)\n add_book_tk.mainloop()", "def delete_row(self, pk):\n return self.execute(self.commands.delete_row(self.name, self.primary_key_column, pk))", "def delete_item(self):\n self.df_user.drop(self.index_select_number, inplace=True)\n self.df_user.to_csv(\"user_items.csv\", index=False)\n self.update_treeview()\n self.changing_item_label.config(text=\"Please double click on the item you want to edit.\")\n self.delete_but.destroy()\n self.serv_drop.destroy()\n self.serv_but.destroy()\n self.pop_up_del.destroy()", "def test_client_address_delete(self):\n pass", "def on_delete_event(self, widget, event):\n\n self.hide()\n return True", "def click_map_line_items_delete_button(self):\n self.click_element(self.map_line_items_delete_button_locator, True, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.delete_mappings_confirmation_message_locator), 'delete mappings confirmation message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise", "def cmd_delete_employee():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. 
Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_employee_by_id(id)\r\n User.query.filter(User.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Employee '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.employees'))\r\n else:\r\n flash(f\"Employee '{id}' was not found\")\r\n return redirect(url_for('main.employees'))", "def delete(request, id, template='contacts/regtaller/delete.html'):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n user = request.user\n if not user.has_perm('delete_regtaller'):\n return HttpResponseForbidden()\n\n try:\n regtaller = TallerRegistration.objects.get(id=id)\n except TallerRegistration.DoesNotExist:\n raise Http404\n\n if request.method == 'POST':\n new_data = request.POST.copy()\n if new_data['delete_regtaller'] == 'Yes':\n regtaller.delete()\n return HttpResponseRedirect(reverse('contacts_regtaller_list'))\n else:\n return HttpResponseRedirect(regtaller.get_absolute_url())\n\n kwvars = {\n 'object': regtaller\n }\n\n return render_to_response(template, kwvars, RequestContext(request))", "def test_delete__DeleteForm__4(address_book, browser, role):\n browser.login(role)\n browser.assert_forbidden(browser.SEARCH_DELETE_URL)", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def customer_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n customer_reference = get_object_or_404(Customer, id=id,company=company)\n\n #deletes the view and redirects to the page.\n customer_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def del_row(self, row_index):\n ...", "def delete(no):\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n # delete a record\n c.execute(f\"DELETE from person_info WHERE oid= \" + str(no))\n\n conn.commit()\n conn.close()", "def delete():\n click.echo('delete was called.')", "def delete(self, source_index):\r\n click_css(self, 'a.delete-button', source_index, require_notification=False)\r\n # Click the confirmation dialog button\r\n click_css(self, 'a.button.action-primary', 0)", "def dataGridView_UserDeletedRow(self, send, eventArgs):\r\n name = eventArgs.Row.Cells[0].Value\r\n Application.DeleteObj(name)", "def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))", "def delete_address(address_id: int):\n try:\n current_app.logger.info(\"delete address : address_id: %s\", address_id)\n return jsonify({\n \"status\": address_service.delete_address_by_id(address_id)\n }), 200\n except SQLCustomError as error:\n current_app.logger.error(\"fail to delete address : address_id: %s\", address_id)\n return jsonify({\n \"errors\": {\n \"error\": error.__dict__\n }\n }), 400", "def get_delete_confirmation_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv", "def delete_button(self):\n self.pop_up_del = Toplevel(master)\n 
self.pop_up_del.geometry(\"500x50\")\n\n self.del_label = Label(self.pop_up_del, text=\"Are you sure you want to delete this item?\", font=\"roboto 12\")\n self.del_label.place(relx=0.5, rely=0.01, anchor=\"n\")\n\n self.del_button = Button(self.pop_up_del, text=\"DELETE\", command=self.delete_item)\n self.del_button.place(relx=0.4, rely=0.5, anchor=\"n\")\n\n self.keep_button = Button(self.pop_up_del, text=\"CANCEL\", command=self.close_1)\n self.keep_button.place(relx=0.6, rely=0.5, anchor=\"n\")", "def test_delete__DeleteForm__1(search_data, UserFactory, browser):\n address_book = search_data\n # Create a user -- the person of a user cannot be deleted using this search\n # result handler.\n UserFactory(address_book, u'Ben', u'Utzer', u'[email protected]',\n u'12345678', [], keywords=[u'church'])\n browser.login('mgr')\n browser.keyword_search('church')\n # Only the selected persons get deleted. Deselected persons will not:\n browser.getControl(name='persons:list').getControl(\n value=\"Person-2\").selected = False # This this the person named \"Koch\"\n browser.getControl('Apply on selected persons').displayValue = [\n 'Delete']\n browser.getControl(name='form.buttons.apply').click()\n # The number of persons for deletion is shown on the question screen:\n # (There are 3 persons with the church keyword in the fixture, one got\n # deselected but there is additionally a newly created user.\n assert ['3'] == browser.etree.xpath(\n '//span[@id=\"form-widgets-count\"]/text()')\n assert ('You are not able to delete a person who is referenced.'\n in browser.contents)\n assert browser.SEARCH_DELETE_URL == browser.url\n browser.getControl('Yes, delete').click()\n assert 'Selected persons deleted: 2' == browser.message\n assert browser.PERSONS_LIST_URL == browser.url\n # Only the two non-users got deleted:\n assert 'Koch' in browser.contents\n assert 'Utzer' in browser.contents\n assert 'Liebig' not in browser.contents\n assert 'Velleuer' not in browser.contents", "def test_delete__DeleteForm__3(search_data, browser, role):\n browser.login(role)\n browser.keyword_search('church')\n # There is no delete option which can be applied:\n assert ([\n 'XLS export main (Exports person data and main addresses resp. '\n 'phone numbers.)',\n 'XLS export complete (Exports person data and all addresses resp. 
'\n 'phone numbers.)',\n 'E-Mail (Creates a link to send e-mails.)',\n 'Names (Comma separated list of person names.)',\n 'Checklist (List of person names with check-boxes.)',\n \"iCalendar export birthday (Export person's birthdays as \"\n \".ics file.)\",\n 'Birthday list (Person names sorted by birthday.)',\n ] == browser.getControl('Apply on selected persons').displayOptions)\n browser.assert_forbidden(browser.SEARCH_DELETE_URL)", "def delete_place(place_id):\n place_db = mongo.db.places.find_one_or_404({'_id': ObjectId(place_id)})\n if request.method == 'GET':\n form = ConfirmDelete(data=place_db)\n return render_template('delete_restaurant.html', title=\"Delete Restaurant\", form=form)\n form = ConfirmDelete(request.form)\n if form.validate_on_submit():\n places_db = mongo.db.places\n places_db.delete_one({\n '_id': ObjectId(place_id),\n })\n return redirect(url_for('home'))\n return render_template('delete_restaurant.html', place=place_db, form=form)", "def open_download_delete_file(self):\n self._unable_open_option()\n self._tap_on_confirm_button(yes=True, msg=\"Delete button\")\n self._tap_on_confirm_button(yes=True, msg=\"Confirm Delete button\")", "def on_pushButton_delete_clicked(self):\n row = self.tableWidget.currentIndex().row()\n # 找到对于行的第一项(XX编码项)\n xxbm = unicode(self.tableWidget.takeItem(row, 0).text())\n self.tableWidget.setItem(row, 0, QTableWidgetItem(xxbm))\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n result_signal = self.sql_client.delete_zc_info(xxbm)\n elif content == \"文化表\":\n result_signal = self.sql_client.delete_wh_info(xxbm)\n else:\n result_signal = self.sql_client.delete_bm_info(xxbm)\n\n QMessageBox.information(self, 'Message', \"删除成功!\", QMessageBox.Yes)\n self.tableWidget.removeRow(row)", "def delete_deals(self):\n if not self._parent.connected():\n return\n selected = self.deals_view.get_selected_rows()\n if len(selected) > 0:\n self._parent.model.taremove_deal(map(lambda dl: dl[0], selected))\n self._parent.call_update_callback()", "def test_destroy_address(self):\n address = ProfileAddressFactory.create(city=\"city_destroyed\")\n \n url = reverse('v1:addresses-detail', args=[1, address.id])\n self.assertEqual(len(ProfileAddress.objects.all()), 1)\n\n # Check Anonymous User should return 403\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n # Profile Owner User\n self.client.credentials(Authorization='Bearer ' + 'regularusertoken')\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n \n self.assertEqual(len(ProfileAddress.objects.all()), 0)", "def delete(request):\n return render(request, 'modify.html')", "def delete_actor(self):\n\t\tif(self.ui.lista_act.currentItem()):\n\t\t\tmsgBox = QtGui.QMessageBox.question(self, \"Borrar registro\",\"¿Estas seguro de eliminar esta columna?\",\n\t\t\t\t\t\t\t\t\t\t\t\tQtGui.QMessageBox.No | QtGui.QMessageBox.Yes)\n\t\t\tif msgBox == QtGui.QMessageBox.Yes:\n\t\t\t\tname = self.ui.lista_act.currentItem().text()\n\t\t\t\tcontroller.delete_actor(name)\n\t\t\t\tself.load_data()\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\terrorMessageBox = QtGui.QMessageBox.warning(self,\"Error\",\"Debe seleccionar un elemento\")\n\t\t\treturn False", "def delete_entry(entry):\n\n # Llama a view_entries despues de haber añadido nueva funcionalidad\n\n response = input(\"Estás seguro? 
[yN]\").lower()\n\n if response == 'y':\n entry.delete_instance()\n print('Entrada borrada.')", "def delete_rows(self, table_model, row, count):\n self.undostack.push(DeleteRowCommand(table_model, row, table_model.get_rows(row, count=count)))", "def deleteJournal(self):\n\n x = self.tableWidget_journals.currentRow()\n journalName = self.journals[x]['name']\n #print((\"Delete row: \" + str(x)))\n Dialog_confirmDelete = QtGui.QDialog()\n ui = Ui_Dialog_confirmDelete(self.journals[x]['name'])\n ui.setupUi(Dialog_confirmDelete)\n ok = Dialog_confirmDelete.exec_()\n\n if ok:\n cur = self.settings['conn'].cursor()\n cur.execute(\"delete from journal where name = ?\", [journalName])\n self.settings['conn'].commit()\n for item in self.journals:\n if item['name'] == journalName:\n self.journals.remove(item)\n self.tableWidget_journals.removeRow(x)\n self.log += \"Journal \" + journalName + \" deleted\\n\"", "def delete(self, _id):", "def delete_row(self, pos):\n del self._grid[pos]", "def office_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n #deletes the view and redirects to the page.\n office_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def deleteRecord(self):\n selectedData = self.controller.chooseRecord(\"Enter the record number: \") - 1\n if selectedData >= (len(self.dto.getRecord())):\n print(\"Please choose number within the number of records.\")\n else:\n print(self.dto.getRecord()[selectedData].__dict__)\n if self.controller.confirmMsg(\"Do you want to delete this data? (y/n): \") == \"y\":\n self.dto.getRecord().remove(self.dto.getRecord()[selectedData])\n print(\"Record deleted.\")", "def taskdetail_delete(td):\n return IMPL.taskdetail_delete(td)", "def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")", "def admindelete(object, id):\n db = get_db()\n execute_str = 'DELETE FROM ' + object + ' WHERE id = ' + str(id)\n db.execute(execute_str)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))", "def test_delete__DeleteForm__2(search_data, browser):\n browser.login('mgr')\n browser.keyword_search('church', 'Delete')\n # Seleting the `cancel` button leads to the person list without deleting\n # anybody:\n browser.getControl('No, cancel').click()\n assert 'Deletion canceled.' 
== browser.message\n assert browser.PERSONS_LIST_URL == browser.url\n assert 'Koch' in browser.contents\n assert 'Liebig' in browser.contents\n assert 'Velleuer' in browser.contents", "def delete_event(self,widget=None):\n self.on_device_dialog_cancel_clicked()\n return True", "def competitors_delete(request, slug,id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n competitors_reference = get_object_or_404(Competitors, id=id,company=company)\n\n #deletes the view and redirects to the page.\n competitors_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete():\n return render_template('layout.html')", "def delete_row(self, identifier, rowid, datastore):\n # Get dataset. Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Delete the row at the given index position\n df = vizual.delete_rows(dataset.to_dataframe(), rowids=[rowid])\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations.filter(rows=list(df.index))\n )\n return VizualApiResult(ds)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def get_absolute_url(self):\n return reverse('order_items:order_item_delete', args=[str(self.id)])", "def delete_loc(lid):\r\n\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n db.execute(\r\n \"DELETE FROM location WHERE location_id = ? AND for_business = ?\", (lid, b_id,)\r\n )\r\n db.commit()\r\n db.execute(\"DELETE FROM warehouse WHERE loc_id = ? AND b_id = ?\", (lid, b_id,))\r\n db.commit()\r\n return redirect(url_for(\"main.locations\"))", "def handle_delete(self, peer, row, col):\n if peer.hasSelection():\n \n peer.deleteSelection()\n \n else:\n\n self.delete(\"{}.{}\".format(row, col))\n \n # peer.move(row, col)\n\n return", "def delete_location(self, location_id):", "def __editDelete(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").clear()\n else:\n self.activeWindow().clear()", "def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")", "def mock_datatable_delete_row(self, request):\n\n data = self.mock_success_delete\n\n return create_response(request,\n status_code=200,\n content=b(dumps(data)))", "def delete_donation():\n print()\n print('Welcome to the Delete a a Donor Donation Menu')\n print()\n donor_name = get_name_input()\n single_donor_print(donor_name)\n print('See the donation you want to delete in the report? Follow the prompts to enter donation to delete')\n donation_delete = check_number_input()\n print()\n delete_donation_from_db(donor_name,donation_delete)\n print('Donation has been deleted. 
See report below for verification')\n single_donor_print(donor_name)", "def delete(self):\n if self.id_:\n GsSession.current._delete(f'{API}/{self.id_}', request_headers=DATAGRID_HEADERS)\n else:\n raise MqValueError('DataGrid has not been persisted.')", "def delete_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n db = get_db()\n cur = db.execute('select id, title from entries where id = ?',\n [id.strip()])\n entries = cur.fetchall()\n title = entries[0]['title']\n db = get_db()\n db.execute('delete from entries where id = ?', [id.strip()])\n db.commit()\n flash('Recipe, ' + escape(title) + ', has been deleted', 'success')\n return redirect(url_for('show_entries'))", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete_row(self):\n return exclusions.closed()", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def delete():\n if request.method == 'POST':\n table_name = request.form['table']\n table = get_table_by_name(table_name)\n if 'id' not in request.form:\n user_data = get_data_from_table(table)\n\n return render_template(\n 'delete.html', table_name=table_name, user_data=user_data\n )\n\n if table is None:\n return 'Table not chosen?'\n\n success, reason = utils.delete_user(request.form['id'], table_name)\n\n if not success:\n return f'Error occurred trying to delete - {reason}'\n\n log(\n f\"<code>{current_user.name}</code> has deleted <code>{request.form['id']}</code> from {table_name}\"\n )\n return (\n f\"<code>{request.form['id']}</code> has been deleted from from {table_name}\"\n )\n return render_template('events.html', events=get_accessible_tables())", "def del_history_to_address(address):\n \n result = delete_transaction_observation_to_address(address)\n\n # if successfully deleted from observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"", "def test_delete_shipping_address(self):\n self.cim.delete_shipping_address(\n customer_profile_id=u\"123\",\n customer_address_id=u\"543\"\n )", "def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self):\n ...", "def remove_row(self, row_id):", "def delete_model(self, request, obj):\n obj.post.comNumDrease()\n obj.delete()", "def click_delete_button(self):\n self.number_of_rows = self.get_inbound_statement_grid_number_of_rows()\n self.click_element(self.delete_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_delete_confirmation_locator), 'statement delete confirmation locator not found before specified time out')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element.click()\n self.wait_for_ajax_spinner_load()\n self.wait().until(EC.visibility_of_element_located(self.statement_delete_success_message_locator), 'statement delete success message locator not found before specified time out')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", 
"def _delete(self, pk, user=None):\n request = self.factory.delete(self.detail_url(pk), format='json')\n force_authenticate(request, user)\n resp = self.detail_view(request, pk=pk)\n resp.render()\n return resp", "def transaction_delete(request, transaction_id, model_class=Transaction, template_name='budget/transactions/delete.html'):\n transaction = get_object_or_404(Transaction.active.all(), pk=transaction_id)\n if request.POST:\n if request.POST.get('confirmed'):\n transaction.delete()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n return render_to_response(template_name, {\n 'transaction': transaction,\n }, context_instance=RequestContext(request))", "def cmd_delete_job():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_job_by_id(id)\r\n Job.query.filter(Job.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Job '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.jobs'))\r\n else:\r\n flash(f\"Job '{id}' was not found\")\r\n return redirect(url_for('main.jobs'))", "def delete_people(pid):\n db_conn = get_connection()\n cur = db_conn.cursor()\n _sql = \"\"\"SELECT \n id,firstname,lastname,address,country\n FROM peoples where id = ?\"\"\"\n cur.execute(_sql, (pid,))\n record = cur.fetchone()\n\n if record:\n delete_sql = \"\"\"DELETE FROM peoples where id = ?\"\"\"\n cur.execute(delete_sql, (pid,))\n db_conn.commit()\n flash(\"Record with id {} deleted successfully!!!\".format(pid))\n return redirect(url_for('list_people'))\n\n else:\n flash(\"Sorry! No user with id {} found to delete\".format(pid))\n return redirect(url_for(\"list_people\"))", "def __render_deletion_cascade(self, r, method, row):\n \n r.line('// %s' % self.get_debug_name())\n self.local_index.render_deletion(r,\n self.project_key_foreign_to_local(row))\n return", "def showEditContact(self):", "def deleteRow(self):\r\n self.conn = connect('database.sqlite')\r\n self.cur = self.conn.cursor()\r\n self.cur.execute('''\r\n DELETE FROM Clients WHERE id=(SELECT MAX(id) FROM Clients)\r\n ''')\r\n self.conn.commit()\r\n self.cur.close()", "def help_delete(self):\n print(DELETE)", "def on_deleteButton_clicked(self):\n for itm in self.historyList.selectedItems():\n ditm = self.historyList.takeItem(self.historyList.row(itm))\n del ditm\n self.historyList.scrollToItem(self.historyList.currentItem())\n self.historyList.setFocus()", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)", "async def delete_one(self, where):\n\n pass", "def delete_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def management_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n management_reference = get_object_or_404(Management, id=id,company=company)\n\n #deletes the view and redirects to the page.\n management_reference.delete()\n return 
HttpResponseRedirect('/company/'+str(slug))", "def award_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n award_reference = get_object_or_404(Award, id=id,company=company)\n\n #deletes the view and redirects to the page.\n award_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def del_history_from_address(address):\n \n result = delete_transaction_observation_from_address(address)\n\n # if successfully deleted from observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"", "def delete(id):\n get_autor(id)\n try:\n db.insert_bd('DELETE FROM autor WHERE id = %d' % id)\n return redirect(url_for('autor.index'))\n except:\n return render_template('404.html')", "def create_delete_entry(columns: list, win, x: int, y: int):\n global choice, search_delete_enter\n window_for_search = Frame(win)\n window_for_search.place(x=x, y=y)\n search_delete_enter = ttk.Combobox(window_for_search, values=columns, height=3)\n search_delete_enter.set(u'Выбор параметра')\n search_delete_enter.grid(column=0, row=0)\n choice = search_delete_enter\n return choice", "def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)", "def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)", "def funding_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n #deletes the view and redirects to the page.\n funding_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def view_delete(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n if request.method == 'POST':\n # Log the event\n Log.objects.register(\n request.user,\n Log.VIEW_DELETE,\n view.workflow,\n {\n 'id': view.id,\n 'name': view.name,\n 'workflow_name': view.workflow.name,\n 'workflow_id': view.workflow.id})\n\n # Perform the delete operation\n view.delete()\n\n # In this case, the form is valid anyway\n return JsonResponse({'html_redirect': reverse('table:view_index')})\n\n return JsonResponse({\n 'html_form': render_to_string(\n 'table/includes/partial_view_delete.html',\n {'view': view},\n request=request),\n })", "def post_delete(self, *args, **kw):\n id_atributo = int(args[0])\n transaction.begin()\n attr = AtributosPorTipoItem.por_id(id_atributo)\n DBSession.delete(attr)\n transaction.commit()\n flash(\"Atributo Eliminado\")\n redirect(\"./\")" ]
[ "0.70819616", "0.61636835", "0.6088303", "0.5755534", "0.5709963", "0.55882573", "0.55848145", "0.5560167", "0.55589795", "0.55150014", "0.5407896", "0.5399962", "0.5394524", "0.53886646", "0.5371516", "0.5349296", "0.5309787", "0.53002477", "0.529465", "0.52785975", "0.5267689", "0.52535003", "0.5235827", "0.5234687", "0.52154475", "0.52142286", "0.52094084", "0.5200737", "0.5193239", "0.5184428", "0.5179076", "0.5164752", "0.51585364", "0.51459754", "0.51252663", "0.5115147", "0.5110028", "0.5106845", "0.5105958", "0.5103508", "0.5088271", "0.5086246", "0.5079797", "0.50732964", "0.50702447", "0.50586593", "0.50523764", "0.50448316", "0.50430036", "0.5034576", "0.50332683", "0.50167614", "0.50108457", "0.49828774", "0.4980904", "0.4977461", "0.49648088", "0.49636167", "0.49478373", "0.49364144", "0.49316856", "0.49308982", "0.49305663", "0.4921388", "0.49104849", "0.4907485", "0.48972702", "0.48906982", "0.48890457", "0.4873864", "0.48722067", "0.48683217", "0.48683217", "0.4868106", "0.48679993", "0.48665205", "0.48658377", "0.4865804", "0.48634997", "0.4859881", "0.48598295", "0.4858247", "0.48565552", "0.4856375", "0.48535487", "0.4853115", "0.485306", "0.4852368", "0.484801", "0.48462522", "0.48438928", "0.4837866", "0.48375046", "0.4823", "0.4819474", "0.48154545", "0.48145008", "0.4813918", "0.48127896", "0.4811962" ]
0.77717626
0
Open the edit address form for an address row. A conditional check for whether the URL contains 'admin' allows this method to be used in both OnDemand Web and OnDemand Admin.
def edit_address(self) -> object:
    self.edit_button.click()

    if 'admin' not in self.driver.current_url:
        return WebAddressForm(self).wait_for_component_to_be_present()

    return AdminAddressForm(self).wait_for_component_to_be_present()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_edit_address(self, address: dict) -> None:\n row = self.addresses_list.surface_address_row(address)\n\n row.open_kebab_menu()\n row.kebab_menu.edit_address()", "def __editAddress(self):\n idx = self.bookmarksTree.currentIndex()\n idx = idx.sibling(idx.row(), 1)\n self.bookmarksTree.edit(idx)", "def edit_form():\n return template (\"edit\")", "def edit_address(self, new_label: str) -> None:\n self.address_form.label_input.fill(new_label)\n self.address_form.save_button.click()", "def activate_external_editing(self, new_doc):\n new_doc.setup_external_edit_redirect(self.request, action=\"oneoffixx\")", "def test_phone_address_view_with_url(self):\n url = reverse('profiles:update_phone_address')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(\n response,\n 'profiles/update_phone_address.html'\n )", "def get(self, request, *args, **kwargs):\n\n self.object = self.get_object()\n formclass = self.get_form_class()\n opform = self.get_form(formclass)\n if not request.user.is_superuser:\n del opform.fields['administrator']\n addrform = AddressForm(instance=self.object.address)\n return self.render_to_response(\n self.get_context_data(\n form=opform,\n address_form=addrform,\n )\n )", "def showEditContact(self):", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def getEditForm( self ):\n return \"listc_edit\"", "def set_show_in_edit_form(self, flag):\n qry = ServiceOperationQuery(self, \"setShowInEditForm\", [flag])\n self.context.add_query(qry)", "def edit_link(db_object, text=None):\n if text is None:\n text = 'edit'\n return _make_link(db_object.update_url(), text)", "def lineEdit( self ):\n return self._urlEdit", "def edit(self, **kwargs):\n ...", "def open_ride_edit_form(self) -> object:\n self.container.find_element(*self._ride_edit).click()\n\n return EditForm(self).wait_for_component_to_be_present()", "def url_to_edit(obj):\n return reverse(\n 'admin:%s_%s_change' % (obj._meta.app_label, obj._meta.model_name),\n args=[obj.id]\n )", "def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)", "def process_address():\n #get address info from form\n user_details = request.form\n #validate address with google geocoding\n update_details = apiapijoyjoy.validate_address(user_details)\n #update ino in db\n dbwrangler.newaddress(update_details)\n \n return redirect(\"/\")", "def edit_link(instance):\n\n try:\n content_type = ContentType.objects.get_for_model(instance.__class__)\n except AttributeError:\n raise ValueError('Passed value must be registered model instance')\n else:\n model_admin_change_link = 'admin:{app}_{model}_change'.format(\n app=content_type.app_label,\n model=content_type.model\n )\n return reverse(model_admin_change_link, args=(instance.id,))", "def test_the_data_edit_url(self):\n\n my_instance = Contact.objects.first()\n info_url = resolve('/to_form/%s/' % my_instance.id)\n self.assertEqual(info_url.func.__name__, 'my_edit_data')\n self.assertEqual(self.response.status_code, 200)", "def choose_new_address(self) -> \"CheckoutPage\":\n self.accordion = BillingDetailsUser(self.driver)\n self.accordion.btn_new_address.click()\n return self", "def edit(self):\n\n pass", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def get_edit_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit 
{}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def OnToggleEdit(self, event):\n\t\ttoggle = self.btn_edit.GetToggle()\n\t\tif not toggle:\n\t\t\tif not util.gAuthen.Authenticate(util.ADMIN):\n\t\t\t\tself.btn_edit.SetToggle(not toggle)\n\t\t\t\treturn \n\t\tself.UpdateToggle()", "def register_edit_view(self, blueprint):\n view = apply_decorators(self.edit_view, self.edit_decorators)\n blueprint.add_url_rule(\n self.edit_rule, self.edit_endpoint, view, methods=['GET', 'POST'])", "def edit():", "def link_edit_callback(self):\n pass", "def the_user_edit_page_should_open(driver):\n assert wait_on_element(driver, 7, xpaths.add_User.edit_Title)\n time.sleep(0.5)", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def manage_edit_save(self, REQUEST):\n self._config.update(ldap_config.read_form(REQUEST.form, edit=True))\n REQUEST.RESPONSE.redirect(self.absolute_url() + '/manage_edit')", "def edit_agency(self) -> object:\n self.edit_agency_button.click()\n\n return AgencySelectModal(self).wait_for_component_to_be_present()", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)", "def edit_employee_locations(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Loc_Form(obj = employee)\n \n form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n \n if form.validate_on_submit():\n \n location = Location.query.get(form.location.data) \n employee.locations.append(location)\n db.session.add(employee)\n \n db.session.commit()\n\n \n\n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n \n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n 
util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. \n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def test_address_page(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type = \"html_text\")\n self.assertTrue(b'Address Locator' in response.data)", "def new_address():\n user = dbwrangler.get_current_user()\n if user:\n return render_template(\"new_user_more.html\")\n else:\n return redirect(\"/\")", "def edit_form_entry_edit_option_html(cls):\n return \"\"\"\n <li><a href=\"{edit_url}\">\n <span class=\"{edit_option_class}\"></span> {edit_text}</a>\n </li>\n \"\"\".format(\n edit_url=\"{edit_url}\",\n edit_option_class=cls.form_edit_form_entry_option_class,\n edit_text=\"{edit_text}\",\n )", "def restaurants_edit(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n # Find the restaurant\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n # Only edit if the entry was re-written\n if len(request.form['address']) > 0:\n restaurant.address = request.form['address']\n if len(request.form['phone']) > 0:\n restaurant.phone = request.form['phone']\n if len(request.form['web']) > 0:\n restaurant.web = helper.check_restaurant_URL(request.form['web'])\n if len(request.form['tag_line']) > 0:\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n helper.delete_restaurant_tag_pairs(restaurant.id)\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, restaurant.id)\n if len(request.form['description']) > 0:\n restaurant.description = request.form['description']\n\n restaurant.last_update = datetime.utcnow()\n\n session.add(restaurant)\n session.commit()\n flash(\"Restaurant {} edited!\".format(restaurant.name))\n return redirect(url_for('restaurants_page'))\n else:\n # Get user info if the user is signed in to render edit form\n user_info = helper.get_user_if_exists(login_session)\n tag_rest_list = session.query(RestaurantTags).filter_by(restaurant_id=restaurant.id).all()\n tag_line = ''\n # Create a tag line - by compiling the string tag_name for each tag\n for pair in tag_rest_list:\n tag = session.query(Tags).filter_by(id=pair.tag_id).first()\n tag_line += tag.tag_name + ', '\n return render_template('editrestaurant.html',\n restaurant=restaurant,\n tag_line=tag_line,\n user_info=user_info)", "def update(self, addr, replace=False):\n if self.ignorer.ignore_address(addr[1]):\n return False\n try:\n with self.connect() as c:\n cur = c.cursor()\n if replace:\n present = cur.execute(\"SELECT 1 FROM AddressBook WHERE address = ?\", [addr[1]])\n if present:\n cur.execute(\"UPDATE AddressBook SET name = ? 
WHERE address = ?\", addr)\n else:\n cur.execute(\"INSERT INTO AddressBookView VALUES(?,?)\", addr)\n else:\n cur.execute(\"INSERT INTO AddressBookView VALUES(?,?)\", addr)\n return True\n except sqlite3.IntegrityError:\n return False", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def home_edituser():\n\tpass", "def click_the_edit_button_that_appears(driver):\n driver.find_element_by_xpath(xpaths.users.eric_Edit_Button).click()", "def __edit(self):\n from .BookmarkPropertiesDialog import BookmarkPropertiesDialog\n \n idx = self.bookmarksTree.currentIndex()\n sourceIndex = self.__proxyModel.mapToSource(idx)\n node = self.__bookmarksModel.node(sourceIndex)\n dlg = BookmarkPropertiesDialog(node)\n dlg.exec_()", "def edit_place(place_id):\n place_db = mongo.db.places.find_one_or_404({'_id': ObjectId(place_id)})\n if request.method == 'GET':\n form = EditPlacesForm(data=place_db)\n return render_template('edit_restaurant.html', recipe=place_db, form=form)\n form = EditPlacesForm(request.form)\n if form.validate_on_submit():\n places_db = mongo.db.places\n places_db.update_one({\n '_id': ObjectId(place_id),\n }, {\n '$set': {\n 'name': request.form['name'],\n 'city': request.form['city'],\n 'added_by': session['username'],\n 'description': request.form['description'],\n 'tags': request.form['tags'],\n 'image': request.form['image'],\n }\n })\n return redirect(url_for('home'))\n return render_template('edit_restaurant.html', place=place_db, form=form)", "def _enter_edit_mode(self):\n edit_mode = self.UTILS.element.getElement(DOM.DownloadManager.download_edit_button,\n \"Download edit button\", True, 10)\n edit_mode.tap()\n self.UTILS.element.waitForElements(DOM.DownloadManager.downloads_edit_header_title,\n \"Edit downloads header\")", "def show_edit_user_form(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('edit_user.html', user=user)", "def edit_restaurant(restaurant_id):\n user_id = login_session['user_id']\n r = read_restaurants(restaurant_id, user_id)\n if r[1] is True: # Means if user is owner\n if request.method == 'GET':\n return render_template('restaurants/editrestaurant.html',\n restaurant=r[0][0])\n elif request.method == 'POST':\n # Got post request -> First we get the request arguemnts\n name = request.form['name']\n address = request.form['address']\n city = request.form['city']\n state = request.form['state']\n zipCode = request.form['zipCode']\n # Next we do the db edit\n update_restaurant(restaurant_id, name, address,\n city, state, zipCode)\n # Finally we return the success html\n flash(\"Edited your restaurant\")\n return render_template(\"submitted.html\")\n else:\n return \"Invalid http\"\n else:\n flash(\"You need to be the owner of the restaurant to edit\")\n return redirect(url_for('site.show_restaurants',\n restaurant_id=restaurant_id))", "def show_edit_form(self, obj_pk=None):\n obj = 
self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()", "def edit(request, address):\n if address.startswith(\"/\"):\n address = address[1:]\n if address.endswith(\"/\"):\n address = address[:-1]\n\n # we try to find the parent. Creating a page without parent isn't possible.\n parent = None\n if \"/\" in address:\n parent = address.rsplit(\"/\", 1)[0]\n else:\n parent = \"\"\n\n try:\n parent = Page.objects.get(address=parent)\n except Page.DoesNotExist:\n parent = None\n\n # try to get the page itself, which might exist\n try:\n page = Page.objects.get(address=address)\n except Page.DoesNotExist:\n page = None\n\n initial = {}\n if page:\n initial[\"title\"] = page.title\n initial[\"content\"] = page.content\n\n if request.method == 'POST':\n # the form has been sent, use the different access rights\n form = PageForm(request.POST, initial=initial)\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n content = form.cleaned_data[\"content\"]\n user = request.user\n user = user if user.is_authenticated else None\n can = False\n if user and user.is_superuser:\n # the superuser can do it all\n can = True\n elif parent and page is None and parent.access(user, \"write\"):\n # the page doesn't exist, but the parent does, and the user can edit it\n can = True\n elif page and page.access(user, \"write\"):\n # the page already exist and the user can edit it\n can = True\n\n if can:\n new_page = Page.objects.create_or_update_content(address, user, content)\n new_page.title = title\n if parent is not None and page is None:\n new_page.can_write = parent.can_write\n new_page.can_read = parent.can_read\n new_page.save()\n\n return HttpResponseRedirect('/wiki/' + address)\n else:\n form = PageForm(initial=initial)\n\n return render(request, \"wiki/edit.html\", {'form': form, 'address': address, \"page\": page, \"parent\": parent})", "def show_employee_edit_form(self, staff_ob, number):\n\n print(self.LENGTH_STAR * \"*\")\n print(f\"EDIT {staff_ob.role.upper()}\\n\")\n\n if number == 1:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s address\\nThe current address is: {staff_ob.address}\")\n new_address = self.get_address()\n while new_address == False:\n new_address = self.get_address()\n self.check_action_edit_form(staff_ob, number, new_address)\n\n elif number == 2:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s mobile 
number\\nThe current mobile number is: {staff_ob.mobile_number}\")\n new_mobile_number = self.get_mobile_number()\n while new_mobile_number == False:\n new_mobile_number = self.get_mobile_number\n self.check_action_edit_form(staff_ob, number, new_mobile_number)\n \n elif number == 3:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s email\\nThe current the email is: {staff_ob.email}\")\n new_email = self.get_email()\n while new_email == False:\n new_email = self.get_email()\n self.check_action_edit_form(staff_ob, number, new_email)\n \n print(f\"\\n{staff_ob.name}'s information successfully changed!\\n\")\n \n return", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def show_edit_user_form(user_id):\r\n user = User.query.get_or_404(user_id)\r\n\r\n return render_template('edit-user.html', user=user)", "def click_bulk_edit_button(self):\n self.click_element(self.bulk_edit_button_locator, True)", "def edit_locations(location_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n location = Location.query.get_or_404(location_id)\n form = Location_Form(obj = location)\n\n if form.validate_on_submit():\n location.site_name = form.site_name.data,\n location.city = form.city.data,\n location.state = form.state.data\n \n db.session.commit()\n flash(f\"Location {site_name} has been updated\")\n return redirect(\"/administrator\")\n else:\n return render_template(\"/admin/edit_location.html\", form = form, location = location)", "def add_new_address(self, address: dict) -> None:\n self.new_address_button.click()\n\n self.address_form.select_location(address['address'])\n self.address_form.label_input.fill(address['name'])\n\n self.address_form.save_button.click()", "def _edit( self, remote_url ):\n # strip off scheme and machine from URL if present\n tokens = urlparse.urlparse( remote_url, 'http' )\n if tokens[1]:\n # There is a nethost, remove it\n t=('', '') + tokens[2:]\n remote_url=urlparse.urlunparse(t)\n # if URL begins with site URL, remove site URL\n utool = queryUtility(IURLTool)\n if utool is None:\n # fallback for bootstrap\n utool = aq_get(self, 'portal_url', None)\n portal_url = utool.getPortalPath()\n i = remote_url.find(portal_url)\n if i==0:\n remote_url=remote_url[len(portal_url):]\n # if site is still absolute, make it relative\n if remote_url[:1]=='/':\n remote_url=remote_url[1:]\n self.remote_url=remote_url\n\n # save unique id of favorite\n self.remote_uid = self._getUidByUrl()", "def edit_person(self, pk):", "def create_or_update_address(address, customer):\n\tname = frappe.db.get_value('Address', { 'entity_id': address.get('entity_id') })\n\tif not name:\n\t\taddr = frappe.new_doc('Address')\n\t\taddr.address_title = \"{} {} 
{}\".format(\n\t\t\taddress.get(\"firstname\"),\n\t\t\taddress.get(\"lastname\"),\n\t\t\taddress.get(\"entity_id\")\n\t\t)\n\telse:\n\t\taddr = frappe.get_doc(\"Address\", name)\n\n\taddr.address_type = get_address_type(address).get('type')\n\taddr.entity_id = address.get('entity_id')\n\taddr.address_line1 = address.get('street')[0]\n\taddr.address_line2 = address.get('street')[1] if len(address.get('street')) > 1 else \"\"\n\taddr.city = address.get('city')\n\taddr.country = frappe.db.get_value('Country', { 'code': address.get('country_id') })\n\taddr.state = address.get('region')\n\taddr.pincode = address.get('postcode')\n\taddr.phone = address.get('telephone') or '00000'\n\taddr.fax = address.get('fax')\n\taddr.customer = customer\n\taddr.customer_name = address.get('firstname')+' '+address.get('lastname')\n\taddr.is_primary_address = get_address_type(address).get('is_primary_address')\n\taddr.is_shipping_address = get_address_type(address).get('is_shipping_address')\n\n\taddr.save(ignore_permissions=True)", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def admin(self, view):\n view.admin = True\n return view", "def enableAddressLayer(self):\n\n self._deladdressaction.setEnabled(True)\n self._createnewaddressaction.setEnabled(True)\n self._moveaddressaction.setEnabled(True)\n self._updateaddressaction.setEnabled(True)\n self._highlightaction.setEnabled(True)", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def test_edit_has_populated_form(testapp, fill_the_db, login_testcase):\n response = testapp.get('/journal/1/edit-entry', status=200)\n title = response.html.find_all('input')[1]['value']\n body = response.html.form.textarea.contents[0]\n assert title == ENTRIES[0]['title']\n assert body == ENTRIES[0]['body']", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def edit_contact(self):\n edit_data = input(\"Enter the first name of user you want to edit\\n\")\n\n for contact in self.contact_list:\n if contact.first_name == edit_data:\n user_input = int(input(\n \"Enter the number that you want to edit field in details\"\n \" \\n 1. First Name 2. Last name 3. Address 4. city 5. state 6.zip 7. 
Phone number 8.Email \\n\"))\n if user_input == 1:\n first_name = input(\"Enter new first name\\n\")\n contact.first_name = first_name\n elif user_input == 2:\n last_name = input(\"Enter new last name\\n\")\n contact.last_name = last_name\n elif user_input == 3:\n address = input(\"Enter new address\\n\")\n contact.address = address\n elif user_input == 4:\n city = input(\"Enter new city\\n\")\n contact.city = city\n elif user_input == 5:\n state = input(\"Enter new state\\n\")\n contact.state = state\n elif user_input == 6:\n zip = input(\"Enter new zip\\n\")\n contact.zip = zip\n elif user_input == 7:\n phone_number = input(\"Enter new phone number\\n\")\n contact.phone_number = phone_number\n elif user_input == 8:\n email = input(\"Enter new email\\n\")\n contact.email = email\n else:\n print(\"Please enter a valid input\")\n else:\n print(\"Please enter a valid name\")", "def open_attr_req_editing(self, element) -> None:\n pass", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def edit_item(self):\n for item in self.selection():\n origin_name = self.set(item)[\"1\"]\n origin_url = self.set(item)[\"2\"]\n popup = GetItemURLDialogue(self, \"Edit Item\", origin_name, origin_url)\n\n self.item(item, values=(popup.name, popup.url, self.set(item)[\"3\"]))\n self.set(item)[\"2\"] = popup.url\n\n # Edit the item - backend\n for row in s.item:\n if row[\"item\"] == origin_name and row[\"url\"] == origin_url:\n s.item.remove(row)\n s.updateItem({\"item\": popup.name, \"url\": popup.url})", "def show_orion_admin_url(self, obj):\n return obj.orion_admin_url", "def test_edit_view(self):\n target_url = url_for('content.edit_content')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def editChange(self,editBtn):\n if self.edit ==True:\n self.updateDetails(\"normal\") #update details column\n self.edit = False #switch boolean\n self.editBtn.setText(\"Edit\") #update button text\n else:\n self.updateDetails(\"edit\") #update details column\n self.edit= True #switch boolean\n self.editBtn.setText(\"Stop Editing\") #update button text", "def editarEndereco(self, ide, rua = None, num = None, bairro = None, cidade = None, uf = None, cep = None, complemento = None):\r\n try:\r\n if ide is None:\r\n return False\r\n else:\r\n if rua != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET RUA = '%s' WHERE ID = %s;\" %(rua, ide))\r\n if num != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET NUM = %s WHERE ID = %s;\" %(num, ide))\r\n if bairro != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET BAIRRO = '%s' WHERE ID = %s;\" %(bairro, ide))\r\n if cidade != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET CIDADE = '%s' WHERE ID = %s;\" %(cidade, ide))\r\n if uf != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET UF = '%s' WHERE ID = %s;\" %(uf, ide))\r\n if cep != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET CEP = '%s' WHERE ID = %s;\" %(cep, ide))\r\n if complemento != None:\r\n self.cursor.execute(\"UPDATE ENDERECO SET COMPLEMENTO = '%s' WHERE ID = %s;\" %(complemento, ide))\r\n return True\r\n except:\r\n return False", "def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return 
self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)", "def edit_register(id):\n add_employee = False\n\n employee = Employee.query.get_or_404(id) #from table\n print('----update 1----')\n form = UpdateForm(obj=employee) #if not 404\n print('----update 2----')\n if form.validate_on_submit():\n employee.email = email=form.email.data\n employee.username=form.username.data\n employee.glad_id=form.glad_id.data\n employee.tel_no=form.tel_no.data\n employee.role_id=form.role_id.data\n employee.password=form.password.data\n\n # UPDATE employee to the database\n print('----update----',employee.role_id)\n db.session.commit()\n flash('You have successfully updated! ')\n\n # # redirect to the login page\n # return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Update')", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def save_edit(self):\r\n self.driver.find_element_by_xpath('//*[@id=\"vnos\"]/span[1]/div/a').click()", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def is_edit(self):\n return self._tag == 'edit'", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def form_valid(self, form):\n address_form = AddressInlineFormSet(self.request.POST, instance = self.object)\n if form.is_valid() and address_form.is_valid():\n form.save()\n address_form.save()\n return HttpResponseRedirect(self.get_success_url())\n else:\n context = {\n 'form' : form,\n 'address_form' : address_form\n }\n return self.render_to_response(context)", "def isEditLink(self, rel, type = None): #$NON-NLS-1$\r\n return self._isInRelList(rel, ZAtomRelTypes.ATOM_EDIT_LINK_REL_LIST)", "def admin_user_area():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n edit = request.args.get('edit', None)\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if edit == \"true\":\n edit_mode = True\n else:\n edit_mode = False\n user = get_user_by_id(user_id)\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('user_area.html', user=user_id, session_id=session_id, edit_mode=edit_mode,\n surname=user.surname, name=user.name, birthdate=user.birthdate, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list, admin=True)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def _editClickedSlot(self):\r\n\r\n index = self.propertiesTableView.selectionModel().currentIndex()\r\n if index.isValid():\r\n self.propertiesTableView.edit(index)", "def model_admin_url(self, instance: BaseModel, name: str = None) -> str:\n url = resolve_url(\n admin_urlname(instance._meta, SafeText(\"change\")), instance.pk\n )\n return format_html('<a href=\"{}\">{}</a>', url, name or str(instance))", "def _get_address_bar(parent_handle):\n childwins = []\n win32gui.EnumChildWindows(parent_handle, _enumWindowsCallback,\n childwins)\n for win in 
childwins:\n child_handle = win[0]\n class_name = win[1]\n if 'Edit' in class_name:\n edit_text = _get_edit_text(child_handle)\n if 'http://' in edit_text or 'https://' in edit_text:\n return child_handle # then this must be it...", "def admin_edit_users():\n return user_management_handler(\"show_admin_edit_users\", \"new_users\", False)", "def show_fresh_address(self):\n\t\treturn self.__fresh_account()[\"address\"]", "def edit(tesserae, tessera_id):\n try:\n return tesserae.edit(tessera_id)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\", str(e))\n return False", "def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")", "def test_edit_view(self):\n self.client.post(reverse('misago:admin:users:bans:new'), data={\n 'check_type': '0',\n 'banned_value': 'Admin',\n })\n\n test_ban = Ban.objects.get(banned_value='admin')\n form_link = reverse('misago:admin:users:bans:edit', kwargs={'pk': test_ban.pk})\n\n response = self.client.post(form_link, data={\n 'check_type': '1',\n 'banned_value': '[email protected]',\n 'user_message': 'Lorem ipsum dolor met',\n 'staff_message': 'Sit amet elit',\n 'expires_on': '',\n })\n self.assertEqual(response.status_code, 302)\n\n response = self.client.get(reverse('misago:admin:users:bans:index'))\n response = self.client.get(response['location'])\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '[email protected]')", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def test_update_route_has_populated_form(testapp, fill_the_db):\n response = testapp.get('/journal/1/edit-entry', status=200)\n title = response.html.form.input[\"value\"]\n body = response.html.form.textarea.contents[0]\n assert title == ENTRIES[0][\"title\"]\n assert body == ENTRIES[0][\"body\"]", "def startNewAddressTool(self):\n 
self.iface.mapCanvas().setMapTool(self._createnewaddresstool)\n self._createnewaddresstool.setEnabled(True)", "def edit(self,item=None):\r\n raise AbstractError\r\n return False" ]
[ "0.7748994", "0.6355615", "0.5770656", "0.5743798", "0.5656257", "0.55580795", "0.55527085", "0.55411637", "0.5522946", "0.5501704", "0.5454978", "0.5401237", "0.5389246", "0.5380006", "0.53368807", "0.53319603", "0.5306126", "0.52961576", "0.52745545", "0.52547807", "0.5243811", "0.5242433", "0.52371204", "0.52199036", "0.5215948", "0.5209036", "0.52000123", "0.51821035", "0.513429", "0.51050574", "0.51001364", "0.5094042", "0.50936365", "0.507857", "0.5076266", "0.50678056", "0.50556934", "0.5045548", "0.50056046", "0.49972856", "0.499551", "0.49889207", "0.4982295", "0.4946047", "0.4941043", "0.4937125", "0.49214903", "0.49205768", "0.4918814", "0.49059254", "0.48899147", "0.48888195", "0.48853394", "0.4876813", "0.48636302", "0.48604658", "0.48203564", "0.48188213", "0.48171434", "0.48042086", "0.48018605", "0.47950184", "0.4794343", "0.4789267", "0.47846258", "0.47730145", "0.4772723", "0.47661215", "0.47634205", "0.47619274", "0.47514093", "0.47501644", "0.4748515", "0.47483164", "0.47293496", "0.47076443", "0.47075817", "0.4706267", "0.4694961", "0.46930483", "0.4691633", "0.46915576", "0.46914512", "0.4688384", "0.46828955", "0.46799543", "0.4674472", "0.4668519", "0.46661648", "0.4665612", "0.46622357", "0.46562812", "0.46455318", "0.46454215", "0.46448848", "0.46432877", "0.46431202", "0.46404362", "0.4622876", "0.46182817" ]
0.81699383
0
Run the algorithm for the scores in the axis layer (0 for x, 1 for y). This score and the associated ranking are reliable, the ranking of the opposite score is not.
def run(self, axis, gamma): # Trajectories of the main score to compute and the opposite one traj_s, traj_o = [_np.ones(self.d[axis])], [_np.ones(self.d[1-axis])] # Ranked indices of the scores rank_s, rank_o = _np.array([], dtype=int), _np.array([], dtype=int) # List of node indices that have reached the zero threshold zeros_s, zeros_o = _np.array([], dtype=int), _np.array([], dtype=int) # Main loop for t in range(int(self.params['t_max'])): # Computing the opposite score without approx o = self._one_step(gamma, 1-axis, traj_s[-1]) rank_o, zeros_o = self._update_zero_rank(o, zeros_o, rank_o) traj_o = _np.concatenate((traj_o, [o])) # Computing the main score (given the opposite one) without approx s = self._one_step(gamma, axis, o) rank_s, zeros_s = self._update_zero_rank(s, zeros_s, rank_s) # Imposing the threshold to the score s[zeros_s] = self.params['low_bound'] traj_s = _np.concatenate((traj_s, [s])) # Checking the convergence if self._converg_check(axis, t, traj_s): break # Finalize the ranking of the positive scores rank_s = _np.append(rank_s, _np.argsort(s)[len(zeros_s):])[::-1] rank_o = _np.append(rank_o, _np.argsort(o)[len(zeros_o):])[::-1] # Update the class variables self._update_vars(axis, traj_s, traj_o, rank_s, rank_o, t) if self.params['print_info']: print ("Convergence in " + str(t) + " time steps.") if t >= self.params['t_max']: print("Warning. Stationary state not reached.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_run(self, axis):\n if (self.x_traj, self.y_traj)[axis] is None:\n if (self.inverse_x_traj, self.inverse_y_traj)[axis] is None:\n raise Exception('The algorithm has not been run.')\n else:\n if self.params['print_info']:\n print('Warning: you are using the opposite score. It can contain errors if any score is a zero below threshold.')\n return (self.inverse_x_traj, self.inverse_y_traj)[axis], (self.inverse_x_ranking, self.inverse_y_ranking)[axis]\n return (self.x_traj, self.y_traj)[axis], (self.x_ranking, self.y_ranking)[axis]", "def score(self, X, y=...):\n ...", "def score(self, X, y):\n ...", "def score(self, X, y):\n raise NotImplementedError('Abstract method \"score\" must be '\n 'specialised!')", "def rank(self,others):\n self.__verify(others)\n \n #construct the n evaluation criteria + classes in an extensible way\n #evalFn = [AP,R] in the standard format -> column with as many rows as replicates\n numClasses = others[0].eval['APBCI'].shape[2]\n\n iouType = others[0].params.iouType\n if iouType in [\"segm\",\"bbox\"]:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,3,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,1,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,3,2],axis=(0,2))]\n\n evfAP = lambda c: (lambda AP,R: np.nanmean(AP[:,:,c,0,-1],axis=0))\n for i in range(numClasses):\n evalFunctions.append(evfAP(i))\n\n else:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,0],axis=(0,2))]\n\n numReplicates = others[0].eval['APBCI'].shape[1]\n numInstances = len(others)\n numEvals = len(evalFunctions)\n\n replicateStats = np.zeros((numReplicates,numInstances))\n\n outperformMatrix = np.zeros((numInstances,numInstances,numEvals))\n rankCI = np.zeros((numInstances,3,numEvals))\n ranks = np.zeros((numInstances,numEvals,numReplicates))\n\n for evi,evf in enumerate(evalFunctions):\n for oi,o in enumerate(others):\n replicateStats[:,oi] = evf(o.eval['APBCI'],o.eval['RBCI'])\n\n for oi in range(len(others)):\n for oj in range(len(others)):\n outperformMatrix[oi,oj,evi] = np.mean(replicateStats[:,oi]>replicateStats[:,oj])\n\n for bci in range(numReplicates):\n ranks[:,evi,bci] = stats.rankdata(-replicateStats[bci,:],method='min')\n\n for oi in range(len(others)): \n rankCI[oi,0,evi] = np.mean(ranks[oi,evi,:])\n #use simple percentile method; the bias correction misbehaves \n rankCI[oi,1:,evi] = np.percentile(ranks[oi,evi,:],[100*(self.params.bootstrapAlpha/2),100*(1-self.params.bootstrapAlpha/2)])\n\n return rankCI, outperformMatrix, ranks", "def run(self):\n if self.pb.xvalEN and not self.isXvalMain:\n dt = 
np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.train_exam_no\n dt[self.val_indices] = 0.0\n else:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.pb.total_exam_no\n\n val = np.zeros(8,dtype=\"float32\")-1\n boosting = None\n wl = None\n if self.pb.algorithm == 'conf-rated':\n boosting = ConfidenceRated(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'adaboost':\n boosting = AdaBoost(self)\n wl = AdaBoostWL(self)\n elif self.pb.algorithm == 'adaboost-fast':\n boosting = AdaBoostFast(self)\n wl = AdaBoostFastWLMPI(self)\n elif self.pb.algorithm == 'rankboost':\n boosting = RankBoost(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'rankboost-fast':\n boosting = RankBoost(self)\n wl = AdaBoostFastWL(self)\n else:\n raise Exception(\"Unknown Boosting Algorithm\")\n \n for r in range(self.pb.rounds):\n tree = wl.run(dt)\n dt = boosting.run(dt = dt,\n r = r,\n tree = tree)\n \n if self.isXvalMain:\n boosting.finalize()\n \n \"\"\"Sync the predictions and save them to a file\"\"\"\n if self.pb.isLeader:\n if self.pb.xvalEN and not self.isXvalMain:\n val_predictions = boosting.get_val_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n val_predictions = val_predictions,\n hypotheses = hypotheses,\n )\n if self.pb.testEN and self.isXvalMain:\n train_predictions = np.zeros([self.pb.total_exam_no,self.pb.rounds],\n dtype=\"float32\")\n test_predictions = np.zeros([self.pb.test_exam_no,self.pb.rounds],\n dtype=\"float32\")\n for slv in np.arange(self.pb.comm_size):\n tr_i1 = self.pb.train_partition[slv]\n tr_i2 = self.pb.train_partition[slv+1]\n te_i1 = self.pb.test_partition[slv]\n te_i2 = self.pb.test_partition[slv+1]\n if slv == 0:\n train_predictions[tr_i1:tr_i2,:] = boosting.get_train_predictions()\n test_predictions[te_i1:te_i2,:] = boosting.get_test_predictions()\n else:\n train_predictions[tr_i1:tr_i2,:] = self.pb.comm.recv(source=slv,tag=11)\n test_predictions[te_i1:te_i2,:] = self.pb.comm.recv(source=slv,tag=12)\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n test_predictions = test_predictions,\n hypotheses = hypotheses,\n )\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = np.zeros([self.pb.total_exam_no,self.pb.rounds],\n dtype=\"float32\")\n for slv in np.arange(self.pb.comm_size):\n tr_i1 = self.pb.train_partition[slv]\n tr_i2 = self.pb.train_partition[slv+1]\n if slv == 0:\n train_predictions[tr_i1:tr_i2,:] = boosting.get_train_predictions()\n else:\n train_predictions[tr_i1:tr_i2,:] = self.pb.comm.recv(source=slv,tag=11)\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n hypotheses = hypotheses,\n )\n else:\n if self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n self.pb.comm.send(train_predictions,dest = 0,tag=11)\n test_predictions = boosting.get_test_predictions()\n self.pb.comm.send(test_predictions,dest = 0,tag=12)\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n self.pb.comm.send(train_predictions,dest = 0,tag=11)", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def update_score(self, red_alliance, blue_alliance, prediction, red_score, blue_score):\n score = 
self.get_score(red_score, blue_score)\n\n for team in red_alliance:\n self.scores[team] = self.el.update(self.scores[team], prediction, score)\n\n for team in blue_alliance:\n self.scores[team] = self.el.update(self.scores[team], 1 - prediction, score)", "def run(self):\n if self.pb.xvalEN and not self.isXvalMain:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.train_exam_no\n dt[self.val_indices] = 0.0\n else:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.pb.total_exam_no\n\n val = np.zeros(8,dtype=\"float32\")-1\n boosting = None\n wl = None\n if self.pb.algorithm == 'conf-rated':\n boosting = ConfidenceRated(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'adaboost':\n boosting = AdaBoost(self)\n wl = AdaBoostWL(self)\n elif self.pb.algorithm == 'adaboost-fast':\n boosting = AdaBoostFast(self)\n wl = AdaBoostFastWL(self)\n elif self.pb.algorithm == 'rankboost':\n boosting = RankBoost(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'rankboost-fast':\n boosting = RankBoost(self)\n wl = AdaBoostFastWL(self)\n else:\n raise Exception(\"Unknown Boosting Algorithm\")\n \n for r in range(self.pb.rounds):\n tree = wl.run(dt)\n dt = boosting.run(dt = dt,\n r = r,\n tree = tree)\n \n if self.isXvalMain:\n boosting.finalize()\n \n \"\"\"Sync the predictions and save them to a file\"\"\"\n if self.pb.isLeader:\n if self.pb.xvalEN and not self.isXvalMain:\n val_predictions = boosting.get_val_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n val_predictions = val_predictions,\n hypotheses = hypotheses,\n )\n if self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n test_predictions = boosting.get_test_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n test_predictions = test_predictions,\n hypotheses = hypotheses,\n )\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n hypotheses = hypotheses,\n )", "def evaluate_ranking_loss(eval_model, x_, y_, df_, loss_fn, mode='val'):\n\n qm_tensors_, db_mask_, blink_mask_ = convert_values_to_tensors(df_)\n dataset = TensorDataset(x_, y_, qm_tensors_, db_mask_, blink_mask_)\n question_sampler = QuestionSampler(torch.utils.data.SequentialSampler(range(len(qm_tensors_))),\n qm_tensors_, False)\n loader = DataLoader(dataset, batch_sampler=question_sampler, shuffle=False)\n\n eval_model.eval()\n with torch.no_grad():\n pred_ = []\n for x_, y_, qm_, db_mask, blink_mask in loader:\n xbatch_list = []\n xbatch_idxes = []\n for mask in [db_mask, blink_mask]:\n idxes = get_idxes_from_mask(mask)\n xbatch_idxes.append(idxes)\n x_pick, y_pick, qm_pick = x_[idxes], y_[idxes], qm_[idxes]\n\n if len(x_pick) == 0:\n xbatch_list.append(torch.tensor([]))\n continue\n # elif len(x_pick) <= 1:\n # xbatch_list.append(torch.tensor([]))\n # continue\n x_batch = x_pick\n xbatch_list.append(x_batch)\n if (len(xbatch_list[0]) == 0) and (len(xbatch_list[1]) == 0):\n print(x_, y_, qm_, db_mask, blink_mask)\n print(xbatch_idxes)\n yhat = eval_model(xbatch_list[0], xbatch_list[1])\n # get individual instance predicted prob\n yhat_in_order1 = torch.zeros(len(x_), 1)\n yhat_in_order2 = torch.zeros(len(x_), 1)\n yhat_in_order1[xbatch_idxes[0]] = yhat[:len(xbatch_idxes[0])]\n yhat_in_order2[xbatch_idxes[1]] = 
yhat[len(xbatch_idxes[0]):]\n yhat_in_order = torch.max(torch.cat((yhat_in_order1, yhat_in_order2), 1), 1)[0].reshape(-1,1)\n # print(yhat_in_order)\n pred_.append(yhat_in_order)\n pred_ = torch.cat(pred_, 0)\n prec, recall, f1, df_output = compute_qald_metrics(pred_, df_, gold_file_name=args.gold_file_name)\n avg_loss, _ = compute_loss(eval_model, loader, loss_fn, optimizer=None)\n print(\"Current {} -- prec {}; recall {}; f1 {}, loss {}\".format(mode, prec, recall, f1, avg_loss))\n\n return avg_loss, f1, pred_, df_output", "def scoring(self):\n pass", "def score(self, x, y, **kwargs):\n kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n loss = self.model.evaluate(x, y, **kwargs)\n if isinstance(loss, list):\n return loss[0]\n return loss", "def score(self, X, y):\r\n n_feature, _ = self.check_model()\r\n _, n_label = y.shape\r\n y = self.check_X_y_weights(X, y)\r\n\r\n if X.shape[1] == (n_feature + 1):\r\n X = X[:, 1:]\r\n\r\n assert (X.shape[1] == n_feature), \"X is of the wrong shape\"\r\n\r\n if self.scoring_func is None:\r\n y_pred = self.forward(X)\r\n\r\n loss = self.loss_func(torch.from_numpy(y_pred).float(),\r\n torch.from_numpy(y).float())\r\n loss = torch.mean(torch.sum(loss, 1)).numpy()\r\n\r\n return - loss\r\n else:\r\n y_pred = self.predict(X)\r\n return self.scoring_func(y_pred, y)", "def scores(model,X_train,y_train, X_test,y_test,inside):\r\n \r\n model.best_apply(X_test,y_test)\r\n y_pred = model.forward(X_test).cpu()\r\n y_pred = np.asarray(torch.argmax(y_pred, dim = 1).squeeze())\r\n y_test_ = np.asarray(torch.argmax(y_test.cpu(), dim = 1))\r\n acc1 = accuracy_score(y_test_,y_pred)\r\n model.best_apply(X_train,y_train)\r\n y_pred = model.forward(X_train).cpu()\r\n y_pred = np.asarray(torch.argmax(y_pred, dim = 1).squeeze())\r\n y_train_ = np.asarray(torch.argmax(y_train.cpu(), dim = 1))\r\n acc2 = accuracy_score(y_train_,y_pred)\r\n if inside :\r\n print(\"###### Results ##########\")\r\n print(\"The Validation Acuuracy is : \",acc1)\r\n print(\"The Training Acuuracy is : \",acc2)\r\n return [acc1,acc2]", "def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z", "def pre_step(x, y):\r\n feed_dict = {\r\n cnn.input_x: x,\r\n cnn.input_y: y,\r\n cnn.dropout_keep_prob: 1.0\r\n }\r\n scores = sess.run([cnn.scores], feed_dict)\r\n return scores", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def score(self, X, y, *args, **kwargs):\n y_t = self.target_transform.transform(y)\n\n if hasattr(self, 'ml_score') and self.ml_score:\n log.info('Using custom score')\n return r2_score(y_true=y_t,\n y_pred=self._notransform_predict(\n X, *args, **kwargs))\n else:\n return super().score(X, y, *args, **kwargs)", "def score(self, y_true, y_pred):\r\n pass", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def score(self) -> int:\n return self.function(self.x, self.y)", "def score(self, epochs=None, y=None):\n import sklearn.metrics\n from 
sklearn.base import is_classifier\n from sklearn.metrics import accuracy_score, mean_squared_error\n if check_version('sklearn', '0.17'):\n from sklearn.base import is_regressor\n else:\n def is_regressor(clf):\n return False\n\n # Run predictions if not already done\n if epochs is not None:\n self.predict(epochs)\n else:\n if not hasattr(self, 'y_pred_'):\n raise RuntimeError('Please predict() epochs first or pass '\n 'epochs to score()')\n\n # Check scorer\n if self.score_mode not in ('fold-wise', 'mean-fold-wise',\n 'mean-sample-wise'):\n raise ValueError(\"score_mode must be 'fold-wise', \"\n \"'mean-fold-wise' or 'mean-sample-wise'. \"\n \"Got %s instead'\" % self.score_mode)\n score_mode = self.score_mode\n if (self.predict_mode == 'mean-prediction' and\n self.score_mode != 'mean-sample-wise'):\n warn(\"score_mode changed from %s set to 'mean-sample-wise' because\"\n \" predict_mode is 'mean-prediction'.\" % self.score_mode)\n score_mode = 'mean-sample-wise'\n self.scorer_ = self.scorer\n if self.scorer_ is None:\n # Try to guess which scoring metrics should be used\n if self.predict_method == \"predict\":\n if is_classifier(self.clf):\n self.scorer_ = accuracy_score\n elif is_regressor(self.clf):\n self.scorer_ = mean_squared_error\n\n elif isinstance(self.scorer_, str):\n if hasattr(sklearn.metrics, '%s_score' % self.scorer_):\n self.scorer_ = getattr(sklearn.metrics, '%s_score' %\n self.scorer_)\n else:\n raise KeyError(\"{0} scorer Doesn't appear to be valid a \"\n \"scikit-learn scorer.\".format(self.scorer_))\n if not self.scorer_:\n raise ValueError('Could not find a scoring metric for clf=%s '\n ' and predict_method=%s. Manually define scorer'\n '.' % (self.clf, self.predict_method))\n\n # If no regressor is passed, use default epochs events\n if y is None:\n if self.predict_mode == 'cross-validation':\n y = self.y_train_\n else:\n if epochs is not None:\n y = epochs.events[:, 2]\n else:\n raise RuntimeError('y is undefined because '\n 'predict_mode=\"mean-prediction\" and '\n 'epochs are missing. You need to '\n 'explicitly specify y.')\n if not np.all(np.unique(y) == np.unique(self.y_train_)):\n raise ValueError('Classes (y) passed differ from classes used '\n 'for training. 
Please explicitly pass your y '\n 'for scoring.')\n elif isinstance(y, list):\n y = np.array(y)\n\n # Clean attributes\n for att in ['scores_', 'y_true_']:\n if hasattr(self, att):\n delattr(self, att)\n\n self.y_true_ = y # to be compared with y_pred for scoring\n\n # Preprocessing for parallelization across training times; to avoid\n # overheads, we divide them in large chunks.\n n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))\n parallel, p_func, n_jobs = parallel_func(_score_slices, n_jobs)\n n_estimators = len(self.train_times_['slices'])\n n_chunks = min(n_estimators, n_jobs)\n chunks = np.array_split(range(len(self.train_times_['slices'])),\n n_chunks)\n scores = parallel(p_func(\n self.y_true_, [self.y_pred_[train] for train in chunk],\n self.scorer_, score_mode, self._cv_splits)\n for chunk in chunks)\n # TODO: np.array scores from initialization JRK\n self.scores_ = np.array([score for chunk in scores for score in chunk])\n return self.scores_", "def preprocess_lab_data(self):\n print(\"Processing dataset from the labs ...\")\n self.full_dataset[\"score\"] = self.full_dataset[\"score\"].apply(lambda x: -1 if x==0 else 1)", "def compute_scores(self, score_type, adjust = None, verbose = False):\n if score_type == 'coupling':\n scores = self.coupling\n elif score_type == 'barycentric':\n ot_emd = ot.da.EMDTransport()\n ot_emd.xs_ = self.xs\n ot_emd.xt_ = self.xt\n ot_emd.coupling_= self.coupling\n xt_s = ot_emd.inverse_transform(Xt=self.xt) # Maps target to source space\n scores = -sp.spatial.distance.cdist(self.xs, xt_s, metric = self.metric) #FIXME: should this be - dist?\n elif score_type == 'distance':\n # For baselines that only use distances without OT\n scores = -sp.spatial.distance.cdist(self.xs,self.xt, metric = self.metric)\n elif score_type == 'projected':\n # Uses projection mapping, computes distance in projected space\n scores = -sp.spatial.distance.cdist(self.xs,[email protected], metric = self.metric)\n\n if adjust == 'csls':\n scores = csls(scores, knn = 10)\n #print('here')\n elif adjust == 'isf':\n raise NotImplementedError('Inverted Softmax not implemented yet')\n\n self.scores = scores\n\n if verbose:\n plt.figure()\n plt.imshow(scores, cmap='jet')\n plt.colorbar()\n plt.show()", "def rank_yx(self, rankyx, rank_to_yx=1):\r\n if rank_to_yx == 1:\r\n x = int(rankyx) % int(self.shapes[1])\r\n y = (rankyx - x) / int(self.shapes[1])\r\n return [y, x] # More convenient to return y, x\r\n \r\n if rank_to_yx == 0: # that means transform yx to rank, expecting rankyx to be a list, may not be necessary\r\n rankyx = rankyx[0] * int(self.shapes[1]) + rankyx[1]\r\n return rankyx # returns back a float\r", "def compute_positions(scores, layers, cells, direction):\n prior = 1/np.arange(3, layers+3)\n prior = prior/prior.sum()\n x = np.linspace(-5, 5, layers)[::-1] \n prior = 1/(1 + np.exp(-x))\n prior = prior/prior.sum()\n\n # Compute the probability depending on the direction in which we want to know where the change is\n if direction == \"lr\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+j, np.arange(cells[0])*cells[1]+j+1].sum() for j in range(layers)])\n elif direction == \"rl\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+cells[1]-1-j, np.arange(cells[0])*cells[1]+cells[1]-1-j-1].sum() for j in range(layers)])\n elif direction == \"tb\":\n col_scores = np.array([scores[np.arange(cells[1])+cells[1]*j, np.arange(cells[1])+cells[1]*(j+1)].sum() for j in range(layers)])\n elif direction == \"bt\":\n col_scores = 
np.array([scores[np.arange(cells[1])+cells[1]*(cells[0]-1-j), np.arange(cells[1])+cells[1]*(cells[0]-1-j-1)].sum() for j in range(layers)])\n \n # Apply softmax + multiply by prior -> Then get the most likely position\n col_scores = sm(col_scores)\n position = np.argmax(col_scores*prior)\n return position", "def _run(output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name\n )\n\n # Housekeeping.\n y_tick_labels = [\n '{0:s}'.format(s) for s in MODEL_DEPTH_WIDTH_STRINGS\n ]\n x_tick_labels = [\n '{0:d}'.format(c) for c in FIRST_LAYER_CHANNEL_COUNTS\n ]\n\n y_axis_label = 'NN depth, width'\n x_axis_label = 'Spectral complexity'\n\n # Plot grid for U-net++ without deep supervision.\n dimensions_3d = (\n len(MODEL_WIDTHS), len(MODEL_DEPTHS), len(FIRST_LAYER_CHANNEL_COUNTS)\n )\n num_weights_matrix = numpy.reshape(NUM_WEIGHTS_ARRAY_PLUSPLUS, dimensions_3d)\n num_weights_matrix = numpy.swapaxes(num_weights_matrix, 0, 1)\n\n num_depth_width_combos = len(MODEL_DEPTH_WIDTH_STRINGS)\n num_channel_counts = len(FIRST_LAYER_CHANNEL_COUNTS)\n dimensions_2d = (num_depth_width_combos, num_channel_counts)\n num_weights_matrix = numpy.reshape(num_weights_matrix, dimensions_2d)\n\n all_weights = numpy.concatenate((\n NUM_WEIGHTS_ARRAY_PLUSPLUS, NUM_WEIGHTS_ARRAY_PLUSPLUSPLUS,\n NUM_WEIGHTS_ARRAY_PLUSPLUS_DEEP, NUM_WEIGHTS_ARRAY_PLUSPLUSPLUS_DEEP\n ), axis=0)\n\n min_colour_value = numpy.min(numpy.log10(all_weights))\n max_colour_value = numpy.max(numpy.log10(all_weights))\n\n figure_object, axes_object = _plot_scores_2d(\n score_matrix=numpy.log10(num_weights_matrix),\n min_colour_value=min_colour_value,\n max_colour_value=max_colour_value,\n x_tick_labels=x_tick_labels, y_tick_labels=y_tick_labels\n )\n\n figure_width_px = (\n figure_object.get_size_inches()[0] * figure_object.dpi\n )\n marker_size_px = figure_width_px * (\n MARKER_SIZE_GRID_CELLS / num_weights_matrix.shape[1]\n )\n\n if SELECTED_MARKER_INDICES[0] == 0:\n axes_object.plot(\n SELECTED_MARKER_INDICES[2], SELECTED_MARKER_INDICES[1],\n linestyle='None', marker=MARKER_TYPE,\n markersize=marker_size_px, markeredgewidth=0,\n markerfacecolor=MARKER_COLOUR,\n markeredgecolor=MARKER_COLOUR\n )\n\n axes_object.set_xlabel(x_axis_label)\n axes_object.set_ylabel(y_axis_label)\n axes_object.set_title(NN_TYPE_STRINGS_FANCY[0])\n\n panel_file_names = [\n '{0:s}/num_weights_log10_{1:s}.jpg'.format(\n output_dir_name, NN_TYPE_STRINGS[0].replace('_', '-')\n )\n ]\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n # Plot grid for U-net++ with deep supervision.\n num_weights_matrix = numpy.reshape(\n NUM_WEIGHTS_ARRAY_PLUSPLUS_DEEP, dimensions_3d\n )\n num_weights_matrix = numpy.swapaxes(num_weights_matrix, 0, 1)\n num_weights_matrix = numpy.reshape(num_weights_matrix, dimensions_2d)\n\n figure_object, axes_object = _plot_scores_2d(\n score_matrix=numpy.log10(num_weights_matrix),\n min_colour_value=min_colour_value,\n max_colour_value=max_colour_value,\n x_tick_labels=x_tick_labels, y_tick_labels=y_tick_labels\n )\n\n if SELECTED_MARKER_INDICES[0] == 1:\n axes_object.plot(\n SELECTED_MARKER_INDICES[2], SELECTED_MARKER_INDICES[1],\n linestyle='None', marker=MARKER_TYPE,\n markersize=marker_size_px, markeredgewidth=0,\n markerfacecolor=MARKER_COLOUR,\n markeredgecolor=MARKER_COLOUR\n )\n\n axes_object.set_xlabel(x_axis_label)\n 
axes_object.set_ylabel(y_axis_label)\n axes_object.set_title(NN_TYPE_STRINGS_FANCY[1])\n\n panel_file_names.append(\n '{0:s}/num_weights_log10_{1:s}.jpg'.format(\n output_dir_name, NN_TYPE_STRINGS[1].replace('_', '-')\n )\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n # Plot grid for U-net3+ without deep supervision.\n num_weights_matrix = numpy.reshape(\n NUM_WEIGHTS_ARRAY_PLUSPLUSPLUS, dimensions_3d\n )\n num_weights_matrix = numpy.swapaxes(num_weights_matrix, 0, 1)\n num_weights_matrix = numpy.reshape(num_weights_matrix, dimensions_2d)\n\n figure_object, axes_object = _plot_scores_2d(\n score_matrix=numpy.log10(num_weights_matrix),\n min_colour_value=min_colour_value,\n max_colour_value=max_colour_value,\n x_tick_labels=x_tick_labels, y_tick_labels=y_tick_labels\n )\n\n if SELECTED_MARKER_INDICES[0] == 2:\n axes_object.plot(\n SELECTED_MARKER_INDICES[2], SELECTED_MARKER_INDICES[1],\n linestyle='None', marker=MARKER_TYPE,\n markersize=marker_size_px, markeredgewidth=0,\n markerfacecolor=MARKER_COLOUR,\n markeredgecolor=MARKER_COLOUR\n )\n\n axes_object.set_xlabel(x_axis_label)\n axes_object.set_ylabel(y_axis_label)\n axes_object.set_title(NN_TYPE_STRINGS_FANCY[2])\n\n panel_file_names.append(\n '{0:s}/num_weights_log10_{1:s}.jpg'.format(\n output_dir_name, NN_TYPE_STRINGS[2].replace('_', '-')\n )\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n # Plot grid for U-net3+ with deep supervision.\n num_weights_matrix = numpy.reshape(\n NUM_WEIGHTS_ARRAY_PLUSPLUSPLUS_DEEP, dimensions_3d\n )\n num_weights_matrix = numpy.swapaxes(num_weights_matrix, 0, 1)\n num_weights_matrix = numpy.reshape(num_weights_matrix, dimensions_2d)\n\n figure_object, axes_object = _plot_scores_2d(\n score_matrix=numpy.log10(num_weights_matrix),\n min_colour_value=min_colour_value,\n max_colour_value=max_colour_value,\n x_tick_labels=x_tick_labels, y_tick_labels=y_tick_labels\n )\n\n if SELECTED_MARKER_INDICES[0] == 3:\n axes_object.plot(\n SELECTED_MARKER_INDICES[2], SELECTED_MARKER_INDICES[1],\n linestyle='None', marker=MARKER_TYPE,\n markersize=marker_size_px, markeredgewidth=0,\n markerfacecolor=MARKER_COLOUR,\n markeredgecolor=MARKER_COLOUR\n )\n\n axes_object.set_xlabel(x_axis_label)\n axes_object.set_ylabel(y_axis_label)\n axes_object.set_title(NN_TYPE_STRINGS_FANCY[3])\n\n panel_file_names.append(\n '{0:s}/num_weights_log10_{1:s}.jpg'.format(\n output_dir_name, NN_TYPE_STRINGS[3].replace('_', '-')\n )\n )\n\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n # Concatenate panels into one figure.\n concat_file_name = '{0:s}/num_weights_log10.jpg'.format(output_dir_name)\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_file_name))\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names,\n output_file_name=concat_file_name,\n num_panel_rows=2, num_panel_columns=2\n )\n imagemagick_utils.resize_image(\n input_file_name=concat_file_name,\n output_file_name=concat_file_name, output_size_pixels=int(1e7)\n )", "def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, 
t):\n if axis == 0:\n self.x_traj = traj_s\n self.x_ranking = rank_s\n self.x_scores = traj_s[-1]\n self.inverse_y_traj = traj_o\n self.inverse_y_ranking = rank_o\n self.inverse_y_scores = traj_o[-1]\n if axis == 1:\n self.y_traj = traj_s\n self.y_ranking = rank_s\n self.y_scores = traj_s[-1]\n self.inverse_x_traj = traj_o\n self.inverse_x_ranking = rank_o\n self.inverse_x_scores = traj_o[-1]", "def run(pos_data,ball_data,match,ranking_type='A'):\n roles = ['home','guest']\n sections = ['1st','2nd']\n result = {'home':[0]*2, 'guest':[0]*2, 'ball':[0]*2}\n \n # switch for l2r switching mode\n l2r_section = 0\n\n # processing player position data first \n for sec in sections:\n home_direction = 'r2l'\n for role in roles:\n print('Processing: %s-%s...' % (role,sec))\n sorted_pos_data = sort_position_data(pos_data[role][sec], ranking_type)\n stitched_data = stitch_position_data(sorted_pos_data,ball_data[sec!='1st'])\n if role == 'home':\n home_direction = determine_playing_direction(stitched_data[:,0:2])\n if home_direction == 'l2r':\n switch_playing_direction(stitched_data)\n l2r_section = 0 if sec=='1st' else 1\n rescale_playing_coords(stitched_data,match['stadium'])\n result[role][0 if sec=='1st' else 1] = stitched_data\n print('done')\n \n # processing ball data\n print('Processing ball...')\n switch_playing_direction(ball_data[l2r_section][:,1:3])\n for i in [0,1]:\n rescale_playing_coords(ball_data[i][:,1:3],match['stadium'])\n result['ball'][0] = ball_data[0][:,1:3]\n result['ball'][1] = ball_data[1][:,1:3]\n\n #correct value ranges.\n print('clamping values.')\n clamp_values(result)\n print('done.')\n return result", "def score(self, X, y):\n X_pp = self.preprocessor.transform(X)\n # Score the model on the data here\n return(self.estimator.score(X_pp, y))", "def score(self, x, y, verbose=False):\n y_pred = self.forward(x).T\n\n y_p = np.argmax(y_pred, axis=-1)\n y_t = np.argmax(y, axis=-1)\n fpr, tpr, thresholds = roc_curve(y_t, y_p, pos_label=2)\n\n metricas = {\n 'accuracy': accuracy_score(y_t, y_p),\n 'f1':f1_score(y_t, y_p, average='macro'),\n 'balanced accuracy': balanced_accuracy_score(y_t, y_p),\n 'auc': auc(fpr, tpr)\n }\n\n if verbose:\n print('Accuracy', metricas['accuracy'])\n print('F1-Macro', metricas['f1'])\n print('Balanced accuracy', metricas['balanced accuracy'])\n print('AUC', metricas['auc'])\n\n return metricas", "def score(self, X, y):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.score(stuff,y)\n\n return result\n pass", "def evaluate_ranking_loss(eval_model, x_, y_, m_labels_, ques_, loss_fn, mode='val'):\n eval_model.eval()\n\n dataset_ = TensorDataset(x_, y_)\n question_sampler = QuestionSampler(torch.utils.data.SequentialSampler(range(len(y_))), y_, False)\n loader = DataLoader(dataset_, batch_sampler=question_sampler, shuffle=False)\n total_loss = 0.0\n\n with torch.no_grad():\n pred_ = eval_model(x_, m_labels_)\n\n batch_num = 0\n for xb, yb in loader:\n if yb.shape[0] == 1:\n # print(xb, yb)\n continue\n yhat = eval_model(xb, yb)\n yb = yb.reshape(-1, 1)\n yhat_pos = yhat[-1].repeat((len(yhat) - 1), 1)\n yhat_neg = yhat[:-1]\n loss = loss_fn(yhat_pos, yhat_neg, torch.ones((len(yhat) - 1), 1).to(device))\n total_loss += loss.item() * (yb.shape[0] - 1)\n batch_num += 1\n avg_loss = total_loss / batch_num\n prec, recall, f1, _ = get_qald_metrics(pred_, m_labels_, ques_, mode=mode) # train and val both use 'val' mode\n\n return avg_loss, f1, pred_", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, 
verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab", "def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1", "def positive_scores(self, axis, th_stat=10**(-2)):\n \n traj, rank = self._check_run(axis)\n \n low_boundary = _np.exp(_np.log(self.params['low_bound'])/2)\n t = len(traj)-1\n stat = 0\n \n # Iteration over the trajectories\n for i in range(len(traj[0])):\n # above the low boundary\n if traj[t][i] > low_boundary:\n # stationary (change less than stat_th)\n if abs(_np.log(traj[t][i]) - _np.log(traj[t-1][i])) < th_stat:\n stat += 1\n return stat", "def validate_average_rank(self) -> float:\n logger.info('Average rank validation ...')\n\n args = self.args\n self.biencoder.eval()\n distributed_factor = self.distributed_factor\n\n if args.use_dict_input:\n data_iterator = self.get_dict_data_iterator(args.dev_psgs_file, args.dev_queries_file,\n args.dev_qrels_file, args.dev_trec_file,\n args.dev_batch_size,\n shuffle=False, split=False)\n else:\n data_iterator = self.get_data_iterator(args.dev_file, args.dev_batch_size, shuffle=False)\n\n sub_batch_size = args.val_av_rank_bsz\n sim_score_f = ClusterNllLoss.get_similarity_function()\n q_represenations = []\n ctx_represenations = []\n positive_idx_per_question = []\n\n num_hard_negatives = args.val_av_rank_hard_neg\n num_other_negatives = args.val_av_rank_other_neg\n\n log_result_step = args.log_batch_step\n\n for i, samples_batch in enumerate(data_iterator.iterate_data()):\n # samples += 1\n if len(q_represenations) > args.val_av_rank_max_qs / distributed_factor:\n break\n\n biencoder_input = ClusterBertEncoder.create_biencoder_input(samples_batch, self.tensorizer,\n True,\n max_query_length=args.query_sequence_length,\n max_doc_length=args.sequence_length,\n num_hard_negatives=num_hard_negatives,\n num_other_negatives=num_other_negatives,\n shuffle=False)\n total_ctxs = len(ctx_represenations)\n ctxs_ids = biencoder_input.context_ids\n ctxs_segments = biencoder_input.ctx_segments\n bsz = ctxs_ids.size(0)\n\n # split contexts batch into sub batches since it is supposed to be too large to be processed in one batch\n for j, batch_start in enumerate(range(0, bsz, sub_batch_size)):\n\n q_ids, q_segments = (biencoder_input.question_ids, biencoder_input.question_segments) if j == 0 
\\\n else (None, None)\n\n if j == 0 and args.n_gpu > 1 and q_ids.size(0) == 1:\n # if we are in DP (but not in DDP) mode, all model input tensors should have batch size >1 or 0,\n # otherwise the other input tensors will be split but only the first split will be called\n continue\n\n ctx_ids_batch = ctxs_ids[batch_start:batch_start + sub_batch_size]\n ctx_seg_batch = ctxs_segments[batch_start:batch_start + sub_batch_size]\n\n q_attn_mask = self.tensorizer.get_attn_mask(q_ids)\n ctx_attn_mask = self.tensorizer.get_attn_mask(ctx_ids_batch)\n with torch.no_grad():\n q_dense, ctx_dense = self.biencoder(q_ids, q_segments, q_attn_mask, ctx_ids_batch, ctx_seg_batch,\n ctx_attn_mask)\n\n if q_dense is not None:\n q_represenations.extend(q_dense.cpu().split(1, dim=0))\n\n ctx_represenations.extend(ctx_dense.cpu().split(1, dim=0))\n\n batch_positive_idxs = biencoder_input.is_positive\n positive_idx_per_question.extend([total_ctxs + v for v in batch_positive_idxs])\n\n if (i + 1) % log_result_step == 0:\n logger.info('Av.rank validation: step %d, computed ctx_vectors %d, q_vectors %d', i,\n len(ctx_represenations), len(q_represenations))\n\n ctx_represenations = torch.cat(ctx_represenations, dim=0)\n q_represenations = torch.cat(q_represenations, dim=0)\n\n logger.info('Av.rank validation: total q_vectors size=%s', q_represenations.size())\n logger.info('Av.rank validation: total ctx_vectors size=%s', ctx_represenations.size())\n\n q_num = q_represenations.size(0)\n assert q_num == len(positive_idx_per_question)\n\n scores = sim_score_f(q_represenations, ctx_represenations)\n values, indices = torch.sort(scores, dim=1, descending=True)\n\n rank = 0\n for i, idx in enumerate(positive_idx_per_question):\n # aggregate the rank of the known gold passage in the sorted results for each question\n gold_idx = (indices[i] == idx).nonzero()\n rank += gold_idx.item()\n\n if distributed_factor > 1:\n # each node calcuated its own rank, exchange the information between node and calculate the \"global\" average rank\n # NOTE: the set of passages is still unique for every node\n eval_stats = all_gather_list([rank, q_num], max_size=1000)\n for i, item in enumerate(eval_stats):\n remote_rank, remote_q_num = item\n if i != args.local_rank:\n rank += remote_rank\n q_num += remote_q_num\n\n av_rank = float(rank / q_num)\n logger.info('Av.rank validation: average rank %s, total questions=%d', av_rank, q_num)\n return av_rank", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def reset_score(self):\n self.x_score = 0\n self.o_score = 0", "def classification_score(self, x, y):\t\n\t\tpass", "def run(self, x):\n \"*** YOUR CODE HERE question 1 ***\"\n score = nn.DotProduct(self.w, x)\n\n return score", "def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted", "def evaluate(self, X_test, Y_test):\n \n test_data = zip(X_test, Y_test)\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n \n# Updated for the testing\n# ========================\n return (sum(int(x == y) for (x, y) in test_results) / 100)", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def score(self, x, y):\n y_hat = (self.model['b1'] * x) + self.model['b0']\n return R2(y, y_hat)", "def run(self):\n dataset = self.make_dataset()\n preprocess_mask = self.preprocess()\n cnn1_scores = self.run_cnn(dataset, 'cnn1')\n cnn2_scores = self.run_cnn(dataset, 'cnn2')\n return self.score_stamps(preprocess_mask, cnn1_scores, cnn2_scores)", "def do_score_prediction(self):\n \n import iread.myio as mio\n from igui.score_canvas import ScoreCanvas\n exp_name = 
'JPS_act_12_exp_4_accv_half_fc_j2'\n exp_name_base = 'ASM_act_12_exp_4'\n exp_base_folder = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp'\n exp_path = iu.fullfile(exp_base_folder, 'folder_%s' % exp_name, 'batches.meta')\n meta_base_path = iu.fullfile(exp_base_folder, 'folder_%s' % exp_name_base, 'batches.meta')\n meta = mio.unpickle(exp_path)\n meta_base = mio.unpickle(meta_base_path)\n images_path = meta_base['images_path']\n \n pred_pose = meta['feature_list'][0]\n gt_pose = meta['random_feature_list'][0]\n ntotal = gt_pose.shape[-1]\n print 'gt_pose_shape',gt_pose.shape\n print 'pred_pose_shape', pred_pose.shape\n ref_frame = 7600 # This is the index in test range\n ## ref_frame = 2600 # This is the index in test range\n test_range = self.test_data_provider.feature_range\n ref_idx = test_range[ref_frame]\n \n n_to_show = 1000\n \n idx_to_show = np.random.choice(ntotal, n_to_show - 1)\n idx_to_show = [ref_idx] + idx_to_show.tolist() \n idx_to_show = np.asarray(idx_to_show, dtype=np.int).flatten()\n \n ref_pose = pred_pose[...,ref_idx].reshape((-1,1),order='F') \n pose_to_eval =gt_pose[...,idx_to_show]\n output_feature_name = 'fc_2' # <------------------Parameter\n output_layer_idx = self.get_layer_idx(output_feature_name)\n\n # do it once <------------- Maybe it can support multiple batch in the future\n data_dim = self.model_state['layers'][output_layer_idx]['outputs']\n print 'data_dim', data_dim\n \n cur_data = [np.require(np.tile(ref_pose, [1,n_to_show]), \\\n dtype=np.single,requirements='C'), \\\n np.require(pose_to_eval.reshape((-1,n_to_show),order='F'),\\\n dtype=np.single,requirements='C'), \\\n np.require(np.zeros((1,n_to_show),dtype=np.single), \\\n requirements='C'),\n np.require(np.zeros((n_to_show,data_dim),dtype=np.single), \\\n requirements='C')]\n residuals = cur_data[1][...,0].reshape((-1,1),order='F') - cur_data[1]\n dp = self.test_data_provider\n mpjpe = dutils.calc_mpjpe_from_residual(residuals, dp.num_joints)\n\n gt_score = dp.calc_score(mpjpe, dp.mpjpe_factor/dp.max_depth,\\\n dp.mpjpe_offset/dp.max_depth).reshape((1,n_to_show)).flatten()\n self.libmodel.startFeatureWriter(cur_data, output_layer_idx)\n self.finish_batch()\n score = cur_data[-1].T\n print 'dim score', score.shape, 'dim gt_score', gt_score.shape\n score = score.flatten()\n # score = gt_score.flatten()\n def my_sort_f(k):\n if k == 0:\n return 10000000\n else:\n return score[k]\n sorted_idx = sorted(range(n_to_show), key=my_sort_f,reverse=True)\n s_to_show = [idx_to_show[k] for k in sorted_idx]\n sorted_score = np.asarray( [score[k] for k in sorted_idx])\n \n pose_to_plot = self.convert_relskel2rel(cur_data[1])\n sorted_pose = pose_to_plot[...,sorted_idx]\n class ScorePoseCanvas(ScoreCanvas):\n def __init__(self,data_dic):\n import iread.h36m_hmlpe as h36m\n ScoreCanvas.__init__(self,data_dic)\n self.pose_data = data_dic['pose_data']\n self.limbs = h36m.part_idx\n self.tmp = 0\n def show_image(self,ax):\n # ScoreCanvas.show_image(self,ax)\n # return\n import Image\n idx =self.cur_data_idx\n if idx == 0:\n self.tmp = self.tmp + 1\n if self.tmp == 1:\n img = self.load_image(idx)\n ax.imshow(np.asarray(img))\n return\n print 'Current data idx %d ' % self.cur_data_idx\n # params = {'elev':-89, 'azim':-107}\n # params = {'elev':-69, 'azim':-107}\n params = {'elev':-81, 'azim':-91} # frontal view\n fig = plt.figure(100)\n from mpl_toolkits.mplot3d import Axes3D\n import imgproc\n # new_ax = self.fig.add_axes( rng_rel,projection='polar')\n new_ax = fig.add_subplot(111,projection='3d')\n 
imgproc.turn_off_axis(new_ax)\n cur_pose = self.pose_data[...,idx].reshape((3,-1),order='F')\n dutils.show_3d_skeleton(cur_pose.T,\\\n self.limbs, params)\n xmin,xmax = np.min(cur_pose[0]),np.max(cur_pose[0])\n ymin,ymax = np.min(cur_pose[1]),np.max(cur_pose[1])\n zmin,zmax = np.min(cur_pose[2]),np.max(cur_pose[2])\n def extent(x,y,ratio):\n x = x + (x-y) * ratio\n y = y + (y-x) * ratio\n return -0.4,0.4\n r = 0.1\n new_ax.set_xlim(extent(xmin,xmax,r))\n new_ax.set_ylim(extent(ymin,ymax,r))\n new_ax.set_ylim(extent(zmin,zmax,r))\n tmp_folder = '/public/sijinli2/ibuffer/2014-CVPR2015/tmp/images'\n save_path = iu.fullfile(tmp_folder, 'tmp_image.png')\n plt.savefig(save_path)\n img = Image.open(save_path)\n plt.close(100)\n img_arr = np.asarray(img)\n s = np.int(img_arr.shape[0]/5.0)\n e = np.int(img_arr.shape[0] - s)\n s = 0\n e = img_arr.shape[0]\n img_arr = img_arr[s:e,:,:]\n ax.imshow(np.asarray(img_arr))\n # ax.plot([1,0,0],[0,1,0],[0,0,1])\n\n\n sc = ScorePoseCanvas({'x': np.asarray(range(len(idx_to_show))), 'y':sorted_score,\\\n 'images_path': [images_path[k] for k in s_to_show], \\\n 'pose_data':sorted_pose})\n sc.start()\n print 'max score is ' , sorted_score.max()\n gt_sort_idx = sorted(range(n_to_show), key=lambda k:gt_score[k], reverse=True)\n sorted_gt_score = np.asarray([gt_score[k] for k in gt_sort_idx])\n sorted_score_by_gt = [score[k] for k in gt_sort_idx]\n pl.plot(np.asarray(range(n_to_show)), sorted_gt_score, 'r', label='gt_score')\n pl.plot(np.asarray(range(n_to_show)), sorted_score_by_gt, 'g', label='pred_score')", "def _forward(self, x):\n # Computing: `x * w^T`\n score = CArray(x.dot(self.w.T)).todense().ravel() + self.b\n\n scores = CArray.ones(shape=(x.shape[0], self.n_classes))\n scores[:, 0] = -score.ravel().T\n scores[:, 1] = score.ravel().T\n\n return scores", "def score(self):", "def calculate(self, prev_scores):\n self.set_scores(prev_scores)\n for match in self.week.matches:\n if match.played:\n # Fetch old scores\n winner_score = float(prev_scores[match.winner])\n loser_score = float(prev_scores[match.loser])\n\n # Update this ranking's scores\n score_delta = loser_score * 0.1\n self.score[match.winner] = winner_score + score_delta\n self.score[match.loser] = loser_score - score_delta", "def get_prediction_scores(self, X_te, Y_te, scoring):\n X_te = X_te - self.Xm\n if self.scale_X:\n X_te = X_te / self.Xs\n M_te = X_te @ self.Q\n\n scores = torch.zeros(Y_te.shape[1], len(self.ls), dtype=X_te.dtype)\n for j, Y_te_j in enumerate(Y_te.t()):\n N_te_j = None\n for k, l in enumerate(self.ls):\n Yhat_te_j, N_te_j = self._predict_single(\n X_te, M_te, j, l, N_te_j\n )\n scores[j, k] = scoring(Y_te_j, Yhat_te_j).item()\n return scores", "def calculate_scores(self):\n # Prediction based scores\n #self.report = classification_report(self.y_test, self.y_pred)\n self.accuracy = accuracy_score(self.y_real, self.y_pred)\n self.precision = precision_score(self.y_real, self.y_pred)\n self.recall = recall_score(self.y_real, self.y_pred)\n self.f1 = f1_score(self.y_real, self.y_pred)\n \n # Probability based scores\n self.fpr, self.tpr, _ = roc_curve(self.y_real, self.y_proba)\n self.average_precision = average_precision_score(self.y_real, self.y_proba)\n self.brier_loss = brier_score_loss(self.y_real, self.y_proba)\n self.roc_auc = roc_auc_score(self.y_real, self.y_proba)\n self.prec_cur, self.recall_cur, _ = precision_recall_curve(self.y_real, self.y_proba)", "def evalrank(model, data, split='dev'):\n print 'Loading dataset'\n if split == 'dev':\n X = load_dataset(data, 
load_train=False)[1]\n else:\n X = load_dataset(data, load_train=False)[2]\n\n print 'Computing results...'\n train = load_dataset('CAD', load_train=True)[0]\n vectors = encode_sentences(model, train[0], verbose=False)\n # demo.retrieve_captions(model, net, train[0], vectors, 'image.jpg', k=5)\n ls = encode_sentences(model, X[0])\n lim = encode_images(model, X[1])\n\n (r1, r5, r10) = i2t(lim, X[0], train[0], vectors)\n print \"Image to text: %.1f, %.1f, %.1f\" % (r1, r5, r10)\n # (r1i, r5i, r10i, medri) = t2i(lim, ls)\n # print \"Text to image: %.1f, %.1f, %.1f, %.1f\" % (r1i, r5i, r10i, medri)", "def custom_score_2(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf\n if game.is_winner(player):\n #print(\"You win\")\n return math.inf\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n opp_y_coord, opp_x_coord = game.get_player_location(opponent)\n opp_x_eval = (width - float(opp_x_coord)) ** 2\n opp_y_eval = (height - float(opp_y_coord)) ** 2\n opp_center_eval = float(opp_x_eval + opp_y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves * 2 - opp_center_eval\n score = no_moves - opp_score/rem_spaces\n return float(score)", "def _evaluate(self, x, y):\n raise NotImplementedError()", "def _score_to_decision(self, score):", "def _score(self, x, seq):\n pass", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()", "def update_score():\n pass", "def score(self, X, y):\n out = None\n ### YOUR CODE HERE\n pred = self.predict(X)\n assert pred.shape == y.shape\n out = ((pred-y)**2).mean()\n ### END CODE\n return out", "def score_batch(self, pipelines, X, y, objectives):\n check_all_pipeline_names_unique(pipelines)", "def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n score = multi_scale_ssim(x=x, y=y, kernel_size=self.kernel_size, kernel_sigma=self.kernel_sigma,\n data_range=self.data_range, reduction=self.reduction, scale_weights=self.scale_weights,\n k1=self.k1, k2=self.k2)\n return torch.ones_like(score) - score", "def find_ranking(comparisons, equal_width=0.2, max_rank=-1, verbose=False):\n # remove unnecessary variables\n comparisons = {(i, j) if i < j else (j, i): value if i < j else 1 - value\n for (i, j), value in comparisons.items()}\n nodes = np.unique(\n [i for ij in comparisons.keys() for i in ij])\n\n # define variables\n model = Model('comparison')\n model.setParam('OutputFlag', verbose)\n values = np.fromiter(comparisons.values(), 
dtype=float)\n assert values.max() <= 1 and values.min() >= 0\n # variables to encode the error of comparisons\n E_ij = model.addVars(comparisons.keys(), name='e_ij', vtype=GRB.CONTINUOUS,\n ub=1.0-values, lb=-values)\n # variables to encode hard choice of >=, <=, ==\n Ge_ij = model.addVars(comparisons.keys(), name='ge_ij', vtype=GRB.BINARY)\n Le_ij = model.addVars(comparisons.keys(), name='le_ij', vtype=GRB.BINARY)\n Eq_ij = model.addVars(comparisons.keys(), name='eq_ij', vtype=GRB.BINARY)\n # variables to help with transitivity in non-fully connected graphs\n if max_rank < 1:\n max_rank = len(nodes)\n R_i = model.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=max_rank)\n # variables to emulate abs\n T_ij_pos = {}\n T_ij_neg = {}\n index = (values != 1) & (values != 0)\n T_ij_pos = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_pos', lb=0, ub=1-values[index])\n T_ij_neg = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_neg', lb=0, ub=values[index])\n model.update()\n\n # emulate abs for non-binary comparisons: E_ij = T_ij_pos - T_ij_neg\n model.addConstrs(\n (E_ij[ij] == T_ij_pos[ij] - T_ij_neg[ij] for ij in T_ij_pos),\n 'E_ij = T_ij_pos - T_ij_neg')\n\n # hard decision of >=, <=, and ==\n lower_bound = 0.5 - equal_width / 2.0\n upper_bound = 0.5 + equal_width / 2.0\n # <=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound <= ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound >= -1 + ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_upper_bound')\n # >=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound >= -le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound <= 1 - le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_upper_bound')\n # ==\n model.addConstrs(\n (le + eq + ge == 1 for le, eq, ge in zip(\n Le_ij.values(), Eq_ij.values(), Ge_ij.values())), 'eq_ij')\n\n # transitivity\n for (i, j), eq_a in Eq_ij.items():\n le_a = Le_ij[i, j]\n ge_a = Ge_ij[i, j]\n for k in nodes:\n j_, k_ = j, k\n if j > k:\n j_, k_ = k, j\n eq_b = Eq_ij.get((j_, k_), None)\n if eq_b is None:\n continue\n else:\n le_b = Le_ij[j_, k_]\n ge_b = Ge_ij[j_, k_]\n if j_ != j:\n le_b, ge_b = ge_b, le_b\n\n i_, k_ = i, k\n if i > k:\n i_, k_ = k, i\n eq_c = Eq_ij.get((i_, k_), None)\n if eq_c is None:\n continue\n else:\n le_c = Le_ij[i_, k_]\n ge_c = Ge_ij[i_, k_]\n if i_ != i:\n le_c, ge_c = ge_c, le_c\n\n # a <= b and b <= c -> a <= c\n model.addLConstr(\n ge_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_ge_{i},{j},{k}')\n # a >= b and b >= c -> a >= c\n model.addLConstr(\n le_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_le_{i},{j},{k}')\n # a <= b and b == c -> a <= c\n model.addLConstr(\n le_a + eq_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_leeq_{i},{j},{k}')\n # a == b and b <= c -> a <= c\n model.addLConstr(\n eq_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_eqle_{i},{j},{k}')\n # a >= b and b == c --> a >= c\n model.addLConstr(\n ge_a + eq_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_geeq_{i},{j},{k}')\n # a == b and b >= c --> a >= c\n model.addLConstr(\n eq_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_eqge_{i},{j},{k}')\n # a == b and b == c --> a == c\n model.addLConstr(\n eq_a + eq_b, GRB.LESS_EQUAL, 1 + eq_c,\n f'transitivity_eq_{i},{j},{k}')\n\n # 
transitivity helper (for not-fully connected graphs)\n # also provides a latent rank\n big_m = max_rank\n model.addConstrs(\n ((1 - ge_ij) * big_m + R_i[i] >= R_i[j] + 1 for (i, j), ge_ij in Ge_ij.items()),\n 'rank_transitivity_larger')\n model.addConstrs(\n ((1 - le_ij) * big_m + R_i[j] >= R_i[i] + 1 for (i, j), le_ij in Le_ij.items()),\n 'rank_transitivity_smaller')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[j] >= R_i[i] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal1')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[i] >= R_i[j] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal2')\n\n # objective function\n objective = LinExpr()\n for ij, value in comparisons.items():\n if value == 1.0:\n objective += -E_ij[ij]\n elif value == 0.0:\n objective += E_ij[ij]\n else:\n objective += T_ij_pos[ij] + T_ij_neg[ij]\n model.setObjective(objective, GRB.MINIMIZE)\n\n # solve\n model.optimize()\n\n # verify abs emulation: one T_ij has to be 0\n for ij, value in T_ij_pos.items():\n assert value.X == 0 or T_ij_neg[ij] == 0, \\\n f'T_{ij} pos {value.X} neg {T_ij_neg[ij]}'\n\n # find minimal Rs\n model_ = Model('comparison')\n model_.setParam('OutputFlag', verbose)\n R_i = model_.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=len(nodes))\n for ((i, j), ge_ij), le_ij in zip(Ge_ij.items(), Le_ij.values()):\n if ge_ij.x == 1:\n model_.addConstr(R_i[i] >= R_i[j] + 1)\n elif le_ij.x == 1:\n model_.addConstr(R_i[j] >= R_i[i] + 1)\n else:\n model_.addConstr(R_i[j] == R_i[i])\n model_.setObjective(R_i.sum(), GRB.MINIMIZE)\n model_.optimize()\n\n return [model_.getVarByName(f'r_i[{i}]').X for i in range(len(nodes))], \\\n model.objVal", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1", "def score(self,A,B,method=\"mse\"):\n if method ==\"mse\":\n loss = self._reconstruction_loss\n elif method == \"mae\":\n loss = self._mae\n data={self._xphA: A,\n self._xphB: B,\n K.learning_phase():False}\n return self.sess.run(loss,\n feed_dict=data)", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def main():\n args = get_args().parse_args()\n\n (wins, losses) = args.record.split('-')\n print wins, losses\n \n is_winning_streak = args.streak[0] == 'W'\n streak_length = args.streak[1:]\n print is_winning_streak, streak_length\n\n (recent_wins, recent_losses) = args.last.split('-')\n print recent_wins, recent_losses\n\n power_ranking = calculate_power_ranking(int(wins), int(losses), is_winning_streak,\n int(streak_length), int(recent_wins),\n int(recent_losses))\n print power_ranking", "def rank_vars(xTrain, yTrain, scoreFunc):\r\n funcsDic = {\r\n 'pearsonr': [np.arange(xTrain.shape[1]), 1], \r\n 'mutual_info_score': np.arange(xTrain.shape[0]),\r\n 'ttest_ind': [np.arange(xTrain.shape[1]), 1], \r\n }\r\n \r\n scores = list()\r\n for feat in np.arange(xTrain.shape[1]):\r\n if scoreFunc.func_name == 'pearsonr':\r\n scores.append(scoreFunc(xTrain[:, feat], yTrain))\r\n elif scoreFunc.func_name == 'ttest_ind':\r\n scores.append(scoreFunc(xTrain[yTrain == 1, feat], xTrain[yTrain==-1, feat]))\r\n \r\n scores = np.asarray(scores)\r\n pvals = scores[funcsDic[scoreFunc.func_name]]\r\n sortedIndices = [i[0] for i in sorted(enumerate(pvals), key=lambda x:x[1])]\r\n return sortedIndices", "def disp_score():", 
"def score(self, X, y=None) -> float:\n self.check_is_fitted()\n X = self._check_clusterer_input(X)\n return self._score(X, y)", "def _run(evaluation_dir_name, smoothing_radius_grid_cells,\n score_colour_map_name, num_ex_colour_map_name, max_colour_percentile,\n output_dir_name):\n\n if smoothing_radius_grid_cells <= 0:\n smoothing_radius_grid_cells = None\n\n score_colour_map_object = pyplot.get_cmap(score_colour_map_name)\n num_ex_colour_map_object = pyplot.get_cmap(num_ex_colour_map_name)\n error_checking.assert_is_geq(max_colour_percentile, 90.)\n error_checking.assert_is_leq(max_colour_percentile, 100.)\n\n grid_metafile_name = grids.find_equidistant_metafile(\n directory_name=evaluation_dir_name, raise_error_if_missing=True\n )\n\n print('Reading grid metadata from: \"{0:s}\"...'.format(grid_metafile_name))\n grid_metadata_dict = grids.read_equidistant_metafile(grid_metafile_name)\n print(SEPARATOR_STRING)\n\n num_grid_rows = len(grid_metadata_dict[grids.Y_COORDS_KEY])\n num_grid_columns = len(grid_metadata_dict[grids.X_COORDS_KEY])\n\n auc_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n csi_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n pod_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n far_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n num_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n num_positive_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n\n for i in range(num_grid_rows):\n for j in range(num_grid_columns):\n this_eval_file_name = model_eval.find_file(\n directory_name=evaluation_dir_name, grid_row=i, grid_column=j,\n raise_error_if_missing=False)\n\n if not os.path.isfile(this_eval_file_name):\n warning_string = (\n 'Cannot find file (this may or may not be a problem). '\n 'Expected at: \"{0:s}\"'\n ).format(this_eval_file_name)\n\n warnings.warn(warning_string)\n continue\n\n print('Reading data from: \"{0:s}\"...'.format(this_eval_file_name))\n this_evaluation_dict = model_eval.read_evaluation(\n this_eval_file_name)\n\n num_examples_matrix[i, j] = len(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n num_positive_examples_matrix[i, j] = numpy.sum(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n\n this_evaluation_table = this_evaluation_dict[\n model_eval.EVALUATION_TABLE_KEY]\n\n auc_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.AUC_KEY].values\n )\n csi_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.CSI_KEY].values\n )\n pod_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.POD_KEY].values\n )\n far_matrix[i, j] = 1. 
- numpy.nanmean(\n this_evaluation_table[model_eval.SUCCESS_RATIO_KEY].values\n )\n\n print(SEPARATOR_STRING)\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n if smoothing_radius_grid_cells is not None:\n print((\n 'Applying Gaussian smoother with e-folding radius of {0:.1f} grid '\n 'cells...'\n ).format(\n smoothing_radius_grid_cells\n ))\n\n orig_num_examples_matrix = num_examples_matrix + 0\n num_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_examples_matrix = numpy.round(num_examples_matrix).astype(int)\n num_examples_matrix[orig_num_examples_matrix == 0] = 0 # HACK\n\n num_positive_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_positive_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_positive_examples_matrix = (\n numpy.round(num_positive_examples_matrix).astype(int)\n )\n num_positive_examples_matrix[num_examples_matrix == 0] = 0\n\n auc_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(auc_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n csi_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(csi_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n pod_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(pod_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n far_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(far_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n panel_file_names = []\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # Plot number of examples.\n this_data_matrix = numpy.maximum(numpy.log10(num_examples_matrix), 0.)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=0., max_colour_value=max_colour_value,\n plot_cbar_min_arrow=False, plot_cbar_max_arrow=True, log_scale=True)\n\n axes_object.set_title(r'Number of examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')\n\n panel_file_names.append('{0:s}/num_examples.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot number of positive examples.\n this_data_matrix = num_positive_examples_matrix.astype(float)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n this_data_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=True)\n\n axes_object.set_title('Number of tornadic examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')\n\n panel_file_names.append(\n '{0:s}/num_positive_examples.jpg'.format(output_dir_name)\n )\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot AUC.\n max_colour_value = numpy.nanpercentile(auc_matrix, max_colour_percentile)\n min_colour_value = numpy.maximum(\n numpy.nanpercentile(auc_matrix, 100. - max_colour_percentile),\n 0.5\n )\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=auc_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('AUC (area under ROC curve)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')\n\n panel_file_names.append('{0:s}/auc.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot CSI.\n max_colour_value = numpy.nanpercentile(csi_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n csi_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=csi_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('CSI (critical success index)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')\n\n panel_file_names.append('{0:s}/csi.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot POD.\n max_colour_value = numpy.nanpercentile(pod_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n pod_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=pod_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('POD (probability of detection)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')\n\n panel_file_names.append('{0:s}/pod.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot FAR.\n max_colour_value = numpy.nanpercentile(far_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n far_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=far_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('FAR (false-alarm ratio)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(f)')\n\n panel_file_names.append('{0:s}/far.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Concatenate panels.\n concat_file_name = '{0:s}/spatially_subset_evaluation.jpg'.format(\n output_dir_name)\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_file_name))\n\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names, output_file_name=concat_file_name,\n num_panel_rows=NUM_PANEL_ROWS, num_panel_columns=NUM_PANEL_COLUMNS)\n\n imagemagick_utils.resize_image(\n input_file_name=concat_file_name, output_file_name=concat_file_name,\n output_size_pixels=CONCAT_FIGURE_SIZE_PX)", "def new_evaluate(board):\n\n #Logic for new_evaluate function:\n #1)Traverse through each of the columns\n #2)For each of the columns, find the top most element.\n\t #If the topmost element = Current Player\n\t\t \t#3)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a POSITIVE value\n\t #Else\n\t\t \t#4)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a NEGATIVE value\n #5)Sort these Positive and Negative scores\n #6)IF the highest negative score is greater than the highest positive score, then it means that the opposition has MORE chances to WIN.\n #So, that has to be blocked and so we will return that HIGHEST NEGATIVE value as the score for that board\n #7)ELSE we go ahead and return the HIGHEST POSITIVE value as the score for that board\n #->This logic has increasing the AGGRESSION of the player a lot and it makes senses we hope.\n\n posdict = {}\n negdict = {}\n for col in range(7):\n 
if(board.get_top_elt_in_column(col)==board.get_current_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = board._max_length_from_cell(rowValue,col)\n posdict[col]=score\n elif(board.get_top_elt_in_column(col)==board.get_other_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = -(board._max_length_from_cell(rowValue,col))\n negdict[col]=score\n\n\n sorted(posdict.values(),reverse= True)\n sorted(negdict.values())\n if((bool(posdict))and (bool(negdict))):\n if(abs(negdict.values()[0]) >= ((posdict.values()[0]))):\n return negdict[negdict.keys()[0]]\n else:\n return posdict[posdict.keys()[0]]\n elif(bool(posdict)):\n return posdict[posdict.keys()[0]]\n elif(bool(negdict)):\n return negdict[negdict.keys()[0]]\n else:\n return 0", "def score_fn(self):\n raise NotImplementedError()", "def score(self, X, y, batch_size=50):\n features = self.encode(X, batch_size=batch_size)\n return self.classifier.score(features, y)", "def score(self, x: np.ndarray) -> np.ndarray:\n score = self.backend.score(self.backend._to_backend_dtype(x))\n return self.backend._to_frontend_dtype(score)", "def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:\n\n check_is_fitted(self, '_glasso')\n\n return self._glasso.score(X)", "def forward(ctx, scores):\n idx = argmax(scores, dim=-1)\n scores_net = eye(scores.size(-1), device=scores.device)\n return scores_net[idx]", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def __call__(self, score, model):\n if self.best_score is None:\n # assign the best score and save the model at the end of the first epoch\n self.best_score = score\n self.save_checkpoint(model)\n elif score < self.best_score + self.delta:\n # if the score not increase of at least delta, increment the counter and if it reach the patience early stops\n self.counter += 1\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n # otherwise the score is better that the saved one, so replace the best score and save the model\n self.best_score = score\n self.save_checkpoint(model)\n self.counter = 0", "def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff 
= player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def r_point(self):\n self.r_score += 1\n self.update()", "def rank(m, axis=0, method='average', ascending=False, reverse=False):\n if isinstance(m, list):\n m = np.array(m)\n if ascending == reverse: # greater is better (descending order)\n m = -m # take the opposite to inverse rank\n r = np.apply_along_axis(rankdata, axis, m, method=method) # convert values to ranking in all rows or columns\n return process_vote(m, r)", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 \n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 Accuracy\n for each_val in res[:1]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def predict(self, x_val, y1_test=None, y2_test=None):\n if y1_test is None and y2_test is None:\n y1_test = np.ones(len(x_val))\n y2_test = y1_test\n score_model = False\n else:\n score_model = True\n\n self.stage_one.predict(x_val, y1_test)\n if score_model:\n stage_one_score = self.stage_one.score\n\n # This section will not generalize well beyond this specific dataset.\n if score_model:\n df = pd.DataFrame(data=[y1_test, self.stage_one.pred],\n columns=['y1_test', 'stage_one.pred'])\n df = pd.DataFrame(y1_test).rename(columns={'class_second': 'y1_test'})\n df['stage_one.pred'] = self.stage_one.pred\n df['tweets'] = x_val\n 
df['y2_test'] = y2_test\n\n # x2_val = df['tweets'].loc[df['stage_one.pred'] == 0]\n # y2_test_ = df['y2_test'].loc[df['stage_one.pred'] == 0]\n x2_val = df['tweets'].loc[df['stage_one.pred'] == 1]\n y2_test_ = df['y2_test'].loc[df['stage_one.pred'] == 1]\n else:\n # x2_val = x_val[self.stage_one.pred == 0]\n x2_val = x_val[self.stage_one.pred == 1]\n y2_test_ = np.ones(len(x2_val))\n\n self.stage_two.predict(x2_val, y2_test_)\n if score_model:\n stage_two_score = self.stage_two.score\n return (stage_one_score, stage_two_score, stage_one_score*stage_two_score)\n else:\n # return tweets that contain hate speech\n return x2_val[self.stage_two.pred == 0]", "def get_polyscore(self,X_test=None,y_test=None,metric='adjusted_r2'):\n X = self.get_points()\n y_pred = self.get_polyfit(X)\n train_score = score(self._model_evaluations, y_pred,metric, X=X)\n if X_test is not None and y_test is not None:\n y_pred_test = self.get_polyfit(X_test)\n test_score = score(y_test,y_pred_test,metric,X=X_test)\n return train_score, test_score\n else:\n return train_score", "def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def styblinskitankfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n scores += x[:, i] ** 4 - 16 * x[:, i] ** 2 + 5 * x[:, i]\n scores *= 0.5\n return scores" ]
[ "0.6627804", "0.6555331", "0.6459971", "0.5922805", "0.5655154", "0.5596297", "0.5581356", "0.5572477", "0.55473816", "0.5538019", "0.5492573", "0.5474258", "0.5454743", "0.54495645", "0.54365635", "0.5428014", "0.54214275", "0.54198104", "0.5418108", "0.5414656", "0.5410967", "0.5396969", "0.5384275", "0.53795254", "0.5376827", "0.5349952", "0.534903", "0.5332553", "0.5322696", "0.5318197", "0.53151476", "0.52902913", "0.5271636", "0.5260204", "0.5260108", "0.52508485", "0.5247232", "0.5243922", "0.5240747", "0.5239355", "0.52388096", "0.5233807", "0.52275246", "0.5221788", "0.5218996", "0.5218726", "0.5211635", "0.52049625", "0.5203669", "0.51912093", "0.51877534", "0.517864", "0.5169333", "0.516902", "0.51686835", "0.5158324", "0.5152412", "0.51491857", "0.51437145", "0.51437145", "0.51437145", "0.51437145", "0.51437145", "0.51437145", "0.5139344", "0.5137981", "0.51378465", "0.5131359", "0.51246274", "0.5114021", "0.5108517", "0.5108517", "0.5108517", "0.5108517", "0.5105084", "0.5101936", "0.5101859", "0.51005816", "0.5099474", "0.50923896", "0.5091486", "0.5086944", "0.5084811", "0.5080646", "0.5078523", "0.507327", "0.5068066", "0.5066557", "0.50658745", "0.5064409", "0.50638884", "0.5054698", "0.50546724", "0.5052691", "0.50503534", "0.50444937", "0.5041456", "0.5041132", "0.504014", "0.50391644" ]
0.66810125
0
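The two short values just above (a score for the positive document, then what looks like its rank among the scored negatives) are not documented anywhere in this dump. One plausible reading is sketched below as a hedged Python example; the function name, the -1 convention for unscored rows, and the field semantics are assumptions drawn from the values visible in this row and the next, not definitions taken from the dataset.

# Hedged sketch (assumptions, not dataset documentation): if the preceding list of
# floats holds one similarity score per negative and the first trailing value is the
# positive document's score, the final value could be the count of negatives scoring
# at least as high, with -1 marking rows whose positive has no usable score.
def rank_of_positive(document_score: float, negative_scores: list) -> int:
    if document_score <= 0.0:
        return -1  # assumed placeholder for "no usable score"
    return sum(float(s) >= document_score for s in negative_scores)

# With the row above: the highest negative score is ~0.663 while the positive scores
# ~0.668, so this reading yields rank 0, matching the stored value.
print(rank_of_positive(0.66810125, ["0.6627804", "0.6555331", "0.6459971"]))  # -> 0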
Gets the fraction of positive stationary scores.
def positive_scores(self, axis, th_stat=10**(-2)):
    traj, rank = self._check_run(axis)
    low_boundary = _np.exp(_np.log(self.params['low_bound'])/2)
    t = len(traj)-1
    stat = 0
    # Iteration over the trajectories
    for i in range(len(traj[0])):
        # above the low boundary
        if traj[t][i] > low_boundary:
            # stationary (change less than stat_th)
            if abs(_np.log(traj[t][i]) - _np.log(traj[t-1][i])) < th_stat:
                stat += 1
    return stat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def positive_share(self) -> float:\n pos = self.tsdf.pct_change()[1:][self.tsdf.pct_change()[1:].values > 0.0].count()\n tot = self.tsdf.pct_change()[1:].count()\n return float(pos / tot)", "def game_score(self):\n score = self.score.quantize(Decimal('0.001'))\n return score if score > 0 else 0", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def get_score(self):\n return float(self._score)", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def get_f_score(self):\n return self.get_g_score() + self.get_h_score()", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def null_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return 0.", "def z_score(self) -> float:\n return float((self.tsdf.pct_change().iloc[-1] - self.tsdf.pct_change().mean()) / self.tsdf.pct_change().std())", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr", "def result(self):\n tp = self.true_positive\n fp = self.false_positive\n return math_ops.div_no_nan(tp, tp + fp)", "def get_g_score(self):\n return self._g_score", "def worst(self) -> float:\n return float(self.tsdf.pct_change().min())", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def get_score(self, solution: np.array) -> float:\n pass", "def mapd(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(np.abs(self.true))\n return float(a / b)", "def get_average_presence_of_students(students):\n total_presence = 0\n for row in students:\n total_presence += int(row[6])\n average_presence = str(total_presence/len(students))\n if average_presence.find(\".5\") != -1:\n average_presence = float(average_presence) + 1\n else: \n average_presence = float(average_presence)\n\n return int(average_presence)", "def calculate_sentiment(positive, negative):\n denominator = (positive - negative)\n numerator = (positive + negative)\n if numerator == 0:\n return 0\n return 0.268 * (denominator / numerator)", "def 
denominator(self):\n return 1", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def fraction_nonzero(module):\n return count_params_nonzero(module) / count_params(module)", "def get_score(p):\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return (clip(1 - temp, a_min=0, a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2", "def getScore(self, node):\n return self.getGravityScoreFromNode(node) or 0", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def rmsse(self, seasonality: int = 1) -> float:\n q = np.abs(self._error()) / self.mae(self.true[seasonality:], self._naive_prognose(seasonality))\n return float(np.sqrt(np.mean(np.square(q))))", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def dire_score(self):\n return self._get(\"dire_score\")", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def train_frac(self):\n return self._train_frac", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def radiant_score(self):\n return self._get(\"radiant_score\")", "def value(self):\n if len(self.fscore_history) == 0:\n return 0\n else:\n return np.mean(self.fscore_history)", "def get_score(self):\r\n return None", "def gpa(self):\n try:\n return sum(self.courses.values()) / len(self.courses)\n except ZeroDivisionError:\n return 0", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def get_score(self):\n rewards, resets = self.runner.get_rewards_resets()\n self.runner.clear_rewards_resets()\n assert rewards.ndim == 1 and resets.ndim == 1, (rewards.ndim, resets.ndim)\n assert rewards.shape[0] == resets.shape[0], (rewards.shape, resets.shape)\n scores = [0]\n for t in reversed(range(rewards.shape[0])):\n if resets[t]:\n scores.append(0)\n scores[-1] += rewards[t]\n return np.mean(scores)", "def get_b(self):\n return ((self.s_pos / self.n_pos) + (self.s_neg / self.n_neg)) / 2", "def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)", "def get_score(self):\n\n return self._score", "def get_score(self) -> int:\n return self.rstate.score()", "def get_score(self):\n return self.__score", "def 
compute_f_score(alpha, true_positves, false_positives, false_negatives):\n num = true_positves\n den = np.float64(alpha * (true_positves + false_positives) +\\\n (1 - alpha) * (true_positves + false_negatives))\n with np.errstate(divide='ignore', invalid='ignore'):\n return num / den", "def pct(self):\n\t\treturn self.bottle.pct()", "def result(self):\n tp = self.true_positive.read_value()\n fn = self.false_negative.read_value()\n return math_ops.div_no_nan(tp, tp + fn)", "def value_ret(self) -> float:\n if float(self.tsdf.iloc[0]) == 0.0:\n raise Exception('First data point == 0.0')\n return float(self.tsdf.iloc[-1] / self.tsdf.iloc[0] - 1)", "def get_as_float(self):\n return float(self.numerator / self.denominator)", "def getScore(self):\r\n return self._score", "def samp_frac(self):\n return self._samp_frac", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def getScore(self):\n return self._score", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))", "def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def getScore(self):\n return sum(self.field)", "def total_score(self):\n return _projected_site_total_score(self)", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def possession(self, good):\n return float(self._haves[good])", "def S(self):\n Ae = 1.0/float(len(self.K)) \n return (self.avg_Ao() - Ae)/(1.0 - Ae)", "def matthewscc(self):\n if not self.total_examples:\n return 0.\n\n true_pos = float(self.true_positives)\n false_pos = float(self.false_positives)\n false_neg = float(self.false_negatives)\n true_neg = float(self.true_negatives)\n terms = [(true_pos + false_pos),\n (true_pos + false_neg),\n (true_neg + false_pos),\n (true_neg + false_neg)]\n denom = 1.\n for t in filter(lambda t: t != 0., terms):\n denom *= t\n return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)", "def get_mean(self):\n try:\n return sum(self.speakers.values()) / len(self.speakers)\n except (ZeroDivisionError):\n return 0.0", "def calculate_score(self):\n try:\n self.score = 
self.__total_comment_score / float(self.num_comments)\n except ZeroDivisionError:\n self.score = float(0)", "def score(cur_ven, ven):\r\n try:\r\n alpha = 750\r\n numerator = (ven[\"rating\"] * 0.75) + (2.5 * (1- eulers**(-ven[\"ratingSignals\"]/144)))\r\n cur_coord = (cur_ven[\"location\"][\"lat\"], cur_ven[\"location\"][\"lng\"])\r\n ven_coord = (ven[\"location\"][\"lat\"], ven[\"location\"][\"lng\"])\r\n denominator = vincenty(cur_coord, ven_coord).meters + alpha\r\n except Exception as e:\r\n print \"{}, \\n has produced an error from {}\".format(ven[\"name\"], e)\r\n return float(\"-inf\")\r\n return numerator / denominator", "def cronbach_alpha(self) -> float:\n itemscores = np.stack([self.true, self.predicted])\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n return float(nitems / (nitems - 1.) * (1 - itemvars.sum() / tscores.var(ddof=1)))", "def get_r_score(self):\n return self.r_score", "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. * probability_joint)\n else:\n return -1", "def get_value(\n self\n ) -> float:\n\n return self.average", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def ratio_local_cons(self):\n if self.current_energy_consumed == 0.0:\n return 1.0\n else:\n return self.local_cons / self.current_energy_consumed", "def calculate_positron_fraction(\n positron_energy, isotope_energy, isotope_intensity\n):\n return positron_energy / np.sum(isotope_energy * isotope_intensity)", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def acceptance_fraction(self):\n return self.sampler.acceptance_fraction", "def acceptance_fraction(self):\n return float(self._accepted / self.num_iterations)", "def elution_score(self):\n return self.score", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def smdape(self) -> float:\n return float(np.median(2.0 * self._ae() / ((np.abs(self.true) + np.abs(self.predicted)) + EPS)))", "def acceptance_rate(self):\n total = float(self.accepted + self.rejected)\n return self.accepted / total", "def get_score(self):\r\n return self.lcp.get_score()", "def compute_fraction( poi_messages, all_messages ):\n import math\n if poi_messages == 0 or all_messages == 0 or math.isnan(float(poi_messages)) or math.isnan(float(all_messages)) :\n return 0.\n fraction = 0.\n fraction = float(poi_messages) / float(all_messages) \n return fraction" ]
[ "0.6627993", "0.66167814", "0.66115385", "0.65545696", "0.64541936", "0.6416723", "0.63477427", "0.63358057", "0.63094145", "0.6304095", "0.628694", "0.62657887", "0.62262195", "0.61896664", "0.6168893", "0.61631715", "0.61393696", "0.6111311", "0.6108707", "0.6084148", "0.60761714", "0.6073406", "0.6073316", "0.60536677", "0.605203", "0.60387444", "0.602564", "0.60252386", "0.5993697", "0.5978535", "0.5958823", "0.5955687", "0.5944775", "0.59443057", "0.59356964", "0.59263736", "0.59263736", "0.59263736", "0.5913756", "0.59115237", "0.5908968", "0.5908968", "0.5908968", "0.5907544", "0.5902421", "0.59011996", "0.58813137", "0.58730155", "0.587213", "0.58668137", "0.58583164", "0.58567435", "0.585564", "0.58503413", "0.5849997", "0.5842663", "0.5840269", "0.5837454", "0.5812161", "0.58035535", "0.5800793", "0.57902026", "0.57829887", "0.5781378", "0.5780975", "0.57726806", "0.5772235", "0.57574", "0.5756556", "0.5754479", "0.5752282", "0.574795", "0.5746013", "0.5742877", "0.5740939", "0.5738643", "0.573851", "0.57369703", "0.573584", "0.57284933", "0.57241964", "0.5723374", "0.5723305", "0.5719764", "0.571459", "0.57143533", "0.5710754", "0.5703422", "0.569309", "0.5688714", "0.5682666", "0.5682548", "0.5677757", "0.5675936", "0.5675464", "0.5672092", "0.566654", "0.56651217", "0.56630164", "0.56630003", "0.56476545" ]
0.0
-1
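The row above declares a single ("query", "document", "negatives") triplet as its objective in the metadata, which reads as an instruction to train a retriever contrastively: the query is the anchor, the document is the positive code snippet, and the negatives are distractor snippets. A minimal sketch of that use follows; the TripletRow container, the cap on negatives, and the downstream triplet-margin loss are illustrative assumptions, not part of the dataset.

# Hedged sketch: expanding one row of this dump into (anchor, positive, negative)
# training tuples, following the ("query", "document", "negatives") objective
# declared in the row metadata. Container and cap are assumptions for illustration.
from dataclasses import dataclass, field

@dataclass
class TripletRow:
    query: str                                      # natural-language description (anchor)
    document: str                                   # the positive code snippet
    negatives: list = field(default_factory=list)   # distractor code snippets

def make_triplets(row, max_negatives=8):
    """Yield one (anchor, positive, negative) tuple per retained negative."""
    for neg in row.negatives[:max_negatives]:
        yield row.query, row.document, neg

row = TripletRow(
    query="Gets the fraction of positive stationary scores.",
    document="def positive_scores(self, axis, th_stat=10**(-2)): ...",
    negatives=["def positive_share(self) -> float: ...",
               "def game_score(self): ..."],
)
triplets = list(make_triplets(row))  # feed these into a triplet-margin loss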
Compute the extinction area given the last ranking obtained through the run method
def ext_area(self, axis):
    traj, rank = self._check_run(axis)
    return _ext_area(axis, rank, self._neighb[1], self._neighb[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _overlap_energy(self, this, that):\n if not this.overlaps(that):\n return 0.0\n\n return min(10.0 / this.rank, 10.0 / that.rank)", "def _ext_area(axis, ranking, row_ind_at_col, col_ind_at_row):\n if axis == 0:\n indexes_lists = _np.array([col_ind_at_row[:], row_ind_at_col[:]])\n else:\n indexes_lists = _np.array([row_ind_at_col[:], col_ind_at_row[:]])\n if len(ranking) != len(indexes_lists[0]):\n print ('Dimensions do not match')\n return\n \n # Counting the already extincted columns. They are the ones whose list of\n # associated row indexes is empty. In that case the extinction counter is\n # increased and a -1 is added to the indexes list.\n ext_nodes = 0\n for c in range(len(indexes_lists[1])):\n if len(indexes_lists[1][c]) == 0:\n ext_nodes += 1\n indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1)\n \n ext_curve = [ext_nodes]\n # Iteration over the ranked nodes to remove, r\n for r in ranking[:-1]:\n # Iter over the connected nodes in the other layer, r\n for c in indexes_lists[0][r]:\n # Removing the ranked node from the neighbours of c\n indexes_lists[1][c] = indexes_lists[1][c][indexes_lists[1][c] != r]\n \n # If the neighbours of c is empty, then c gets extincted\n if len(indexes_lists[1][c]) == 0:\n ext_nodes += 1\n indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1)\n \n ext_curve.append(ext_nodes)\n \n # Returning the area below the extinction curve\n return sum(ext_curve) / float(len(indexes_lists[0]) * len(indexes_lists[1]))", "def _calculate_fitness(self):\n pass", "def lowest_rank_approx(A,e):", "def __rank__(self) -> int:", "def _calc_neighborhood_func(self, curr_it: int, mode: str) -> float:\n return decreasing_rate(\n self.radius_max_,\n self.radius_min_,\n iteration_max=self.max_iterations_,\n iteration=curr_it,\n mode=mode,\n )", "def overall_reduction(self):\n return 84", "def get_absolute_regret(self):\n values = self.stats['return_stats']['episode_totals']\n first_episode = self.get_convergence_episode()\n final_return = self.get_final_return()\n regret = np.sum(final_return - values[:first_episode])\n return regret", "def evaluate(self) -> int:", "def rank(self,others):\n self.__verify(others)\n \n #construct the n evaluation criteria + classes in an extensible way\n #evalFn = [AP,R] in the standard format -> column with as many rows as replicates\n numClasses = others[0].eval['APBCI'].shape[2]\n\n iouType = others[0].params.iouType\n if iouType in [\"segm\",\"bbox\"]:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,3,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,1,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,3,2],axis=(0,2))]\n\n evfAP = lambda c: (lambda AP,R: np.nanmean(AP[:,:,c,0,-1],axis=0))\n for i in range(numClasses):\n evalFunctions.append(evfAP(i))\n\n else:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,0],axis=(0,2)),\n lambda AP,R: 
np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,0],axis=(0,2))]\n\n numReplicates = others[0].eval['APBCI'].shape[1]\n numInstances = len(others)\n numEvals = len(evalFunctions)\n\n replicateStats = np.zeros((numReplicates,numInstances))\n\n outperformMatrix = np.zeros((numInstances,numInstances,numEvals))\n rankCI = np.zeros((numInstances,3,numEvals))\n ranks = np.zeros((numInstances,numEvals,numReplicates))\n\n for evi,evf in enumerate(evalFunctions):\n for oi,o in enumerate(others):\n replicateStats[:,oi] = evf(o.eval['APBCI'],o.eval['RBCI'])\n\n for oi in range(len(others)):\n for oj in range(len(others)):\n outperformMatrix[oi,oj,evi] = np.mean(replicateStats[:,oi]>replicateStats[:,oj])\n\n for bci in range(numReplicates):\n ranks[:,evi,bci] = stats.rankdata(-replicateStats[bci,:],method='min')\n\n for oi in range(len(others)): \n rankCI[oi,0,evi] = np.mean(ranks[oi,evi,:])\n #use simple percentile method; the bias correction misbehaves \n rankCI[oi,1:,evi] = np.percentile(ranks[oi,evi,:],[100*(self.params.bootstrapAlpha/2),100*(1-self.params.bootstrapAlpha/2)])\n\n return rankCI, outperformMatrix, ranks", "def rank():\n return 0", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def worst_score(self):\r\n pass", "def fitness(self):\n # TO BE DECIDED\n return 1", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def e(self):\n\n ylow = self.e_min\n yhigh = self._e\n\n xlow = 0\n xhigh = self.anneal_max\n\n steep_mult = 8\n\n steepness = steep_mult / (xhigh - xlow)\n offset = (xhigh + xlow) / 2\n midpoint = yhigh - ylow\n\n x = np.clip(self.avg_score, 0, xhigh)\n x = steepness * (x - offset)\n e = ylow + midpoint / (1 + np.exp(x))\n return e", "def completing_evalution(self, *args):\n self.calculate_risk_tol(*args)\n graham_picks = key_ratios(\"GrahamScore\", total_score)\n lynch_picks = key_ratios(\"LynchScore\", total_score)\n return (graham_picks, lynch_picks)", "def evaluate(self, mode=0):\r\n winner = self.determine_winner()\r\n if winner:\r\n return winner * self.WIN_SCORE\r\n\r\n if mode == 1:\r\n return self.centre_priority_evaluate()\r\n elif mode == 2:\r\n return 0.5 * (self.centre_priority_evaluate() + self.piece_evaluate())\r\n else:\r\n return self.piece_evaluate()", "def update_measure(self):\r\n lam = len(self.fit)\r\n idx = np.argsort(self.fit + self.fitre)\r\n ranks = np.argsort(idx).reshape((2, lam))\r\n rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])\r\n\r\n # compute rank change limits using both ranks[0] and ranks[1]\r\n r = np.arange(1, 2 * lam) # 2 * lam - 2 elements\r\n limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0,i] + 1 - (ranks[0,i] > ranks[1,i]))),\r\n self.theta*50) +\r\n Mh.prctile(np.abs(r - (ranks[1,i] + 1 - (ranks[1,i] > ranks[0,i]))),\r\n self.theta*50))\r\n for i in self.idx]\r\n # compute measurement\r\n # max: 1 rankchange in 2*lambda is always fine\r\n s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda\r\n self.noiseS += self.cum * (np.mean(s) - self.noiseS)\r\n return self.noiseS, s", "def calculate_fitness(self, obs):\n feature_units = obs.observation.feature_units\n 
self.fitness = self.initial_enemy_hit_points + self.calculate_hitpoints(\n feature_units, _PLAYER_SELF) - self.calculate_hitpoints(feature_units, _PLAYER_ENEMY)\n max_fitness = self.max_fitness\n return self.fitness / max_fitness", "def evaluate_fitness(self, pos):\n _, index = self.tree.query(pos)\n return 1 - (self.fitness_function[index // self.resolution][index % self.resolution] - self.min) / (self.max - self.min)", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def get_final_return(self):\n values = self.stats['return_stats']['episode_totals']\n _, y, (_, _) = self._moving_average(values, window=_ROLLING_WINDOW,\n p=_CONFIDENCE_LEVEL)\n return y[-1]", "def calculate_fitness(self, obs):\n feature_units = obs.observation.feature_units\n self.fitness = self.initial_enemy_hit_points + self.calculate_hitpoints(\n feature_units, _PLAYER_SELF) - self.calculate_hitpoints(feature_units, _PLAYER_ENEMY)\n return self.fitness / 950", "def calculate_fitness(self, obs):\n feature_units = obs.observation.feature_units\n total_damage_dealt = self.initial_enemy_hit_points - \\\n self.calculate_hitpoints(feature_units, _PLAYER_ENEMY)\n hit_point_loss = self.initial_self_hit_points - \\\n self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n self.fitness = (total_damage_dealt - hit_point_loss) / \\\n self.initial_self_hit_points + 1\n return self.fitness", "def get_estimated_rank(self):\n # At the moment the rank returned by this function is normally too high for either\n # my machine or the tensorly library to handle, therefore I have made it just return 1 for right now\n\n I = len(self.tdt[0])\n J = len(self.tdt[0][0])\n K = len(self.tdt)\n\n if I == 1 or J == 1 or K == 1:\n return 1\n elif I == J == K == 2:\n return 2\n elif I == J == 3 and K == 2:\n return 3\n elif I == 5 and J == K == 3:\n return 5\n elif I >= 2*J and K == 2:\n return 2*J\n elif 2*J > I > J and K ==2:\n return I\n elif I == J and K == 2:\n return I\n elif I >= J*K:\n return J*K\n elif J*K - J < I < J*K:\n return I\n elif I == J*K - I:\n return I\n else:\n print(I, J, K, \"did not have an exact estimation\")\n return min(I*J, I*K, J*K)", "def calcFitness (self) :\n fitnessArray = [[8, 4, 2, 1],\n [16, 8, 4, 2],\n [32, 16, 8, 4],\n [64, 32, 16, 8]]\n # fitnessArray = [[160, 80, 5, 4],\n # [320, 40, 4, 3],\n # [640, 20, 3, 2],\n # [1280, 10, 2, 1]]\n fitness = 0\n for k in range(4) :\n for i in range (4) :\n fitness += self.grid[k,i] * fitnessArray[k][i]\n return (fitness / 100)", "def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def compute(self):\n return self.elbo_component / self.get_intervals_recorded()", "def get_convergence_episode(self):\n values = self.stats['return_stats']['episode_totals']\n _, y, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n # The convergence is established as the first time the average return\n # is above the lower bounds of the final return.\n first_episode = max(np.argmax(y >= y_lower[-1]), 1)\n return first_episode", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def evaluate(self):\r\n\r\n fitness = 0\r\n\r\n for i in range(len(self.representation)):\r\n # Calculates full distance, including from last city\r\n # to first, to terminate the trip\r\n fitness += distance_matrix[self.representation[i - 1]][self.representation[i]]\r\n\r\n return int(fitness)", "def validate_average_rank(self) -> float:\n logger.info('Average rank validation 
...')\n\n args = self.args\n self.biencoder.eval()\n distributed_factor = self.distributed_factor\n\n if args.use_dict_input:\n data_iterator = self.get_dict_data_iterator(args.dev_psgs_file, args.dev_queries_file,\n args.dev_qrels_file, args.dev_trec_file,\n args.dev_batch_size,\n shuffle=False, split=False)\n else:\n data_iterator = self.get_data_iterator(args.dev_file, args.dev_batch_size, shuffle=False)\n\n sub_batch_size = args.val_av_rank_bsz\n sim_score_f = ClusterNllLoss.get_similarity_function()\n q_represenations = []\n ctx_represenations = []\n positive_idx_per_question = []\n\n num_hard_negatives = args.val_av_rank_hard_neg\n num_other_negatives = args.val_av_rank_other_neg\n\n log_result_step = args.log_batch_step\n\n for i, samples_batch in enumerate(data_iterator.iterate_data()):\n # samples += 1\n if len(q_represenations) > args.val_av_rank_max_qs / distributed_factor:\n break\n\n biencoder_input = ClusterBertEncoder.create_biencoder_input(samples_batch, self.tensorizer,\n True,\n max_query_length=args.query_sequence_length,\n max_doc_length=args.sequence_length,\n num_hard_negatives=num_hard_negatives,\n num_other_negatives=num_other_negatives,\n shuffle=False)\n total_ctxs = len(ctx_represenations)\n ctxs_ids = biencoder_input.context_ids\n ctxs_segments = biencoder_input.ctx_segments\n bsz = ctxs_ids.size(0)\n\n # split contexts batch into sub batches since it is supposed to be too large to be processed in one batch\n for j, batch_start in enumerate(range(0, bsz, sub_batch_size)):\n\n q_ids, q_segments = (biencoder_input.question_ids, biencoder_input.question_segments) if j == 0 \\\n else (None, None)\n\n if j == 0 and args.n_gpu > 1 and q_ids.size(0) == 1:\n # if we are in DP (but not in DDP) mode, all model input tensors should have batch size >1 or 0,\n # otherwise the other input tensors will be split but only the first split will be called\n continue\n\n ctx_ids_batch = ctxs_ids[batch_start:batch_start + sub_batch_size]\n ctx_seg_batch = ctxs_segments[batch_start:batch_start + sub_batch_size]\n\n q_attn_mask = self.tensorizer.get_attn_mask(q_ids)\n ctx_attn_mask = self.tensorizer.get_attn_mask(ctx_ids_batch)\n with torch.no_grad():\n q_dense, ctx_dense = self.biencoder(q_ids, q_segments, q_attn_mask, ctx_ids_batch, ctx_seg_batch,\n ctx_attn_mask)\n\n if q_dense is not None:\n q_represenations.extend(q_dense.cpu().split(1, dim=0))\n\n ctx_represenations.extend(ctx_dense.cpu().split(1, dim=0))\n\n batch_positive_idxs = biencoder_input.is_positive\n positive_idx_per_question.extend([total_ctxs + v for v in batch_positive_idxs])\n\n if (i + 1) % log_result_step == 0:\n logger.info('Av.rank validation: step %d, computed ctx_vectors %d, q_vectors %d', i,\n len(ctx_represenations), len(q_represenations))\n\n ctx_represenations = torch.cat(ctx_represenations, dim=0)\n q_represenations = torch.cat(q_represenations, dim=0)\n\n logger.info('Av.rank validation: total q_vectors size=%s', q_represenations.size())\n logger.info('Av.rank validation: total ctx_vectors size=%s', ctx_represenations.size())\n\n q_num = q_represenations.size(0)\n assert q_num == len(positive_idx_per_question)\n\n scores = sim_score_f(q_represenations, ctx_represenations)\n values, indices = torch.sort(scores, dim=1, descending=True)\n\n rank = 0\n for i, idx in enumerate(positive_idx_per_question):\n # aggregate the rank of the known gold passage in the sorted results for each question\n gold_idx = (indices[i] == idx).nonzero()\n rank += gold_idx.item()\n\n if distributed_factor > 1:\n # each node 
calcuated its own rank, exchange the information between node and calculate the \"global\" average rank\n # NOTE: the set of passages is still unique for every node\n eval_stats = all_gather_list([rank, q_num], max_size=1000)\n for i, item in enumerate(eval_stats):\n remote_rank, remote_q_num = item\n if i != args.local_rank:\n rank += remote_rank\n q_num += remote_q_num\n\n av_rank = float(rank / q_num)\n logger.info('Av.rank validation: average rank %s, total questions=%d', av_rank, q_num)\n return av_rank", "def find_max_score_location(grid, shape):", "def eval(self, x, y):\n return 100 * (1 - self._root.eval(x, y) / x.shape[0])", "def _distance_last_evaluations(self):\n if self.X.shape[0] < 2:\n # less than 2 evaluations\n return np.inf\n return np.sqrt(np.sum((self.X[-1, :] - self.X[-2, :]) ** 2))", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def extinction_efficiency(self):\r\n n = np.arange(1, self.n + 1)\r\n return 2*np.sum((2*n+1)*np.real(self.a + self.b))/self.x**2", "def get_max_score(location_list, grid, shape):", "def __call__(self, X, fit, func, ask=None, args=()):\r\n self.evaluations_just_done = 0\r\n if not self.maxevals or self.lam_reeval == 0:\r\n return 1.0\r\n res = self.reeval(X, fit, func, ask, args)\r\n if not len(res):\r\n return 1.0\r\n self.update_measure()\r\n return self.treat()", "def mbrae(self, benchmark: np.ndarray = None) -> float:\n return float(np.mean(self._bounded_relative_error(benchmark)))", "def get_fitness(self) -> float:\n return self.fitness", "def __calculate_estimation(self):\n estimation = 0.0\n for index_cluster in range(0, len(self.__clusters)):\n cluster = self.__clusters[index_cluster]\n index_medoid = self.__current[index_cluster]\n for index_point in cluster:\n estimation += euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[index_medoid],\n )\n\n return estimation", "def gmrae(self, benchmark: np.ndarray = None) -> float:\n return _geometric_mean(np.abs(self._relative_error(benchmark)))", "def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]", "def get_total_risk_level(self) -> int:\n origin = Point(0, 0)\n bounded_points = self.grid.get_bounded_points(origin, self.target)\n return sum(self.get_region(point) for point, _ in bounded_points)", "def __calculate_estimation(self):\r\n estimation = 0.0\r\n for index_cluster in range(0, len(self.__clusters)):\r\n cluster = self.__clusters[index_cluster]\r\n index_medoid = self.__current[index_cluster]\r\n for index_point in cluster:\r\n estimation += euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[index_medoid])\r\n\r\n return estimation", "def balancedEE(leaf):\n # determine the exploitation and exploration value for the leaf\n leafExploitation = medianDominationCount(leaf)\n leafExploration = leaf.n / np.product(leaf.ub-leaf.lb)\n # determine the exploitation and exploration rank for each leafs (not efficient, need to outside)\n leafNodes = leaf.leafNodes()\n leafExploitationRank = 0\n leafExplorationRank = 0\n for _leaf in leafNodes:\n _exploitation = medianDominationCount(_leaf)\n _exploration = _leaf.n / np.product(_leaf.ub-_leaf.lb)\n leafExploitationRank += _exploitation < leafExploitation\n leafExplorationRank += _exploration < leafExploration\n # determine the number of total 
observed samples\n N = np.nanmax([sum([l.n for l in leaf.root.leafNodes()]),1]) \n # calculate the balanced value with adaptive weight on the exploration part\n balancedV = leafExploitationRank + leafExplorationRank\n print(leafExploitationRank, leafExplorationRank)\n return balancedV", "def max_score(self):\n return self.points", "def score(self) -> int:\n return self.function(self.x, self.y)", "def evaluate(self, state):\n _n = len(state)\n _t = np.ceil(self.t_pct*_n)\n\n # Calculate head and tail values\n tail_0 = self.tail(0, state)\n head_1 = self.head(1, state)\n\n # Calculate R(X, T)\n if (tail_0 > _t and head_1 > _t):\n _r = _n\n else:\n _r = 0\n\n # Evaluate function\n fitness = max(tail_0, head_1) + _r\n self.num_evals += 1\n return fitness", "def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)", "def compute_cost(self, r):\n self.r_max = 1\n return np.exp(1 / (np.power(r, 2) - np.power(self.r_max, 2))) if r < self.r_max else 0", "def upper_bound(self) -> float:\n ...", "def result(self):\r\n # TODO: how about xcurrent?\r\n return self.best.get() + (\r\n self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)", "def fitness(self):\n pass", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def calculate_fitness(self):\n self.fitness = distMat[self.tour[-1]][self.tour[0]]\n for i in range(self.dimension):\n self.fitness += distMat[self.tour[i]][self.tour[i - 1]]", "def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank", "def compute(self, pred: torch.Tensor, target: torch.Tensor) \\\n -> torch.Tensor:\n # If 2-dimensional, select the highest score in each row\n if len(target.size()) == 2:\n target = target.argmax(dim=1)\n\n ranked_scores = torch.argsort(pred, dim=1)[:, -self.top_k:]\n recalled = torch.sum((target.unsqueeze(1) == ranked_scores).float(), dim=1)\n return recalled.mean()", "def max_evidence(self):\n self.A = np.linalg.inv(self.Sn)\n A_eigval = np.linalg.eigvals(self.A)\n gamma = 0\n 
for i in range(len(A_eigval)):\n gamma += A_eigval[i]/(self.alpha + A_eigval[i])\n new_alpha = gamma/([email protected])\n\n sum = 0\n for i in range(self.n):\n sum +=(self.t[i][email protected]_matrix[i])**2\n new_beta = 1/((1/(self.n-gamma))*sum)\n\n return new_alpha, new_beta", "def calculate_fitness(self, obs):\n feature_units = obs.observation.feature_units\n self.fitness = self.initial_enemy_hit_points + self.calculate_hitpoints(\n feature_units, _PLAYER_SELF) - self.calculate_hitpoints(feature_units, _PLAYER_ENEMY)\n return self.fitness", "def score(self):\n xg, yg = self.goal\n xe, ye = self.empty_node()\n score = len(self.history) + 4*(xg + yg)\n if xg == 1:\n score -= 3\n if ye > 1:\n score += ye - 1\n dx = abs(xe - xg + 1)\n if xg and dx:\n score += dx\n return score", "def get_rmax(self):\n return self.rmax", "def centre_priority_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n for i in range(4):\r\n score = i + 1\r\n evaluation += player_sign * score * count_bits(self.bitboard_king[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n evaluation += player_sign * score * count_bits(self.bitboard_pawns[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n return evaluation", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def betterEvaluationFunction(currentGameState: GameState):\n \"*** YOUR CODE HERE ***\"\n ghostScore : float = 1\n nearGhosts : float = 0\n foodScore : float = 0\n curScore = currentGameState.getScore()\n\n nearestFood = [(0, 0), float('inf')]\n pacPos = currentGameState.getPacmanPosition()\n foodPoss= currentGameState.getFood().asList()\n capsulePoss = currentGameState.getCapsules()\n ghostPoss = currentGameState.getGhostPositions()\n\n for foodPos in foodPoss:\n val = manhattanDistance(foodPos, pacPos)\n if val < nearestFood[1]:\n nearestFood[1] = val\n nearestFood[0] = foodPos\n foodScore = nearestFood[1]\n \n for gpos in ghostPoss:\n val = manhattanDistance(pacPos, gpos)\n if val <= 1:\n nearGhosts += (1-val)\n ghostScore += val\n\n return curScore - (1/ghostScore) + (1/foodScore) - nearGhosts - len(capsulePoss)", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def get_number_of_evaluation(self):\n return self.n_eval", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n\n if currentGameState.getFood().asList() == []: # Null list catch if there is no food on the board\n return currentGameState.getScore()\n else:\n return max([manhattanDistance(currentGameState.getPacmanPosition(),x) * -1\n for x in currentGameState.getFood().asList()]) + currentGameState.getScore()", "def expected_return(self, n_step):\r\n value = 0\r\n n_experiences = 50\r\n for i in range(n_experiences):\r\n trajectory = self.domain_exploration(n_step)\r\n value += self.compute_j(trajectory)\r\n return value/n_experiences", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def getReward(self):\n# def evaluateFitness(self):\n fitness = 0.0\n distance = 
self.env.getDistance()\n speed = self.env.getSpeed()\n theta = self.env.getOrientation()\n\n ## implementation 101\n timeBonus = (self.maxTime - self.t)/self.maxTime\n alpha = 1.0/((1+distance)*(1+fabs(theta))*(speed+1));\n if distance < 0.5*self.env.init_distance :\n if(distance < self.env.vicinity_distance and\n abs(theta) < self.env.vicinity_orientation and\n speed < self.env.vicinity_speed ):\n fitness = 1 + timeBonus; \n else:\n fitness = alpha;\n else: fitness = 0\n self.lastFitness = fitness\n if fitness > self.bestFitness : \n self.bestFitness = fitness \n\n return fitness", "def elution_score(self):\n return self.score", "def eval_func(self, game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n \n if game.is_winner(player):\n return float(\"inf\")\n \n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(self.weights[0] * own_moves - \n self.weights[1] * opp_moves - \n self.weights[2] * __distance_from_center__(game, player))\n else:\n return float(self.weights[3] * own_moves - \n self.weights[4] * opp_moves - \n self.weights[5] * __distance_from_center__(game, player))", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n n = tp + tn + fp + fn\n e1 = (fn * (fn + 2 * tp) / (tp + fn) + fp * (fp + 2 * tn) / (tn + fp)) / n\n e2 = (fp * (fp + 2 * tp) / (tp + fp) + fn * (fn + 2 * tn) / (tn + fn)) / n\n\n return min(e1, e2)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n\n if game.move_count < 15:\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)\n\n delta = 0\n\n moves = game.get_legal_moves()\n initial_moves_count = len(moves)\n indexes = np.random.permutation(initial_moves_count)\n\n for i in range(0, min(4, initial_moves_count)):\n first_level = True\n simulation = game.copy()\n\n while True:\n moves = simulation.get_legal_moves()\n moves_count = len(moves)\n if moves_count == 0:\n if simulation.is_winner(player):\n delta = delta + 1\n else:\n delta = delta - 1\n break\n if first_level:\n selected_move = indexes[i]\n first_level = False\n else:\n selected_move = random.randint(0, moves_count - 1)\n\n simulation.apply_move(moves[selected_move])\n\n return float(own_moves + delta) #float(own_moves - opp_moves + 5 * delta)\n\n #return float(own_moves - opp_moves + free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))", "def mrr(self, ranking):\n return np.mean(ranking)", "def right(self) -> float:\n points = self.get_adjusted_points()\n x_points = [point[0] for point in points]\n return max(x_points)", "def mrae(self, benchmark: np.ndarray = None):\n return float(np.mean(np.abs(self._relative_error(benchmark))))", "def _fitness(individual, X, y):\n yhat = individual.evaluate(X)\n return ((y - yhat) ** 2).sum()", "def update(self,x): #update the estimate of rewards and number of esteps run\n\t\tself.N += 1\n\t\tself.estimate_mean = (1.0-1.0/self.N)*self.estimate_mean + (1.0/self.N)*x #recurence relation for averages", "def calculate(self):\n\n return self._calculate_area(self.ground_truth, self.slice_number)", "def leafScore(self) :\n return 0", "def annealing(self, precision=None, print_steps=True):\n if 
self.max_temp is None:\n self.get_max_temp()\n state = self.individuals[0]\n self._temp = self.max_temp\n generation = 0\n mins = []\n while self.evaluations < self.eval_limit:\n equilibrium = False\n while not equilibrium:\n solution = self.get_neighbour(state.solution)\n value = self.get_solutions_value(solution)\n fitness = self.get_fitness(value)\n if self.acceptance_probability(state.value, value, self._temp) > np.random.random():\n equilibrium = True\n if self.evaluations % 100 == 0:\n mins += [self.best_individual.value]\n if self.evaluations > self.eval_limit:\n break\n state.solution = solution\n state.value = value\n state.fitness = fitness\n self.update_temperature()\n if state.fitness > self.best_individual.fitness:\n self.best_individual = copy.deepcopy(state)\n if print_steps:\n print(\"Generation \", generation, \"; Evaluations: \", self.evaluations,\n \"; Temperature: \", self._temp, \"; \", state)\n if precision is not None and self.solution_precision(precision):\n break\n generation += 1\n return self.best_individual, mins", "def __call__(self, ret):\n \n ce = None\n for reli in itertools.product([False, True], repeat = len(ret)):\n\n mean, var = self.learner.updated_prediction({ i : 1 if r else -1 for i, r in zip(ret, reli) }, self.learner.candidates, cov_mode = 'diag')\n \n prob_irrel = scipy.stats.norm.cdf(0, mean, np.sqrt(var))\n prob_rel = 1. - prob_irrel\n cur_ce = np.sum(prob_irrel * np.log(prob_irrel + self.eps) + prob_rel * np.log(prob_rel + self.eps))\n \n if (ce is None) or (cur_ce < ce):\n ce = cur_ce\n \n return ce", "def getEll(self):\n\n\t\tellx = fftengine.fftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\telly = fftengine.rfftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\treturn np.sqrt(ellx[:,None]**2 + elly[None,:]**2)", "def cronbach_alpha(self) -> float:\n itemscores = np.stack([self.true, self.predicted])\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n return float(nitems / (nitems - 1.) 
* (1 - itemvars.sum() / tscores.var(ddof=1)))", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To th area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank", "def computeX0 (self):\n self.m_x0 = np.sum(self.m_arr, axis=0)\n \"\"\" Subtract the point for which f(x) is max \"\"\"\n self.m_x0 -= self.m_arr[self.m_sorted[-1], :]\n \"\"\" Compute average \"\"\"\n self.m_x0 /= self.m_dim\n _debugPrint(\"Centroid: %s\" %self.m_x0)", "def scoreEvaluationFunction(currentGameState, index):\n if currentGameState.isLose():\n return -float(\"inf\")\n \n if currentGameState.isWin():\n return float(\"inf\")\n\n # ghost function\n def ghostScore(gameState):\n if len(gameState.getGhostStates()) == 0:\n return 0\n score = 0\n newGhostPos = gameState.getGhostPositions()\n newGhostStates = gameState.getGhostStates()\n for pacman in gameState.getPacmanPositions():\n for i in range(len(newGhostPos)):\n if newGhostStates[i].scaredTimer > 0:\n score += ((max(4 - euclidDistance(pacman, newGhostPos[i]), 0)) ** 2)\n else:\n score -= ((max(4 - euclidDistance(pacman, newGhostPos[i]), 0)) ** 2)\n if manhattanDistance(pacman, newGhostPos[i]) < 2:\n return -float(\"inf\")\n \n return score\n \n # food function\n def foodScore(gameState):\n score = 0\n for pacman in gameState.getPacmanPositions():\n pacScore = []\n for foodCoord in gameState.getFood().asList():\n pacScore.append(euclidDistance(foodCoord, pacman))\n score = min(pacScore)\n \n score = score * -2\n score -= len(gameState.getFood().asList()) * 15\n return score\n\n\n totalScore = currentGameState.getScore()[0]\n totalScore += ghostScore(currentGameState) \n totalScore += foodScore(currentGameState)\n 
return totalScore", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n GhostLocs = currentGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = currentGameState.getCapsules()\n Hueristic = 0.0\n \n if currentGameState.isWin():\n return 10000\n if currentGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)]\n\n if newPos in currentGameState.getCapsules():\n capsule = 100\n else: \n capsule = 0\n \n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 4:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*50\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n return Hueristic", "def get_score(location, grid, shape):", "def Ethanol(self) -> int:\n return self.raw_measure()[1]", "def area(self) -> float:\n raise NotImplementedError", "def est(self):\n self.min_key = 'average'\n if self.iteration >= 3:\n # Poll estimated times from different algorithms\n lin_est_time = self.lin_est_no_outlier()\n lin_no_est_time = self.lin_est_outlier()\n average_est_time = self.avg_est_no_outlier()\n average_no_est_time = self.avg_est_outlier()\n\n # Record discrepancies between the estimated delta t's and the\n # actual delta t.\n if self.iteration > 8:\n self.err_rec()\n\n # Review the choice of algorithm after every 15 jobs and switch\n # to a better one if necessary.\n if not self.override:\n if self.iteration % 5 == 0 and self.iteration > 8:\n self.least_err()\n\n # Return the time associated with the algorithm that offers the\n # highest accuracy.\n if self.min_key is 'average':\n est_time = average_est_time\n if self.min_key is 'average_no':\n est_time = average_no_est_time\n elif self.min_key is 'lin':\n est_time = lin_est_time\n elif self.min_key is 'lin_no':\n est_time = lin_no_est_time\n\n est_time = int(round(est_time))\n else:\n est_time = 0\n\n # Bypasses negative estimates occasionally generated by the linear\n # algorithm and huge numbers occasionally generated by the positive\n # exponential algorithm. 3.2e7 is a little over a year.\n if est_time < 0:\n est_time = self.est_time\n if not self.override:\n self.min_key = 'average'\n else:\n self.est_time = est_time\n\n return est_time" ]
[ "0.5964079", "0.59478605", "0.5941593", "0.59182143", "0.5786603", "0.5778234", "0.57550836", "0.5740219", "0.5736216", "0.5681183", "0.5669513", "0.56531614", "0.56521064", "0.56079173", "0.55998206", "0.5591468", "0.5575538", "0.55385804", "0.5520699", "0.55159914", "0.551502", "0.55144674", "0.55141747", "0.5506697", "0.5498125", "0.54953456", "0.5491692", "0.5471989", "0.54712856", "0.5467187", "0.54644704", "0.5449209", "0.54318404", "0.5425441", "0.5414447", "0.5412684", "0.54107934", "0.54100734", "0.5402503", "0.5381907", "0.5380527", "0.5378784", "0.53712845", "0.5366297", "0.53628856", "0.5362532", "0.53565776", "0.53540426", "0.5349062", "0.53462297", "0.5344771", "0.5343933", "0.5343559", "0.53429276", "0.5337479", "0.5329334", "0.532878", "0.53220147", "0.53214103", "0.5320175", "0.5319016", "0.53178084", "0.5310642", "0.53013146", "0.52985865", "0.52962685", "0.5292655", "0.52854383", "0.5277143", "0.5268681", "0.52644706", "0.526178", "0.52615976", "0.526148", "0.52610755", "0.5259959", "0.52592653", "0.5259028", "0.5255163", "0.525285", "0.52527106", "0.5251454", "0.52497226", "0.5247169", "0.524496", "0.52440226", "0.52420336", "0.5240281", "0.5239418", "0.52391195", "0.5239108", "0.5237286", "0.5237132", "0.5231005", "0.5227787", "0.52260303", "0.5223432", "0.5212448", "0.5210709", "0.520533" ]
0.55559903
17
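The three trailing fields close out this record: negative_scores is index-aligned with the negatives list above, document_score is the retrieval score assigned to the positive document for this query, and document_rank is its standing among the scored candidates. The short sketch below is a hedged illustration of how these fields appear to relate; the definition of document_rank is inferred from this record alone (17 of the negative scores exceed 0.55559903, and the stored rank is 17), and the helper name infer_document_rank is hypothetical, not part of the dataset.

def infer_document_rank(document_score: str, negative_scores: list[str]) -> int:
    # Assumption (inferred, not stated by the dataset): document_rank counts how
    # many hard negatives were scored strictly above the positive document.
    doc = float(document_score)  # scores are stored as strings in this dump
    return sum(float(s) > doc for s in negative_scores)

# For the record above this yields 17, matching the stored document_rank.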