Columns:
  query            stringlengths   9 - 9.05k
  document         stringlengths   10 - 222k
  metadata         dict
  negatives        listlengths     30 - 30
  negative_scores  listlengths     30 - 30
  document_score   stringlengths   4 - 10
  document_rank    stringclasses   2 values
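The rows below follow this schema. As a minimal sketch of how such a retrieval dataset might be loaded and iterated, assuming it is published as a Hugging Face dataset; the dataset id used here is a hypothetical placeholder, not the real name:

# Minimal sketch, assuming the data is hosted as a Hugging Face dataset.
# "org/code-search-triplets" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")

for row in ds.select(range(2)):
    query = row["query"]              # natural-language description of the code
    positive = row["document"]        # the matching code snippet
    negatives = row["negatives"]      # 30 non-matching snippets
    scores = row["negative_scores"]   # one similarity score per negative
    print(query[:60], row["document_score"], row["document_rank"], len(negatives))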
Given a RandomNode returns a DecisionNode
def select_outcome(self, env, random_node):
    new_state_index, r, done, _ = env.step(random_node.action)
    return DecisionNode(state=new_state_index, father=random_node, is_final=done), r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node", "def update_decision_node(self, decision_node, random_node, hash_preprocess):\n\n if hash_preprocess(decision_node.state) not in random_node.children.keys():\n decision_node.father = random_node\n random_node.add_children(decision_node, hash_preprocess)\n else:\n decision_node = random_node.children[hash_preprocess(decision_node.state)]\n\n return decision_node", "def select_next_node(self, actual_node):\n #Calculo de la sumatoria total de la pheromona en cada borde\n total_sum = 0.0\n for edge in actual_node.edges:\n total_sum += edge['Pheromone']\n\n #Calculo de la probabilidad de cada borde\n prob = 0\n edge_list = []\n p = []\n for edge in actual_node.edges:\n prob = edge['Pheromone']/total_sum\n edge['Probability'] = prob\n edges_list.append(edge)\n p.append(prob)\n\n #Limpiamos los valores de probabilidad\n for edge in actual_node.edges:\n edge['Probability'] = 0.0\n \"\"\"Devuelve el nodo en función de la probabilidad de las soluciones\"\"\" \n return np.random.choice(edge_list,1,p)[0]['FinalNode']", "def getRandom(self):\n result, node, index = self.node, self.node.next, 1\n\n while node:\n if random.random() < (1.0 / (index+1)):\n result = node\n node = node.next\n index += 1\n return result.val\n\n\n\n # Your Solution object will be instantiated and called as such:\n # obj = Solution(head)\n # param_1 = obj.getRandom()", "def randomnode(state,H,V):\n hit = randomhit(state,H,V)\n node = KFNode(hit,H)\n node.setstate('true',state)\n debug('randomnode x,node ',node)\n return node", "def choose_node(self, choices, scores):\n total = sum(scores)\n cumdist = list(itertools.accumulate(scores)) + [total]\n index = bisect.bisect(cumdist, random.random() * total)\n return choices[min(index, len(choices) - 1)]", "def build_decision_tree():\n\n decision_tree_root = None\n decision_tree_root = DecisionNode(None,None,lambda feature:feature[0]==1)\n decision_tree_root.left = DecisionNode(None,None,None,1)\n decision_tree_root.right = DecisionNode(None,None,lambda feature:feature[3]==1)\n decision_tree_root.right.left = DecisionNode(None,None,lambda feature:feature[1]==0)\n decision_tree_root.right.right = DecisionNode(None,None,lambda feature:feature[2]==1)\n decision_tree_root.right.left.left = DecisionNode(None,None,None,1)\n decision_tree_root.right.left.right = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.left = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.right = DecisionNode(None,None,None,1)\n return decision_tree_root", "def decision():\n return random.random() > 0.5", "def sample_with_and_interaction(node, config, deterministic_prediction):\n # List(Tuple<String Label, String Relation>) - Ex.: <'abundance','increases'>\n node_distribution = config.node_label_distribution_info[node.node_label]\n\n if node.node_label in VARIABLE_TYPE[\"Categorical\"]:\n return get_sample_for_binary_node(node, node_distribution, deterministic_prediction)\n elif node.node_label in VARIABLE_TYPE[\"Continuous\"]:\n return get_sample_for_continuous_node(node, node_distribution, deterministic_prediction)\n else:\n raise Exception(\"invalid node type\")", "def generateChild(problem, goal, node, action):\r\n # get the next state\r\n state = applyAction(node.state, 
action)\r\n # calculate hueristic cost\r\n estimateCost = evaluateCurrentPosition(problem, state)\r\n return Node(estimateCost, 0, state, node, action)", "def generate_new_node(self, parent, rand_node):\n dist = np.linalg.norm(parent.state - rand_node.state)\n if dist < self.Delta: # In case rand_node is very close to parent\n new_state = rand_node.state\n else:\n new_state = parent.state + (rand_node.state - parent.state) / dist * self.Delta\n new_node = Node(new_state)\n return new_node", "def __init__(self, max_depth=None, criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... \")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def _new_learning_node(self, initial_stats=None, parent_node=None,\n is_active=True):\n if initial_stats is None:\n initial_stats = {}\n\n if is_active:\n return AdaActiveLearningNodeRegressor(initial_stats, parent_node,\n random_state=self.random_state)\n else:\n prediction_option = self.leaf_prediction\n if prediction_option == self._TARGET_MEAN:\n return InactiveLearningNodeMean\n else:\n return InactiveLearningNodePerceptron", "def random_neighbor(node, topology):\n return np.random.choice(neighbors(node=node, topology=topology))", "def getRandom(self):\n ans = self.head\n index = 1\n node = ans.next\n while node:\n value = random.randrange(0, index + 1)\n if value == 0:\n ans = node\n index += 1\n node = node.next\n return ans.val", "def create_node(self, game_state, move=None, parent=None):\n # Pass the game state to the neural network to both evaluate the \n # how good the board position is and get the prior probability \n # distribution over possible next moves (ie the predicted distribution \n # of visit counts).\n move_priors, value = self.network.predict(game_state)\n \n # If a root node is being created, then add some dirichlet noise\n # to the prior probabilities to help exploration.\n if parent == None:\n dirichlet_noise = np.random.dirichlet([self.alpha]*96)\n for (i, move) in enumerate(move_priors.keys()):\n move_priors[move] = (move_priors[move] + dirichlet_noise[i])/2\n \n # Create the node for the given game state, with the predicted value\n # and priors, and attach it to the tree.\n new_node = TreeNode(game_state, value, move_priors, parent, move)\n if parent is not None:\n parent.add_child(move, new_node)\n return new_node", "def goalTest(node, goal):\r\n if node.state == goal:\r\n return node", "def get_decision_tree(log, net, initial_marking, final_marking, decision_point=None, attributes=None, parameters=None):\n from sklearn import tree\n\n if parameters is None:\n parameters = {}\n log = log_converter.apply(log, parameters=parameters)\n X, y, targets = apply(log, net, initial_marking, final_marking, decision_point=decision_point,\n attributes=attributes, parameters=parameters)\n dt = tree.DecisionTreeClassifier()\n dt = dt.fit(X, y)\n return dt, list(X.columns.values.tolist()), targets", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def decideOnGoal(self):\r\n\r\n\t\tself.goalNode = self.simulationHandle.getMap().getRandomNode()", "def _get_dataset_node(self, nodes):\n if not nodes:\n raise WNoNodesFound()\n return random.choice(nodes)", "def test_randomly_select_node_2(self):\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n 
self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = {c}\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)\n\n self.assertNotIn(c, node_counter)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)", "def random( cls,\n tree,\n sparsity,\n hidden_per_branch = 3,\n weight_range = [-1, 1],\n joint_range = [0, 1] ):\n num_branches = tree.get_num_branches()\n num_sensors = tree.get_num_leaves()\n num_motors = num_branches\n num_hidden = hidden_per_branch * num_branches\n num_neurons = num_hidden + num_motors + num_sensors\n\n m = num_neurons\n n = num_neurons - num_sensors\n\n weight_matrix = np.random.random( size = ( m, n ) ) * ( weight_range[1] - weight_range[0] ) + weight_range[0]\n expression_matrix = np.random.choice( [0, 1], size = ( m, n ), p = [ sparsity, 1 - sparsity ] )\n joint_vector = np.random.random( size = num_motors ) * ( joint_range[1] - joint_range[0] ) + joint_range[0]\n\n return cls( tree, weight_matrix, expression_matrix, joint_vector )", "def decision_tree_clf():\n\tclf_entropy = DecisionTreeClassifier(\n\t\tcriterion = \"entropy\", random_state = seed,\n\t\tmax_depth = 3, min_samples_leaf = 5\n\t\t)\n\treturn clf_entropy", "def puct_choice(node):\n return np.argmax(puct_distribution(node))", "def getRandom(self) -> int:\n R = self.head; k = 1\n node = self.head.next\n i = 1\n\n while(node):\n j = random.randint(1, i+1)\n if j <= k:\n R = node\n\n node = node.next\n i += 1\n\n return R.val", "def test_randomly_select_node_1(self):\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = set()\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2)\n\n self.assertIn(c, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2)", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def getRondomNode_1(t):\n treelist = []\n traverse(t, treelist)\n random_num = random.randint(0, len(treelist) - 1)\n return treelist[random_num]", "def dec_model(params):\n\n if (params['random']):\n print(\"Random Decision Tree Parameters.\")\n params['criterion'] = random.choice([\"gini\", \"entropy\"])\n params['splitter'] = random.choice([\"best\", \"random\"])\n params['max_features'] = random.choice(['auto', 'sqrt', 'log2', random.randrange(50, 1000, step=25), None])\n params['max_depth'] = random.choice([None, random.randrange(5, 1000, step=5)])\n params['min_samples_split'] = random.choice([2, random.randrange(1, 50, step=1)])\n params['max_leaf_nodes'] = 
random.choice([None, random.randrange(2, 50, step=1)])\n params['min_samples_leaf'] = random.choice([1, random.randrange(5, 100, step=5)])\n print(params)\n \n model = tree.DecisionTreeClassifier(\n criterion=params['criterion'],\n splitter=params['splitter'],\n max_features=params['max_features'],\n max_depth=params['max_depth'],\n min_samples_split=params['min_samples_split'],\n max_leaf_nodes=params['max_leaf_nodes'],\n min_samples_leaf=params['min_samples_leaf']\n )\n\n return model" ]
[ "0.6875797", "0.6506942", "0.636228", "0.632881", "0.6185813", "0.6153899", "0.6089393", "0.6014047", "0.5892125", "0.5744766", "0.5736307", "0.56658673", "0.56345004", "0.562233", "0.5617714", "0.56049466", "0.5597951", "0.5591621", "0.5579573", "0.5573688", "0.557171", "0.5568989", "0.5539332", "0.5529664", "0.5520868", "0.55027145", "0.5478095", "0.54292387", "0.54282194", "0.5425857" ]
0.7110679
0
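The objective metadata marks each row as a (query, document, negatives) triplet. A minimal sketch of feeding such triplets to a contrastive trainer; the model name, loss choice, number of negatives, and the legacy sentence-transformers fit API are illustrative assumptions, not prescribed by the dataset:

# Minimal sketch, assuming sentence-transformers is used for training.
# Model, loss, and the 5-negative cap are illustrative choices only.
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# One InputExample per (query, positive document, hard negative).
examples = [
    InputExample(texts=[row["query"], row["document"], neg])
    for row in ds                      # `ds` as loaded in the earlier sketch
    for neg in row["negatives"][:5]
]

loader = DataLoader(examples, shuffle=True, batch_size=16)
loss = losses.TripletLoss(model)
model.fit(train_objectives=[(loader, loss)], epochs=1)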
At the end of the simulations returns the most visited action
def best_action(self):
    number_of_visits_children = [node.visits for node in self.root.children.values()]
    index_best_action = np.argmax(number_of_visits_children)
    a = list(self.root.children.values())[index_best_action].action
    return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_action(self) -> int:\n # simulation loop\n for i in range(self.iterations):\n self.__simulate(self.root, self.iterations)\n\n # action choice\n max_q = 0\n best_action = 0\n for action in actions:\n new_node = self.root.children[action]\n value = new_node.Q\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def _best_action(self, state):\n actions_rewards = list(self.Q[state].items())\n return max(actions_rewards, key=lambda x: x[1])[0]", "def get_action(self, state):\n if np.random.rand() <= self.epsilon:\n action_idx = random.randrange(self.action_size)\n else:\n \n # Use all traces for RNN\n #q = self.model.predict(state) # 1x8x3\n #action_idx = np.argmax(q[0][-1])\n\n # Only use last trace for RNN\n q = self.model.predict(state) # 1x3\n action_idx = np.argmax(q)\n return action_idx", "def chooseAction(self, gameState):\n probabilities = self.assignProbablities(gameState)\n #print probabilities\n prob, bestProbabilityAction = max(probabilities)\n return bestProbabilityAction", "def step(self):\n # No need for epsilon as exploration is controlled by c\n\n\n # Step in time (choose an action) \n n = np.sum(self.action_count)\n if n > 0: # Condition to evaluate on first iteration, because np.log(0) = -inf\n mask = self.action_count > 0 # Mask to avoid division by 0 on the formula for upper confidence uncertainties\n uncertainties = np.zeros(self.action_count.shape)\n uncertainties[mask] = self.c*np.sqrt(np.log(n)/self.action_count[mask])\n uncertainties[~mask] = float('inf') # We increment uncertainty of actions we've never chosen\n else:\n uncertainties = np.array(np.repeat(float('inf'), len(self.action_count))) \n optimals = self.Q + uncertainties # Uncertainty rises the value of less chosen actions, hence promoting exploration\n \n max_actions = np.argwhere(optimals == np.amax(optimals)).flatten() # greedy actions (max value)\n if len(max_actions) == 1:\n self.last_action = max_actions\n else:\n self.last_action = np.random.choice(max_actions)\n\n return self.last_action", "def get_highest_value_action(self, state):\n a = self.sess.run(self.network.maxOutputNode, feed_dict={self.network.inputs: [state]})\n return a[0]", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, expectmax_value(self, child, self.depth, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def getAction(self, gameState):\n\n # Find the number of agents. 
This is (1 + number of ghosts)\n numberOfAgents = gameState.getNumAgents()\n\n # Starting from depth 0\n currDepth = 0\n\n # Agent property that will be updated every time we find a valid optimal action\n self.nextActionToTake = Directions.STOP\n\n\n # Start from MAX turn\n self.max_value(gameState, currDepth, self.depth, numberOfAgents)\n\n # print \"totalNodesExpandedTillNow = \", self.totalNodesExpandedTillNow\n\n # Return the property which contains the best action till now.\n return self.nextActionToTake", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? \n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n alpha = float(\"-inf\")\n beta = float(\"inf\")\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, alphabeta_value(self, child, self.depth, alpha, beta, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def chooseAction(self, epsilon, state):\n if random.uniform(0, 1) < epsilon:\n return random.randrange(9)\n\n cur_best_val = -float('inf')\n cur_best_action = 0\n\n data = env.getAllNextStates(state)\n\n with torch.no_grad():\n for action, next_state, done in data:\n if next_state != state:\n value = self.NN(self.RBF[next_state]).item() if not done else 0\n if value > cur_best_val:\n cur_best_val = value\n cur_best_action = action\n #print(data)\n return cur_best_action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, minimax_value(self, child, self.depth, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def getAction(self, gameState):\n result = float(\"-inf\")\n action = 1\n for agentState in gameState.getLegalActions(0):\n valorminimax = self.expectiMaxDecision(1, 0, gameState.generateSuccessor(0, agentState))\n if valorminimax > result:\n result = valorminimax\n action = agentState\n return action", "def get_action(self, state):\n\n best_action = None\n best_value = -np.inf\n actions = [0, 1, 2, 3] # left, down, right, up\n for a in actions:\n row = state // self.edge\n col = state % self.edge\n # print (row, col)\n if a == 0:\n col = max(col-1, 0)\n elif a == 1:\n row = min(row+1, self.edge-1)\n elif a == 2:\n col = min(col+1, self.edge-1)\n elif a == 3:\n row = max(row-1, 0)\n # print (row, col)\n\n new_state = row * self.edge + col\n # print 
(new_state)\n if (self.values[new_state] > best_value or new_state == self.num_states-1): #goal\n best_value = 1.0 if new_state == self.num_states-1 else self.values[new_state]\n best_action = a\n return best_action", "def getPolicy(self, state):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n possibleActions = self.mdp.getPossibleActions(state)\n #checking for terminal state (no possible actions)\n if len(possibleActions) is 0: \n return None\n \n #attempt at using the Counter\n eValsActions = util.Counter()\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n return eValsActions.argMax()\n \n #fail attempt using lists :(\n \"\"\"\n #list to hold the expected value of the actions\n eValsActions = []\n #iterate through all actions and their transtion states\n for action in possibleActions:\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #expected value of reward with discount * the value of the transitions\n eValsActions[action] += probability * (self.mdp.getReward( state, action, transitionState) + self.discount * self.values[transitionState])\n \n #now iterate through and find the action with the best value\n #(that will be the best action)\n maxVal = -float(\"inf\")\n bestAction = None\n for action in possibleActions:\n if eValsActions[action] > maxVal:\n maxVal = eValsAction[action]\n bestAction = action\n \"\"\"\n return action\n # END OUR CODE", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n for idx,a in enumerate(actions):\n baby = self.getSuccessor(gameState, a)\n qsum = [self.evaluate(baby, action) for action in baby.getLegalActions(self.index)]\n values[idx] += min(qsum) \n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def bestAction(self, state):\n action = self.q_network.chooseBestAction(state)\n V = max(self.q_network.qValues(state))\n return action, V", "def getAction(self, gameState):\n # Find the number of agents. 
This is (1 + number of ghosts)\n numberOfAgents=gameState.getNumAgents()\n\n # Starting from depth 0\n currDepth=0\n\n # Agent property that will be updated every time we find a valid optimal action\n self.nextActionToTake=Directions.STOP\n alpha = -99999999999\n beta = 99999999999\n\n # Start from MAX turn\n self.max_value(gameState, currDepth, self.depth, numberOfAgents , alpha, beta)\n\n # print \"totalNodesExpandedTillNow = \", self.totalNodesExpandedTillNow\n\n # Return the property which contains the best action till now.\n return self.nextActionToTake", "def getAction(self, gameState):\n result = float(\"-inf\")\n action = 1\n for agentState in gameState.getLegalActions(0):\n valorminimax = self.miniMaxDecision(1, 0, gameState.generateSuccessor(0, agentState))\n if valorminimax > result:\n result = valorminimax\n action = agentState\n return action", "def getAction(self, gameState):\n bestVal = -INF\n bestAction = None\n searchDepth = self.depth * gameState.getNumAgents()\n for action in gameState.getLegalActions(0):\n state = gameState.generateSuccessor(0, action)\n newVal = self.expectimax(state, 1, searchDepth - 1)\n if newVal > bestVal:\n bestVal = newVal\n bestAction = action\n return bestAction", "def select_final(self):\n best_qsa_star = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n if qsa_star > best_qsa_star:\n best_qsa_star = qsa_star\n best_node = c\n return best_node.action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n\n # minimax-decision\n self.NumAgents = gameState.getNumAgents()\n v = -10000\n action = ''\n # The following for will evaluate each action starting from the root , and finds the one that will lead\n # to a maximum possible score\n for a in self.Actions(gameState, self.index):\n score = self.MinValue(self.Result(gameState, a, self.index), self.index + 1, self.depth)\n if score > v:\n action = a\n v = score\n return action\n\n util.raiseNotDefined()", "def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n\n bestchoice = \"Stop\"\n value = float(\"-inf\")\n i = 0\n actlist = gameState.getLegalActions(0)\n while i < len(actlist):\n action = actlist[i]\n arg1 = self.minValue(gameState.generateSuccessor(0, action), 1, self.depth)\n maxVal = max(value, arg1)\n if value < maxVal:\n value = maxVal\n bestchoice = action\n i = i + 1\n return bestchoice", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n \n #print \"\\n\\n\\n\"\n \n def MaxValue(gameState, currentDepth, agentNumber):\n if currentDepth is self.depth or 
gameState.isWin() or gameState.isLose():\n #print \"\\t\", self.evaluationFunction(gameState)\n return (self.evaluationFunction(gameState), Directions.STOP)\n \n largestValue = float(\"-inf\")\n bestAction = Directions.STOP\n legalActions = gameState.getLegalActions(agentNumber)\n legalActions.sort()\n for action in legalActions:\n if action is Directions.STOP:\n continue\n successor = gameState.generateSuccessor(agentNumber, action)\n nextAgentNumber = (agentNumber + 1) % gameState.getNumAgents()\n if nextAgentNumber is 0:\n successorValue= MaxValue(successor, currentDepth + 1, nextAgentNumber)[0]\n else:\n successorValue = ExpValue(successor, currentDepth, (agentNumber + 1) % gameState.getNumAgents())[0]\n if(successorValue > largestValue):\n largestValue = successorValue\n bestAction = action\n return (largestValue, bestAction)\n \n def ExpValue(gameState, currentDepth, agentNumber):\n if currentDepth is self.depth or gameState.isWin() or gameState.isLose():\n #print \"\\t\", self.evaluationFunction(gameState)\n return (self.evaluationFunction(gameState), Directions.STOP)\n \n totalValue = 0\n legalActions = gameState.getLegalActions(agentNumber)\n legalActions.sort()\n for action in legalActions:\n successor = gameState.generateSuccessor(agentNumber, action)\n nextAgentNumber = (agentNumber + 1) % gameState.getNumAgents()\n if nextAgentNumber is 0:\n successorValue = MaxValue(successor, currentDepth + 1, nextAgentNumber)[0]\n else:\n successorValue = ExpValue(successor, currentDepth, nextAgentNumber)[0]\n totalValue += successorValue\n return (totalValue/len(legalActions), Directions.STOP)\n \n result= MaxValue(gameState, 0, 0)\n resultActionToTake =result[1]\n \n #print gameState.getLegalActions(0)\n #print 'AlphaBeta value for depth ', self.depth,' ',result[0]\n import time\n\n #print \"SCORE picked \", result[0]\n #time.sleep(1)\n #print 'This should always be true... ', resultActionToTake in gameState.getLegalActions(0)\n return resultActionToTake", "def chooseAction(self, gameState):\n\n # Track opponents position\n self.trackGhosts(gameState)\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n\n # You can profile your evaluation time by uncommenting these lines\n values = [self.evaluate(gameState, a) for a in actions]\n\n # Trick to avoid getting stuck in the same position for too long\n okValues = []\n okActions = []\n for i, a in enumerate(actions):\n newPos = self.getSuccessor(gameState, a).getAgentState(self.index).getPosition()\n # If any newPos is in lastPositions more than twice, we remove it from the list\n if self.lastPositions.count(newPos) <= 2:\n okValues.append(values[i])\n okActions.append(a)\n\n # Choose best action from list of actions\n try:\n maxValue = max(okValues)\n bestActions = [a for a, v in zip(okActions, okValues) if v == maxValue]\n except ValueError:\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n chosenAction = random.choice(bestActions)\n\n print \"BEST ACTION:\", chosenAction, maxValue, \"\\n--------------------------------------------------------------\\n\"\n # if chosenAction == Directions.STOP:\n # raw_input(\"Stopped! 
Go check what happened\")\n\n # Save current chosen position in lastPositions and advance the index\n self.lastPositions[self.lastPositionsIdx % self.positionsStored] = self.getSuccessor(gameState, chosenAction).getAgentState(self.index).getPosition()\n self.lastPositionsIdx += 1\n\n # If we are eating any ghost, update our future belief about it\n self.updateEatenOpponents2(gameState, chosenAction)\n # Update food eaten by opponents\n self.babies = self.getFoodYouAreDefending(gameState).asList()\n # Update self.isPacman\n self.isPacman = [self.getSuccessor(gameState, chosenAction).getAgentState(i).isPacman for i in range(gameState.getNumAgents())]\n\n return chosenAction" ]
[ "0.70127183", "0.687149", "0.6863603", "0.67862076", "0.6771671", "0.6724781", "0.670188", "0.66465", "0.66390723", "0.66019285", "0.6597186", "0.6585305", "0.6570425", "0.6556634", "0.65416694", "0.65311867", "0.6512796", "0.6509938", "0.6498248", "0.64976394", "0.6495535", "0.6488972", "0.6484267", "0.64834446", "0.64761853", "0.64644635", "0.6458198", "0.6450924", "0.64367723", "0.6431182" ]
0.7107038
0
Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.
def from_expression(
    cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
) -> Step:
    ctes = ctes or {}
    expression = expression.unnest()
    with_ = expression.args.get("with")

    # CTEs break the mold of scope and introduce themselves to all in the context.
    if with_:
        ctes = ctes.copy()
        for cte in with_.expressions:
            step = Step.from_expression(cte.this, ctes)
            step.name = cte.alias
            ctes[step.name] = step  # type: ignore

    from_ = expression.args.get("from")

    if isinstance(expression, exp.Select) and from_:
        step = Scan.from_expression(from_.this, ctes)
    elif isinstance(expression, exp.Union):
        step = SetOperation.from_expression(expression, ctes)
    else:
        step = Scan()

    joins = expression.args.get("joins")

    if joins:
        join = Join.from_joins(joins, ctes)
        join.name = step.name
        join.add_dependency(step)
        step = join

    projections = []  # final selects in this chain of steps representing a select
    operands = {}  # intermediate computations of agg funcs eg x + 1 in SUM(x + 1)
    aggregations = set()
    next_operand_name = name_sequence("_a_")

    def extract_agg_operands(expression):
        agg_funcs = tuple(expression.find_all(exp.AggFunc))
        if agg_funcs:
            aggregations.add(expression)
        for agg in agg_funcs:
            for operand in agg.unnest_operands():
                if isinstance(operand, exp.Column):
                    continue
                if operand not in operands:
                    operands[operand] = next_operand_name()
                operand.replace(exp.column(operands[operand], quoted=True))
        return bool(agg_funcs)

    def set_ops_and_aggs(step):
        step.operands = tuple(alias(operand, alias_) for operand, alias_ in operands.items())
        step.aggregations = list(aggregations)

    for e in expression.expressions:
        if e.find(exp.AggFunc):
            projections.append(exp.column(e.alias_or_name, step.name, quoted=True))
            extract_agg_operands(e)
        else:
            projections.append(e)

    where = expression.args.get("where")

    if where:
        step.condition = where.this

    group = expression.args.get("group")

    if group or aggregations:
        aggregate = Aggregate()
        aggregate.source = step.name
        aggregate.name = step.name

        having = expression.args.get("having")

        if having:
            if extract_agg_operands(exp.alias_(having.this, "_h", quoted=True)):
                aggregate.condition = exp.column("_h", step.name, quoted=True)
            else:
                aggregate.condition = having.this

        set_ops_and_aggs(aggregate)

        # give aggregates names and replace projections with references to them
        aggregate.group = {
            f"_g{i}": e for i, e in enumerate(group.expressions if group else [])
        }

        intermediate: t.Dict[str | exp.Expression, str] = {}
        for k, v in aggregate.group.items():
            intermediate[v] = k
            if isinstance(v, exp.Column):
                intermediate[v.name] = k

        for projection in projections:
            for node, *_ in projection.walk():
                name = intermediate.get(node)
                if name:
                    node.replace(exp.column(name, step.name))

        if aggregate.condition:
            for node, *_ in aggregate.condition.walk():
                name = intermediate.get(node) or intermediate.get(node.name)
                if name:
                    node.replace(exp.column(name, step.name))

        aggregate.add_dependency(step)
        step = aggregate

    order = expression.args.get("order")

    if order:
        if isinstance(step, Aggregate):
            for i, ordered in enumerate(order.expressions):
                if extract_agg_operands(exp.alias_(ordered.this, f"_o_{i}", quoted=True)):
                    ordered.this.replace(exp.column(f"_o_{i}", step.name, quoted=True))

            set_ops_and_aggs(aggregate)

        sort = Sort()
        sort.name = step.name
        sort.key = order.expressions
        sort.add_dependency(step)
        step = sort

    step.projections = projections

    if isinstance(expression, exp.Select) and expression.args.get("distinct"):
        distinct = Aggregate()
        distinct.source = step.name
        distinct.name = step.name
        distinct.group = {
            e.alias_or_name: exp.column(col=e.alias_or_name, table=step.name)
            for e in projections or expression.expressions
        }
        distinct.add_dependency(step)
        step = distinct

    limit = expression.args.get("limit")

    if limit:
        step.limit = int(limit.text("expression"))

    return step
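For context, a hedged sketch of how such a step DAG is typically produced from raw SQL with sqlglot's planner; the query text is illustrative, and in practice the expression is usually qualified and optimized first (as sqlglot's executor does) before planning:

# Hedged sketch: building a step DAG from a SQL string with sqlglot's planner.
# Real usage normally optimizes/qualifies the expression before planning.
import sqlglot
from sqlglot.planner import Plan

expression = sqlglot.parse_one("SELECT a, SUM(b) AS s FROM x GROUP BY a ORDER BY a LIMIT 10")
plan = Plan(expression)  # calls Step.from_expression under the hood

print(plan.root)  # the final step in the DAG
for step, deps in plan.dag.items():  # step -> set of steps it depends on
    print(step.name, "<-", sorted(d.name for d in deps))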
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plan_step_to_expr(atom: clingo.Symbol) -> str:\n # The predicate and its arguments are double-quoted. Simply extract them\n matches = re.findall(r'\\\"(.+?)\\\"', str(atom))\n predicate = matches[0]\n args = f'({\",\".join(matches[1:])})' if matches[1:] else ''\n return predicate + args", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def _make_execution_plan(\n projection: _Names,\n relations: List[str],\n condition: Optional[_Condition],\n db: tsdb.Database) -> Tuple:\n resolve_qname = _make_qname_resolver(db, relations)\n\n if projection == ['*']:\n projection = _project_all(relations, db)\n else:\n projection = [resolve_qname(name)[0] for name in projection]\n\n cond_resolved: Optional[_Condition] = None\n cond_fields: _Names = []\n if condition:\n cond_resolved, cond_fields = _process_condition_fields(\n condition, resolve_qname)\n\n joins = _plan_joins(projection, cond_fields, relations, db)\n\n return projection, joins, cond_resolved", "def _generate_sql_parts(self, node,i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in self.DiG.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,self.DiG.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[1],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\t# print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_sql_parts(edge[1],i+1,colNames,sql)\n\t\t\t# if 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\t# referencesPersonFact = True\n\t\t# print('_generate_sql_parts')\n\t\t# print(colNames)\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\tnet_new_colNames = []\n\t\t# remove colNames of already in leaf table\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql", "def evaluate(compiled_expression):", "def _parse_and_crawl_outer(sql):\n linter = Linter(dialect=\"ansi\")\n parsed = linter.parse_string(sql)\n # Create a crawler from the root segment.\n crawler = SelectCrawler.from_root(parsed.tree, linter.dialect)\n # Analyse the segment.\n return crawler, linter", "def sql(self):\n rule_specs = []\n\n patterns = {}\n pattern_specs = 
[]\n\n # It's safe to unpack `self.get_rules` because it can only\n # return A) an empty list or B) a list of two-tuples with two elements in\n # them (the path and the rule for each query directive).\n for path, rule in self.rules:\n # Don't parse if this is not a properly registered rule type.\n if not self.is_rule(rule):\n pass\n rule_type = rule['_rule_type']\n sql_tuple = self.sql_generators[rule_type](path, rule)\n if sql_tuple is not None:\n rule_specs.append(sql_tuple)\n\n # The check on 'pattern' here allows us to apply a pattern filter on top of others\n if 'pattern' in rule:\n match_multiple = (rule['_rule_type'] == 'containment_multiple')\n for pattern in self.split_search_pattern(rule['pattern']):\n sql_tuple = FilterTree.text_similarity_filter(path, pattern, match_multiple)\n # add to the list of rules generated for this pattern (one per field)\n patterns.setdefault(pattern, []).append(sql_tuple)\n\n rule_string = ' AND '.join([rule[0] for rule in rule_specs])\n\n pattern_rules = patterns.values()\n pattern_strings = []\n\n # check if any of the fields for this string pattern match\n for rule_list in pattern_rules:\n pattern_strings.append(' OR '.join([rule[0] for rule in rule_list]))\n pattern_specs += rule_list\n\n # check that record has a match for all of the string patterns in some field\n pattern_string = '(' + ') AND ('.join(pattern_strings) + ')' if pattern_strings else ''\n\n if rule_string != '' and pattern_string != '':\n filter_string = '(' + (' AND ('.join([rule_string, pattern_string])) + ')' + ')'\n elif rule_string != '' or pattern_string != '':\n filter_string = '(' + ''.join([rule_string, pattern_string]) + ')'\n else:\n filter_string = ''\n\n # flatten the rule_paths\n rule_paths_first = ([rule[1] for rule in rule_specs] +\n [rule[1] for rule in pattern_specs])\n rule_paths = [item for sublist in rule_paths_first\n for item in sublist]\n\n outcome = (filter_string, tuple(rule_paths))\n return outcome", "def translate_call_to_sql(self, query, clause, state):\n node1 = clause[0]\n rel = clause[1]\n if rel.labels is None:\n return\n\n # load here so we get the uniquified name registered with the connection:\n self.load()\n old_graph = node1._graph_table\n old_graph_alias = node1._graph_alias\n new_graph = self.get_name()\n # create a new alias (which is fine given we have a unique table name),\n # this will transparently handle qualified graph table names:\n new_graph_alias = state.get_table_aliases(new_graph, new_graph + '_c')[0]\n node1._graph_table = new_graph\n node1._graph_alias = new_graph_alias\n # TO DO: support this in query.py:\n #state.unregister_table_alias(old_graph, old_graph_alias)\n state.register_table_alias(new_graph, new_graph_alias)\n # prevent the generation of a label restriction based on the virtual graph name:\n rel.labels = None\n # now finish translation with standard translator:\n query.pattern_clause_to_sql(clause, new_graph_alias, state)", "def compile(expression):", "def _generate_sql(self, keys, changed_keys):\n for key in reversed(keys):\n app_label, sql_name = key\n new_item = self.to_sql_graph.nodes[key]\n sql_deps = [n.key for n in self.to_sql_graph.node_map[key].parents]\n reverse_sql = new_item.reverse_sql\n\n if key in changed_keys:\n operation_cls = AlterSQL\n kwargs = {}\n # in case of replace mode, AlterSQL will hold sql, reverse_sql and\n # state_reverse_sql, the latter one will be used for building state forward\n # instead of reverse_sql.\n if new_item.replace:\n kwargs['state_reverse_sql'] = reverse_sql\n 
reverse_sql = self.from_sql_graph.nodes[key].sql\n else:\n operation_cls = CreateSQL\n kwargs = {'dependencies': list(sql_deps)}\n\n operation = operation_cls(\n sql_name, new_item.sql, reverse_sql=reverse_sql, **kwargs)\n sql_deps.append(key)\n self.add_sql_operation(app_label, sql_name, operation, sql_deps)", "def evaluateStructure(compiled_expression):", "def tree_build(sv, piece):\r\n if piece==None: return None \r\n # process various string expressions (or triplets without args for conditions and values)\r\n piece=piece[0].strip(Space) if type(piece)==tuple else piece.strip(Space) # convert to string \r\n alphabetic=Alphakwords+sv.Object_list\r\n \r\n # empty expression\r\n if not piece: return None\r\n\r\n # a string between quotes\r\n if piece[0]==Quote and piece[-1]==Quote: return (piece, None, None) # return string as a leaf\r\n \r\n # a protected string: restore without further parsing \r\n key=piece.strip(Special) \r\n if key in sv.Strings: return (Quote+sv.Strings[key]+Quote, None, None) # return string as a leaf\r\n\r\n # a bracketed expression: parse from outer ones on, RECURSIVE\r\n if key in sv.Blocks: return (Obr, tree_build(sv, sv.Blocks[key]), None)\r\n\r\n piece=save_bracketed(sv, piece) # protect outer bracketed expressions from parsing\r\n piece=Space+piece+Space # add Spaces to help detect alphabetic keys \r\n \r\n # PARSE by operator priority and descending order of position \r\n for op_group in Priority_groups+[sv.Object_list]: # ops by priority groups\r\n op_list=find_op(sv, piece, op_group, alphabetic) # detect operators of this group\r\n\r\n for o, op in op_list: # found ops from this group in reverse order of occurrence\r\n\r\n # process comma operator \r\n if o==Comma and o in piece: return make_list(sv, piece) # list will be linear (not a tree). Build RECURSIVE \r\n\r\n # process unary functions and defined objects (all unary operators are alphabetic)\r\n if o in Unary or o in sv.Object: # unary operators (non space-delimited)\r\n if piece.startswith(op): # operator must be at the start (space-delimited)\r\n res=make_unary(sv, piece, o, op)\r\n if res and (not res[1] or o in [Begin, End]):\r\n return special_unary(sv, res) # process special case \r\n return res\r\n \r\n # process binary operators (always lower priority than unary). Build RECURSIVE\r\n elif op in piece:\r\n res=make_binary(sv, piece, o, op) # binary operators (space-delimited)\r\n if res and (not res[1] or o==Isnot):\r\n return special_binary(sv, res) # process special case \r\n return res\r\n\r\n # process other (args and doubly) subscripted objects. 
Build RECURSIVE\r\n piece=piece.strip(Space)\r\n if Special+Bloc in piece: return make_subscripted(sv, piece) # the object is subscripted / has args\r\n\r\n # when all operators have been processed, only leaves remain\r\n return make_leaf(sv, piece)", "def _break_query(queryStr):\n logicStatements = []\n opList = []\n\n #TODO: Add check for balanced parenthesis\n\n\n if('(' in queryStr and ')' in queryStr):\n\n currentPairLevel = 0 #indicates the current nest level of parens\n pairSearchLevel = 0 #level of open paren that match is being found for\n openPairIndex = 0 #the index of the open parenthesis in queryStr\n closePairIndex = 0 #index of close parenthesis in queryStr\n outerOpenFound = False\n indexPairs = []\n for index, char in enumerate(queryStr):\n\n if(char=='('):\n currentPairLevel += 1\n #if first open parenthesis\n if(not outerOpenFound):\n openPairIndex = index\n pairSearchLevel = currentPairLevel\n outerOpenFound = True\n elif(char==')'):\n #if the parenthesis is at the same nest level as the open\n if(currentPairLevel == pairSearchLevel):\n closePairIndex = index\n indexPairs.append([openPairIndex,closePairIndex])\n outerOpenFound = False\n currentPairLevel -= 1\n\n #used the positions of the parenthesis to pull sliced from the query\n for index, pair in enumerate(indexPairs):\n logicStatements.append(queryStr[(pair[0]+1):pair[1]])\n\n #if not the last parenthesis pair then get operator after it\n if not(index == len(indexPairs)-1):\n opList.append(queryStr[pair[1]+1])\n \n return logicStatements, opList", "def _build_expression(self, exp, object_class, fields):\n if \"op\" not in exp:\n return None\n\n def autocast(o_key, value):\n \"\"\"Try to guess the type of `value` and parse it from the string.\"\"\"\n if not isinstance(o_key, (str, unicode)):\n return value\n key, _ = self.attr_name_map[object_class].get(o_key, (o_key, None))\n # handle dates\n if (\"date\" in key and \"relative\" not in key) or \\\n key in [\"end_date\", \"start_date\"]:\n if isinstance(value, datetime.date):\n return value\n try:\n month, day, year = [int(part) for part in value.split(\"/\")]\n return datetime.date(year, month, day)\n except Exception:\n raise BadQueryException(\"Field \\\"{}\\\" expects a MM/DD/YYYY date\"\n .format(o_key))\n # fallback\n return value\n\n def relevant():\n \"\"\"Filter by relevant object.\"\"\"\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )\n\n def unknown():\n raise BadQueryException(\"Unknown operator \\\"{}\\\"\"\n .format(exp[\"op\"][\"name\"]))\n\n def with_key(key, p):\n key = key.lower()\n key, filter_by = self.attr_name_map[\n object_class].get(key, (key, None))\n if hasattr(filter_by, \"__call__\"):\n return filter_by(p)\n else:\n attr = getattr(object_class, key, None)\n if attr is None:\n raise BadQueryException(\"Bad query: object '{}' does \"\n \"not have attribute '{}'.\"\n .format(object_class.__name__, key))\n return p(attr)\n\n with_left = lambda p: with_key(exp[\"left\"], p)\n\n lift_bin = lambda f: f(self._build_expression(exp[\"left\"], object_class,\n fields),\n self._build_expression(exp[\"right\"], object_class,\n fields))\n\n def text_search():\n \"\"\"Filter by text search.\n\n The search is done only in fields listed in external `fields` var.\n \"\"\"\n existing_fields = self.attr_name_map[object_class]\n text = \"%{}%\".format(exp[\"text\"])\n p 
= lambda f: f.ilike(text)\n return or_(*(\n with_key(field, p)\n for field in fields\n if field in existing_fields\n ))\n\n rhs = lambda: autocast(exp[\"left\"], exp[\"right\"])\n\n ops = {\n \"AND\": lambda: lift_bin(and_),\n \"OR\": lambda: lift_bin(or_),\n \"=\": lambda: with_left(lambda l: l == rhs()),\n \"!=\": lambda: not_(with_left(\n lambda l: l == rhs())),\n \"~\": lambda: with_left(lambda l:\n l.ilike(\"%{}%\".format(rhs()))),\n \"!~\": lambda: not_(with_left(\n lambda l: l.ilike(\"%{}%\".format(rhs())))),\n \"<\": lambda: with_left(lambda l: l < rhs()),\n \">\": lambda: with_left(lambda l: l > rhs()),\n \"relevant\": relevant,\n \"text_search\": text_search\n }\n\n return ops.get(exp[\"op\"][\"name\"], unknown)()", "def evaluate1(expr):\n operators = '*/+-'\n operator_stack = []\n operand_stack = []\n\n def parse_operand(s, i):\n \"\"\"\n parse the location of the string until I find an\n operator\n parse \"12\" to 12\n \"12.12\" to 12.12\n returns a float\n \"\"\"\n value = ''\n while (s[i] not in operators):\n value += s[i]\n i += 1\n if s[i] == ')':\n break\n return float(value), i-1\n\n def do_operation(operand1, operand2, operator):\n if operator == '+':\n return operand1 + operand2 \n elif operator == '*':\n return operand1 * operand2\n elif operator == '/':\n return operand1 / operand2\n elif operator == '-':\n return operand1 - operand2\n\n i = 0\n s = expr\n length = len(s)\n numbers = '0123456789'\n while i < length:\n data = s[i]\n if data == '(':\n operand_stack.append(data)\n elif data in numbers:\n # parse the operand number and modifies the index i\n number, i = parse_operand(s, i)\n operand_stack.append(number)\n elif data in operators:\n operator_stack.append(data)\n elif data is ')':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator = operator_stack.pop()\n operand_stack.pop() # remove (\n operand_stack.append(do_operation(operand1, operand2, operator))\n i += 1\n return operand_stack.pop()", "def gen_sql(runtime, query_type, target_model=None):\n\n from_table = runtime.model.table_name\n\n # if target_model not given, use from_table instead\n if target_model is None:\n target_model = runtime.model\n\n target_table = target_model.table_name\n\n data = runtime.data # alias\n\n # quick mark for parse time functions\n _where = Compiler.parse_where(data['where'])\n _set = Compiler.parse_set(data['set'])\n _orderby = Compiler.parse_orderby(data['orderby'])\n _select = Compiler.parse_select(data['select'])\n _limit = Compiler.parse_limit(data['limit'])\n _groupby = Compiler.parse_groupby(data['groupby'])\n _having = Compiler.parse_having(data['having'])\n _distinct = Compiler.parse_distinct(data['distinct'])\n\n pattern = Compiler.SQL_PATTERNS[query_type]\n\n SQL = pattern.format(**{\n 'target': target_table,\n 'set': _set,\n 'from': from_table,\n 'where': _where,\n 'select': _select,\n 'limit': _limit,\n 'orderby': _orderby,\n 'groupby': _groupby,\n 'having': _having,\n 'distinct': _distinct,\n })\n\n return SQL", "def build(self, query, metadata, allowed_platforms, allowed_capabilities, user = None):\n root = metadata.find_node(query.get_from())\n if not root:\n raise RuntimeError(\"query_plan::build(): Cannot find %s in metadata, known tables are %s\" % (\n query.get_from(),\n sorted(set(metadata.get_table_names()))\n ))\n \n root_task = ExploreTask(root, relation=None, path=[], parent=self, depth=1)\n root_task.addCallback(self.set_ast, query)\n\n stack = Stack(root_task)\n seen = {} # path -> set()\n\n missing_fields = set()\n 
missing_fields |= query.get_select()\n missing_fields |= query.get_where().get_field_names()\n missing_fields |= set(query.get_params().keys())\n\n while missing_fields:\n task = stack.pop()\n if not task:\n # Exploration ends here\n Log.warning(\"Exploration terminated without finding fields: %r\" % missing_fields)\n break\n\n pathstr = '.'.join(task.path)\n if not pathstr in seen:\n seen[pathstr] = set()\n\n # ROUTERV2\n # foreign_key_fields are fields added because indirectly requested by the user.\n # For example, he asked for slice.resource, which in fact will contain slice.resource.urn\n foreign_key_fields = task.explore(stack, missing_fields, metadata, allowed_platforms, allowed_capabilities, user, seen[pathstr], query_plan = self)\n\n self.foreign_key_fields.update(foreign_key_fields)\n\n while not stack.is_empty():\n task = stack.pop()\n task.cancel()\n\n # Do we need to wait for self.ast here ?", "def build_sql_cmds(sql):\n\tsql_cmds = []\n\n\t# Sql for path table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"path\", column=\"strPath\"))\n\t# SQL for movie table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"movie\", column=\"c22\"))\n\t# SQL for episode table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"episode\", column=\"c18\"))\n\t# SQL for art table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"art\", column=\"url\"))\n\t# SQL for tvshow table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"tvshow\", column=\"c16\"))\n\n\treturn sql_cmds", "def parse_queries(selection, mapping):\n out = []\n params = {}\n for i, expression in enumerate(selection):\n if isinstance(expression, str):\n id1, op, id2 = re.split(\"(<=|>=|!=|=~|>|<|=)\", expression, maxsplit=1)\n\n # a should be a variable in the children\n name1 = id1.split(\".\")[-1]\n if name1 in mapping:\n a = mapping[name1]\n else:\n raise ConstraintExpressionError(\n f'Invalid constraint expression: \"{expression}\" (\"{id1}\" is not a valid variable)'\n )\n\n # b could be a variable or constant\n name2 = id2.split(\".\")[-1]\n if name2 in mapping:\n b = mapping[name2]\n else:\n b = ast.literal_eval(id2)\n\n out.append(\n f\"({a} {op} :{i})\"\n ) # bad hack for positional args since Session.execute doesn't support them\n params[str(i)] = b\n\n return out, params", "def _next_expression(self, child_types):\n\n # Consume child_types from front to build expression\n expression = []\n while len(expression) < 3:\n # An expression can be longer than 3 tokens if some are unary-negated\n if self._has_unary_negation(child_types, expression):\n expression.append(self._subexpression_type(child_types[:2]))\n [child_types.pop(0) for _ in [0,1]]\n else:\n expression.append(child_types.pop(0))\n return expression", "def compile(r: str):\n if isinstance(r, str):\n expr = parse(r)\n else:\n expr = r\n\n states = [expr]\n state_numbers = {expr: 0}\n transitions = [[]]\n stack = [expr]\n\n while stack:\n state = stack.pop()\n state_number = state_numbers[state]\n # print(\"=> state\", state_number, \":\", state, type(state))\n for derivative_class in state.derivative_classes():\n\n assert isinstance(derivative_class, IntegerSet)\n # print(\" -> derivative_class\", derivative_class)\n\n if not derivative_class:\n continue\n\n # First symbol in this class:\n symbol = derivative_class.ranges[0][0]\n\n # Determine next state for this symbol class:\n next_state = state.derivative(symbol)\n\n # Add state if not yet present:\n if next_state not in state_numbers:\n states.append(next_state)\n 
state_numbers[next_state] = len(state_numbers)\n transitions.append([])\n stack.append(next_state)\n\n # Add transitions to next state:\n next_state_number = state_numbers[next_state]\n for first, last in derivative_class.ranges:\n transitions[state_number].append(\n (first, last, next_state_number)\n )\n\n transitions[state_number].sort()\n\n accepts = [state.nullable() for state in states]\n error = state_numbers[expr.null]\n\n return transitions, accepts, error", "def parse_expression(expression: str) -> nodes.ExpNode:\r\n\r\n tokens = tokenize(expression)\r\n node = build_expression_tree(tokens)\r\n\r\n return node", "def translate(expr):\n return from_python(ast.parse(expr))", "def eval_expr1(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n if stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def execute(self, root):\n assert isinstance(root, Node)\n\n null = Null()\n\n def optional(expression):\n \"\"\"return True iff expression is optional\"\"\"\n return any(e.data == 'optional' for e in expression.children)\n\n def concatenate(expression, stream):\n \"\"\"evaluate query expressions and concatenate results\"\"\"\n # fork the stream for each subexpression\n streams = itertools.tee(stream, len(expression.children))\n return itertools.chain.from_iterable(\n evaluate(expression, stream)\n for expression, stream in zip(expression.children, streams)\n )\n\n def iterate(expression, stream):\n \"\"\"iterate over json stream\"\"\"\n for node in stream:\n itr = (\n iter(node)\n if isinstance(node, List) else\n iter(node.values())\n if isinstance(node, Object) else\n iter([])\n if optional(expression) else\n None\n )\n if not itr:\n raise TypeError(\n 'cannot iterate over {}'.format(\n node.__class__.__name__\n )\n )\n for child in itr:\n yield child\n\n def indexer(expression, stream):\n \"\"\"extract elements from json containers\"\"\"\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index 
expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)\n\n def properties(expression, stream):\n \"\"\"extract values from json objects\"\"\"\n def index(expression, stream):\n item = expression.children[0].children[0]\n for node in stream:\n if isinstance(node, Object):\n yield node.get(item, null)\n elif not optional(expression):\n itype = expression.children[0].data\n if itype == 'cname':\n itype = 'string'\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__, itype\n )\n )\n\n for expression in expression.children:\n stream = index(expression, stream)\n\n for node in stream:\n yield node\n\n def primitive(expression):\n \"\"\"return a primitive type\"\"\"\n expression = expression.children[0]\n if expression.data == 'null':\n return null\n elif expression.data == 'boolean':\n return expression.children[0] == 'true'\n elif expression.data == 'string':\n return expression.children[0][1:-1]\n elif expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n return float(expression.children[0])\n assert False, 'bad primitive {}'.format(expression)\n\n def evaluate(expression, stream):\n \"\"\"evaluate query expression over json stream\"\"\"\n assert expression.data == 'expression', expression\n assert len(expression.children) == 1\n\n expression = expression.children[0]\n\n if expression.data == 'identity':\n for node in stream:\n yield node\n\n elif expression.data == 'primitive':\n yield primitive(expression)\n\n elif expression.data == 'properties':\n for node in properties(expression, stream):\n yield node\n\n elif expression.data == 'indexer':\n for node in indexer(expression, stream):\n yield node\n\n elif expression.data == 'iterator':\n for node in iterate(expression, stream):\n yield node\n\n elif expression.data == 'concatenator':\n for node in concatenate(expression, stream):\n yield node\n\n else:\n assert False, 'bad expression {}'.format(expression)\n\n stream, pipeline = [root], self.tree.children[0]\n for expression in pipeline.children:\n stream = evaluate(expression, stream)\n\n for result in stream:\n yield result", "def batched_query(self, sql):\r\n\r\n result_sets = []\r\n messages = \"\"\r\n query = []\r\n last_query=\"\"\r\n\r\n batches = re.split(\"^\\s*(GO(?:\\s+[0-9]+)?)\\s*(?:--.*)?$\",sql,flags=re.M|re.I)\r\n # print(batches)\r\n for b in batches:\r\n if b.upper() == \"GO\":\r\n # execute one\r\n query.append(last_query)\r\n continue\r\n else:\r\n match = re.match(\"^GO\\s+([0-9]+)$\",b,re.I)\r\n if match is not None:\r\n #execute many\r\n for i in range(0,int(match.group(1))):\r\n query.append(last_query)\r\n else:\r\n # not a Go statment\r\n last_query = b\r\n query.append(last_query)\r\n\r\n # print(query)\r\n for q in query:\r\n r = self.query(q)\r\n if r is not None:\r\n result_sets.extend(r)\r\n messages += self.messages\r\n\r\n self.messages = messages\r\n return result_sets", "def evaluate(expr):\n def isdigit(ch):\n try:\n int(ch)\n return True\n except ValueError:\n return False\n\n def evaluate_helper(expr, index):\n ch = 
expr[index]\n if ch == '(':\n # complex\n index += 1 # move past (\n\n # get the left operand\n left, index = evaluate_helper(expr, index)\n opr = expr[index]\n index += 1 # move past the operator\n\n # get the right operand\n right, index = evaluate_helper(expr, index)\n index += 1 # to move past closing paranthesis\n if opr == '+':\n return left + right, index\n elif opr == '*':\n return left * right, index\n\n \n else:\n if isdigit(ch):\n value = 0\n while isdigit(ch):\n value = value * 10 + int(ch)\n index += 1\n if index < len(expr):\n ch = expr[index]\n else:\n break\n return value, index\n\n \n\n return evaluate_helper(expr, 0)[0]", "def create_expression(sv, tree): \r\n if not tree: return None # nothing to do\r\n o,A,B=tree\r\n\r\n if o==Obr: # bracketed expression: remove brackets \r\n return create_expression(sv, A) # RECURSIVE\r\n\r\n nam=tree_join(tree)\r\n verify_expression(tree, nam) # check name validity \r\n\r\n if nam in sv.Object and not sv.Object[nam].isnew: # don't replace existing name unless new user call\r\n nod=sv.Object[nam] # use old name \r\n return (nam, None, None) # replace expression with name \r\n \r\n nod=add_object(sv, nam) # create object (unless new user call)\r\n nod.isexpression=True\r\n nod.isnew=False # process only once\r\n \r\n # link expression (only for new nodes)\r\n if o==Comma: # special case: list: clause for each changing element\r\n li=[]\r\n for t in A:\r\n exprs=create_expression(sv, t) # RECURSIVE\r\n if exprs: li=li+[exprs]\r\n vlu=(Comma, li, None) # list of elements \r\n nod.clauses=[(Starttree,vlu)] # start clause for whole list ((Start, None, None), (Comma, li, None)) \r\n for t in li: # each term is a triplet\r\n if t and not is_fixed(t[0]):\r\n add_change_clause(sv, nod, t, vlu)\r\n\r\n return (nam, None, None) # name for the list\r\n\r\n # some sort of expression except a list\r\n exprsA=create_expression(sv, A)\r\n exprsB=create_expression(sv, B)\r\n vlu=(o, exprsA, exprsB) # reduce to a simple operation between two expressions \r\n\r\n # make start clauses, and change clause for non-fixed objects (do not repeat 'change')\r\n nod.clauses=[(Starttree, vlu)] # ((Start, None, None), vlu) \r\n if o in sv.Object and not is_fixed(o):\r\n add_change_clause(sv, nod, (o, None, None), vlu)\r\n if A and not is_fixed(A[0]):\r\n add_change_clause(sv, nod, exprsA, vlu)\r\n if B and B!=A and not is_fixed(B[0]):\r\n add_change_clause(sv, nod, exprsB, vlu)\r\n \r\n if o==Since: # special case: conditions for \"since\" \r\n pl=create_expression(sv, (Plus, exprsB, exprsA)) # RECURSIVE \r\n nod.clauses[-1]=((Change, pl, None), vlu) # when change(event+delay): (Since, exprsA, exprsB) \r\n add_change_clause(sv, nod, exprsB, vlu) # when change(event)...\r\n # n.b. 
changing delay during 'since' should have no effect\r\n \r\n return (nam, None, None) # replace expression with name \r", "def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr", "def expression_to_tree(expression):\n\n # break string into tokens, parsing parenthesized sub-expressions\n\n tokens = []\n current_token = \"\"\n in_quotes = False\n paren_count = 0\n paren_start = None\n\n for i, c in enumerate(expression):\n\n if c in QUOTES and paren_count == 0:\n if in_quotes:\n tokens.append(current_token + c)\n current_token = \"\"\n in_quotes = False\n else:\n in_quotes = True\n if current_token:\n tokens.append(current_token)\n current_token = c\n elif c == \" \" and not in_quotes and paren_count == 0:\n if current_token:\n tokens.append(current_token)\n current_token = \"\"\n elif c == \"(\":\n paren_count += 1\n if paren_count == 1:\n paren_start = i\n elif c == \")\":\n paren_count -= 1\n if paren_count == -1:\n raise RuntimeError(\"Unbalanced right parenthesis in expression\")\n if paren_count == 0:\n tokens.append(expression_to_tree(expression[paren_start + 1:i]))\n current_token = \"\"\n elif paren_count == 0:\n current_token += c\n\n if current_token:\n tokens.append(current_token)\n\n if paren_count > 0:\n raise RuntimeError(\"Unbalanced left parenthesis in expression\")\n\n # convert string tokens to ASTNodes\n\n nodes = []\n\n for token in tokens:\n\n if isinstance(token, ASTNode):\n nodes.append(token)\n continue\n\n # noinspection PyTypeChecker\n match = QUOTE_PATTERN.match(token)\n\n if token.upper() in OPERATOR_TOKENS:\n nodes.append(OPERATOR_TOKENS[token.upper()]())\n elif match:\n nodes.append(TextNode(match.group(\"text\")))\n else:\n raise RuntimeError(\n \"Invalid token `{}` in expression string\".format(token))\n\n # parse NOT tokens\n\n infix_nodes = []\n\n nodes.reverse()\n\n while nodes:\n node = nodes.pop()\n if isinstance(node, NotNode):\n node.left_child = nodes.pop()\n infix_nodes.append(node)\n\n # set up nodes as a stack\n\n infix_nodes.reverse()\n\n # shunting-yard\n\n operator_stack = []\n operand_stack = []\n\n while infix_nodes:\n node = infix_nodes.pop()\n if isinstance(node, OperatorNode):\n if operator_stack and operator_stack[-1] >= node:\n operand_stack.append(operator_stack.pop())\n operator_stack.append(node)\n else:\n operand_stack.append(node)\n\n operand_stack.extend(operator_stack[::-1])\n\n operand_stack.reverse()\n\n output_stack = []\n\n while operand_stack:\n node = operand_stack.pop()\n if isinstance(node, OperatorNode):\n node.left_child = output_stack.pop()\n node.right_child = output_stack.pop()\n output_stack.append(node)\n\n return 
output_stack.pop()" ]
[ "0.5711968", "0.5664347", "0.538123", "0.52660775", "0.52169806", "0.51955956", "0.51791394", "0.5158939", "0.51581794", "0.51150316", "0.51042306", "0.5073506", "0.50646734", "0.505281", "0.5020296", "0.49805304", "0.49764872", "0.4972279", "0.49443945", "0.49404278", "0.4935713", "0.49290004", "0.49067935", "0.48695728", "0.48577306", "0.4837926", "0.48368463", "0.48344174", "0.48304597", "0.48287347" ]
0.6603545
0
This will continue splitting the tree until every leaf node is pure and the training data is perfectly characterized by the decision tree
def train(self):
    max_tuple = self.max_gain()
    # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop
    while max_tuple.gain != 0:
        max_tuple.node.split(max_tuple.attribute)
        max_tuple = self.max_gain()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif 
{}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n 
max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([2]), set([0, 1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:1, 1:1, 2:0})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.48)", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def _build_tree_recursive(self, tree, cur_node, X, y, depth):\r\n n_samples, n_features = X.shape\r\n leaf_reached = False\r\n\r\n # Evaluates if all instances belong to the same class\r\n if utils.all_instances_same_class(y):\r\n leaf_reached = True\r\n\r\n # Evaluates the min_samples_split stopping criteria\r\n if n_samples < self._min_samples_split:\r\n leaf_reached = True\r\n\r\n # Evaluates the depth stopping criteria\r\n if self._max_depth is not None and depth >= self._max_depth:\r\n leaf_reached = True\r\n\r\n best_split = None\r\n if not leaf_reached:\r\n best_split = self._find_split(X, y, n_features)\r\n if best_split is None or best_split.gain < self._min_gain_split:\r\n leaf_reached = True\r\n\r\n if leaf_reached:\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n result = np.argmax(samples)\r\n new_leaf = DecisionLeaf(samples=samples, depth=depth, result=result)\r\n tree.nodes.append(new_leaf)\r\n\r\n else:\r\n is_categorical = utils.categorical_data(X[:, best_split.feature_id])\r\n samples = utils.bin_count(y, length=self._n_classes)\r\n\r\n if is_categorical:\r\n new_fork = DecisionForkCategorical(samples=samples, 
depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_categorical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n else:\r\n new_fork = DecisionForkNumerical(samples=samples, depth=depth,\r\n feature_id=best_split.feature_id, value=best_split.value,\r\n gain=best_split.gain)\r\n X_left, X_right, y_left, y_right = split_numerical_data(X, y, best_split.feature_id, best_split.value)\r\n\r\n tree.nodes.append(new_fork)\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = self._build_tree_recursive(tree, node_to_split, X_left, y_left, depth=depth+1)\r\n tree.nodes[cur_node].left_branch = new_branch\r\n\r\n tree.last_node_id += 1\r\n node_to_split = tree.last_node_id\r\n new_branch = self._build_tree_recursive(tree, node_to_split, X_right, y_right, depth=depth+1)\r\n tree.nodes[cur_node].right_branch = new_branch\r\n\r\n return cur_node", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def build_tree(rows: list) -> DecisionNode or Leaf:\n info_gain, question = get_best_split(rows)\n\n # If no info is gained just return a leaf node with remaining rows\n if info_gain == 0:\n return Leaf(rows)\n\n true_rows, false_rows = partition(rows, question)\n false_branch = build_tree(false_rows)\n true_branch = build_tree(true_rows)\n return DecisionNode(question, rows, true_branch, false_branch)", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb 
#If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def prep_tree_data(self, number: int):\n filename = \"data-before-normalization-{}-out-of-7.csv\".format(number)\n path = str(DATA_PATH.joinpath(\"data-splitted\", filename))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n assessments = [x for x in df.columns.values if x.split(\"_\")[0] == \"assessment\"]\n df['average_score'] = df[assessments].mean(skipna=True, axis=1)\n for assessment in assessments: # somehow he doesn't want to fillna in a batch?\n df[assessment].fillna(df['average_score'], inplace=True)\n clicks = [x for x in df.columns.values if x.split(\"_\")[0] == \"vle\"]\n df['vle_click_average'] = df[clicks].mean(skipna=True, axis=1)\n for click in clicks: # somehow he doesn't want to fillna in a batch?\n df[click].fillna(df['vle_click_average'], inplace=True)\n df.dropna()\n\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Fail': 0, 'final_result__Withdrawn': 2,\n 'final_result__Pass': 1, 'final_result__Distinction': 3}\n self.change_oh_cat(\"final_result\", df, result_order)\n df[\"final_result\"].replace(2, 0, inplace=True)\n df[\"final_result\"].replace(3, 1, inplace=True)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, 
cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def create_subtree(self, criterion):\n\n def _get_values_to_split(splits_values):\n values_to_split = {}\n for split_index, split_values in enumerate(splits_values):\n for value in split_values:\n values_to_split[value] = split_index\n return values_to_split\n\n def _get_splits_samples_indices(num_splits, separation_attrib_index, values_to_split,\n valid_samples_indices, samples):\n splits_samples_indices = [[] for _ in range(num_splits)]\n for sample_index in valid_samples_indices:\n sample_value_in_split_attrib = samples[sample_index][separation_attrib_index]\n try:\n splits_samples_indices[values_to_split[\n sample_value_in_split_attrib]].append(sample_index)\n except KeyError:\n print('Should not get here. 
Sample {} has value {} at attribute # {}, '\n 'but this value is unknown to the decision tree.'.format(\n sample_index,\n sample_value_in_split_attrib,\n separation_attrib_index))\n sys.exit(1)\n return splits_samples_indices\n\n def _get_numeric_splits_samples_indices(separation_attrib_index, mid_point,\n valid_samples_indices, samples):\n splits_samples_indices = [[], []]\n for sample_index in valid_samples_indices:\n sample_value_in_split_attrib = samples[sample_index][separation_attrib_index]\n if sample_value_in_split_attrib <= mid_point:\n splits_samples_indices[0].append(sample_index)\n else:\n splits_samples_indices[1].append(sample_index)\n return splits_samples_indices\n\n def _has_multiple_nominal_values(values_num_samples):\n return sum(num_samples > 0 for num_samples in values_num_samples) > 1\n\n def _has_multiple_numeric_values(valid_samples_indices, sample, attrib_index):\n values_seen = set()\n for sample_index in valid_samples_indices:\n sample_value = sample[sample_index][attrib_index]\n if sample_value not in values_seen:\n if values_seen:\n return True\n values_seen.add(sample_value)\n return False\n\n def _has_enough_samples_in_second_largest_class(class_index_num_samples,\n most_common_int_class):\n second_largest = max(num_samples\n for class_index, num_samples in enumerate(class_index_num_samples)\n if class_index != most_common_int_class)\n return second_largest >= MIN_SAMPLES_SECOND_LARGEST_CLASS\n\n\n # Is it time to stop growing subtrees?\n if (self.max_depth_remaining <= 0\n or self.num_valid_samples < self._min_samples_per_node\n or self.number_non_empty_classes == 1\n or (USE_MIN_SAMPLES_SECOND_LARGEST_CLASS\n and not _has_enough_samples_in_second_largest_class(\n self.class_index_num_samples,\n self.most_common_int_class))):\n return None\n\n # If a valid attribute has only one value, it should be marked as invalid from this node on.\n num_valid_nominal_attributes = 0\n for attrib_index, is_valid_nominal_attribute in enumerate(self.valid_nominal_attribute):\n if not is_valid_nominal_attribute:\n continue\n if (not _has_multiple_nominal_values(\n self.contingency_tables[attrib_index].values_num_samples)):\n self.valid_nominal_attribute[attrib_index] = False\n else:\n num_valid_nominal_attributes += 1\n\n num_valid_numeric_attributes = 0\n for attrib_index in range(len(self.valid_numeric_attribute)):\n if not self.valid_numeric_attribute[attrib_index]:\n continue\n if not _has_multiple_numeric_values(self.valid_samples_indices,\n self.curr_dataset.samples,\n attrib_index):\n self.valid_numeric_attribute[attrib_index] = False\n else:\n num_valid_numeric_attributes += 1\n\n # If there are no valid attributes, this node should be a leaf.\n if not num_valid_nominal_attributes and not num_valid_numeric_attributes:\n return None\n\n if self._use_stop_conditions:\n num_valid_attributes = sum(self.curr_dataset.valid_numeric_attribute)\n # Attributes which are valid (`True`) in `new_valid_nominal_attribute` and invalid\n # (`False`) in `new_valid_nominal_attribute_incl_chi_sq_test` should not be used to\n # split at this node, but could be used to split in descendant nodes.\n new_valid_nominal_attribute = self.valid_nominal_attribute[:]\n new_valid_nominal_attribute_incl_chi_sq_test = self.valid_nominal_attribute[:]\n for (attrib_index,\n is_valid_nominal_attribute) in enumerate(self.valid_nominal_attribute):\n if is_valid_nominal_attribute:\n (is_valid_num_samples,\n is_valid_chi_sq_and_num_samples) = (self._is_attribute_valid(\n attrib_index,\n 
min_allowed_in_two_largest=MIN_SAMPLES_IN_SECOND_MOST_FREQUENT_VALUE))\n if is_valid_chi_sq_and_num_samples:\n num_valid_attributes += 1\n elif is_valid_num_samples:\n new_valid_nominal_attribute_incl_chi_sq_test[attrib_index] = False\n else:\n new_valid_nominal_attribute[attrib_index] = False\n new_valid_nominal_attribute_incl_chi_sq_test[attrib_index] = False\n self.valid_nominal_attribute = new_valid_nominal_attribute_incl_chi_sq_test\n if num_valid_attributes == 0:\n return None\n\n # Get best split. Note that self is the current TreeNode.\n best_split = criterion.select_best_attribute_and_split(self)\n\n if math.isinf(best_split.criterion_value):\n # Stop condition when there is no valid attribute with more than one value (then\n # best_split.criterion_value is default, which is +- inf).\n return None\n\n if self.curr_dataset.valid_numeric_attribute[best_split.attrib_index]:\n # NUMERIC ATTRIBUTE\n last_left_value = list(best_split.splits_values[0])[0]\n first_right_value = list(best_split.splits_values[1])[0]\n mid_point = 0.5 * (last_left_value + first_right_value)\n splits_samples_indices = _get_numeric_splits_samples_indices(\n best_split.attrib_index,\n mid_point,\n self.valid_samples_indices,\n self.curr_dataset.samples)\n # Save this node's split information.\n self.node_split = NodeSplit(best_split,\n None,\n mid_point)\n\n else:\n # NOMINAL ATTRIBUTE\n\n # Calculate a list containing the inverse information of best_split.splits_values: here,\n # given a value, we want to know to which split it belongs\n values_to_split = _get_values_to_split(best_split.splits_values)\n\n splits_samples_indices = _get_splits_samples_indices(len(best_split.splits_values),\n best_split.attrib_index,\n values_to_split,\n self.valid_samples_indices,\n self.curr_dataset.samples)\n # Save this node's split information.\n self.node_split = NodeSplit(best_split, values_to_split, None)\n\n # Create subtrees\n self.is_leaf = False\n if self._use_stop_conditions:\n # Any attribute that has enough samples in the second most frequent value could pass the\n # chi-square test in a descendant node, thus we don't send the information of chi-square\n # test to child nodes.\n old_valid_nominal_attribute = self.valid_nominal_attribute[:]\n self.valid_nominal_attribute = new_valid_nominal_attribute\n else:\n old_valid_nominal_attribute = self.valid_nominal_attribute\n for curr_split_samples_indices in splits_samples_indices:\n self.nodes.append(TreeNode(self.curr_dataset,\n curr_split_samples_indices,\n self.valid_nominal_attribute[:],\n self.valid_numeric_attribute[:],\n self.max_depth_remaining - 1,\n self._min_samples_per_node,\n self._use_stop_conditions,\n self._max_p_value_chi_sq))\n self.nodes[-1].create_subtree(criterion)\n self.valid_nominal_attribute = old_valid_nominal_attribute", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def split(root, Dk, maxDepth, minRows, 
currDepth):\n \n left, right = root['branches']\n del(root['branches'])\n \n# if not left and not right:\n# return\n \n # Check if the node is a leaf\n if not len(left): \n root['left'] = root['right'] = getLeafClass(right)\n return\n elif not len(right):\n root['left'] = root['right'] = getLeafClass(left)\n return\n \n # Check for max depth\n if(currDepth >= maxDepth):\n root['left'], root['right'] = getLeafClass(left), getLeafClass(right)\n return\n \n # Process left branch\n if(len(left) <= minRows):\n root['left'] = getLeafClass(left)\n else:\n root['left'] = findNextSplit(left, Dk)\n split(root['left'], Dk, maxDepth, minRows, currDepth + 1)\n \n # Process right branch\n if(len(right) <= minRows):\n root['right'] = getLeafClass(right)\n else:\n root['right'] = findNextSplit(right, Dk)\n split(root['right'], Dk, maxDepth, minRows, currDepth + 1)", "def _generate_leaf_node_predictions(self, data):\n\n raise NotImplementedError(\n \"_generate_leaf_node_predictions not implemented in DummyLeafNodeScaledConformalPredictor\"\n )", "def test_decision_tree(train,test,maxnodes=None):\n tree = DecisionTree()\n tree.maxnodes = maxnodes\n errors = tree.learn(train,'label')\n print \"Decision tree makes\",errors,\"errors\"\n print \"Depth\",tree.depth(),\"nodes\",tree.numNodes()\n if tree.numNodes() < 100:\n tree.pprint()\n if errors > 0:\n print \"Training errors:\"\n for id,e in enumerate(train.entries):\n res = tree.predict(e[:-1])\n if res != e[-1]:\n if len(e[:-1]) > 10:\n print \" Error on\",id,\"prediction\",res\n else:\n print \" Error on\",e[:-1],\"prediction\",res\n print \"Testing error:\"\n tp,tn,fp,fn = 0,0,0,0\n for e in test.entries:\n res = tree.predict(e[:-1])\n if res and e[-1]:\n tp += 1\n elif res and not e[-1]:\n fp += 1\n elif not res and e[-1]:\n fn += 1\n else:\n tn += 1\n Ntest = len(test.entries)\n print \"True +: %g, True -: %g\"%(float(tp)/Ntest,float(tn)/Ntest) \n print \"False -: %g, False +: %g\"%(float(fn)/Ntest,float(fp)/Ntest)\n print \"Overall error: %g\"%(float(fn+fp)/Ntest,)", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = 
classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def __init__(self,\r\n max_depth=None,\r\n min_samples_split=2,\r\n min_samples_leaf=1,\r\n split_criterion=None,\r\n feature_selection=None,\r\n feature_prob=None,\r\n min_gain_split=0,\r\n split_chooser=None):\r\n self._n_classes = None\r\n self._max_depth = None\r\n self._split_criterion = None\r\n self._split_chooser = None\r\n self._feature_selection = None\r\n self._min_samples_split = None\r\n self._min_samples_leaf = None\r\n self._min_gain_split = None\r\n self._feature_prob = None\r\n\r\n if max_depth is None or max_depth > 0:\r\n self._max_depth = max_depth\r\n else:\r\n raise(ValueError(\"The depth of the tree must be greater than 0.\"))\r\n\r\n if split_criterion is not None:\r\n self._split_criterion = split_criterion\r\n else:\r\n raise (ValueError(\"The split criterion can not be None.\"))\r\n\r\n if split_chooser is not None:\r\n self._split_chooser = split_chooser\r\n else:\r\n raise (ValueError(\"The split chooser can not be None.\"))\r\n\r\n if feature_selection is not None:\r\n self._feature_selection = feature_selection\r\n else:\r\n raise (ValueError(\"The feature selection can not be None.\"))\r\n\r\n if min_samples_split is not None and min_samples_split > 1:\r\n self._min_samples_split = min_samples_split\r\n else:\r\n raise(ValueError(\"The min_samples_split must be greater than 1.\"))\r\n\r\n if min_samples_leaf is not None and min_samples_leaf > 0:\r\n self._min_samples_leaf = min_samples_leaf\r\n else:\r\n raise(ValueError(\"The min_samples_leaf must be greater than 0.\"))\r\n\r\n if min_gain_split is not None and min_gain_split >= 0:\r\n self._min_gain_split = min_gain_split\r\n else:\r\n raise(ValueError(\"The min_gain_split must be greater or equal than 0.\"))\r\n\r\n if feature_prob is not None:\r\n self._feature_prob = feature_prob", "def __traverse_tree(self, node, sample_instance):\n if node.is_leaf:\n return node.predicted_class\n split = node.integer_splitting_rule\n feature = node.feature_index_split\n\n # left node gets assigned to data that is less than the integer\n # splitting rule within that feature\n if sample_instance[feature] < split:\n prediction = self.__traverse_tree(node.left_child,\n sample_instance)\n else:\n prediction = self.__traverse_tree(node.right_child,\n sample_instance)\n return prediction", "def data_split(df, best_feature, info_gain_dict, dt_dict,\r\n curr_node, depth, continous = False):\r\n \r\n depth -= 1\r\n # decrease the depth count\r\n no_data = False\r\n # default flag for data check\r\n match_threshold_df = df[df[best_feature] == info_gain_dict[best_feature][0]]\r\n # subset the data if threshold is matched\r\n if not len(match_threshold_df):\r\n # no more data points\r\n no_data = True\r\n match_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n 
pass\r\n \r\n mismatch_threshold_df = df[df[best_feature] != info_gain_dict[best_feature][0]]\r\n # subset the data if there is a mismatch\r\n if not len(mismatch_threshold_df):\r\n # if no more data points\r\n no_data = True\r\n mismatch_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n decision_tree(match_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"equal\", depth=depth, no_data = no_data)\r\n # function call to grow tree on the left side\r\n decision_tree(mismatch_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"not_equal\", depth=depth, no_data = no_data)\r\n # function call to grow the tree on the right side\r", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def prune_path(clf, X, y, max_n_leaves=10, n_iter=10,\n test_size=0.1, random_state=None, n_jobs=1):\n \n\n from sklearn.base import clone\n from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit\n from sklearn.metrics import roc_auc_score,mean_squared_error\n from multiprocessing.dummy import Pool as ThreadPool\n from itertools import repeat\n import pandas as pd\n #import copy\n \n #classification score\n def my_auc(estimator, X, y):\n y_score = estimator.predict_proba(X)[:,1] # You could also use the binary predict, but probabilities should give you a more realistic score.\n return 
roc_auc_score(y, y_score)\n \n #regression score\n def my_nmse(estimator, X, y):\n y_pre = estimator.predict(X) # You could also use the binary predict, but probabilities should give you a more realistic score.\n return -mean_squared_error(y, y_pre)\n \n\n if len(np.unique(y)) == 2: \n scoring_fuc = my_auc\n \n else:\n scoring_fuc = my_nmse\n \n def multip_run(fuction,task_zip,n_jobs = 1):\n\n #Multi-process Run\n\n pool = ThreadPool(processes=n_jobs)\n results = pool.starmap(fuction, task_zip)\n pool.close()\n pool.join()\n return results \n\n def OneFoldCut(clf,X_train, y_train,X_test,y_test,max_n_leaves):\n estimator = clone(clf)\n \n fitted = estimator.fit(X_train, y_train)\n \n if max_n_leaves < get_n_leaves(fitted):\n n_leaves = max_n_leaves\n \n else:\n n_leaves = get_n_leaves(fitted)\n \n print('###### Iters true start leaves is %d #######' % n_leaves)\n \n #cut_num = list(range(2,n_leaves, 1))\n cut_num = list(range(n_leaves-1,1,-1))\n #n = len(cut_num)\n loc_indexs = []\n loc_scores = []\n for i in cut_num:\n #clf1 = copy.deepcopy(fitted)\n #clf1 = clone(fitted)\n #clf1.prune(i)\n fitted.prune(i)\n onescore = scoring_fuc(fitted,X_test,y_test)\n #onescore = scoring_fuc(clf1,X_test,y_test)\n loc_scores.append(onescore)\n loc_indexs.append(i)\n \n S = pd.DataFrame(loc_scores,index=loc_indexs)\n\n return S\n\n\n #scores = list()\n if len(np.unique(y)) == 2: \n kf = StratifiedShuffleSplit(y,\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n else:\n kf = ShuffleSplit(len(y),\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n \n X_trains = [X[tr] for tr,ts in kf]\n y_trains = [y[tr] for tr,ts in kf]\n \n X_tests = [X[ts] for tr,ts in kf]\n y_tests = [y[ts] for tr,ts in kf]\n \n task_zip = zip(repeat(clf),\n X_trains,\n y_trains,\n X_tests,\n y_tests,\n repeat(max_n_leaves))\n \n scores = multip_run(OneFoldCut,task_zip,n_jobs = n_jobs)\n \n df = pd.concat(scores,axis=1)\n df.columns = range(len(df.columns))\n\n return df #zip(*scores)", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def build_tree(self, rows, attribute_list, depth=1, parent_rows=None):\n if len(rows) == 0:\n if parent_rows is not None:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n else:\n raise ValueError(\"Reached a decision node which had zero rows but was not\"\n \"provided with a parent node\")\n if self.max_depth is not None and depth == self.max_depth:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n try:\n splitting_func = {\"entropy\": self.get_entropy,\n \"gini\": self.get_gini}.get(self.splitting_criteria)\n except KeyError:\n print(\"Program only supports entropy and gini as splitting criteria. 
Provided criteria was \" +\n self.splitting_criteria)\n raise ValueError(\"Incorrect parameter value passed for splitting criteria\")\n\n value_before_split = splitting_func(rows)\n\n if len(attribute_list) == 0 or value_before_split == 0:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n if len(attribute_list) == 1 and attribute_list[0] == self.target_attribute:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n best_gain = -np.inf\n best_criteria = None\n best_attribute_partitions = None\n\n # Find the attribute having the best split \"\n\n best_attribute_partitions, best_criteria = self.get_best_attribute_for_split(attribute_list,\n best_attribute_partitions,\n best_criteria, best_gain,\n rows, splitting_func,\n value_before_split)\n branches = {}\n for domain_value in self.attribute_domains[best_criteria]:\n branch_attr_list = list(attribute_list)\n branch_attr_list.remove(best_criteria)\n if domain_value in best_attribute_partitions.keys():\n partition_dataset = best_attribute_partitions[domain_value]\n branches[domain_value] = self.build_tree(rows=partition_dataset,\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n else:\n branches[domain_value] = self.build_tree(rows=[],\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n return DecisionTree.DecisionNode(attribute_name=best_criteria, branches=branches)", "def make_tree(self, X_subset, y_subset, depth):\n \n # YOUR CODE HERE\n #self.depth += 1\n if depth < self.max_depth and X_subset.shape[0] >= self.min_samples_split:\n \n best_feature, best_threshold = self.choose_best_split(X_subset, y_subset)\n print('depth = {}, size parent node = {}'.format(depth, len(X_subset)))\n print('best_feature = {}, best_threshold = {}'.format(best_feature, best_threshold))\n new_node = Node(best_feature, best_threshold)\n \n left_child, right_child = self.make_split(best_feature, best_threshold, X_subset, y_subset)\n new_node.left_child = self.make_tree(*left_child, depth+1)\n new_node.right_child = self.make_tree(*right_child, depth+1)\n \n else: # we have a leaf\n new_node = Node(-1, -1) # We flag leaf nodes by setting feature_index and threshold to -1\n new_node.value = self.predicted_values(y_subset)\n \n if self.classification:\n new_node.proba = np.mean(y_subset, axis=0)\n \n # We reduce the depth to compensate for the two calls to self.depth += 1 we make on\n # the same level for left_child and right_child.\n #self.depth -= 1\n \n return new_node" ]
[ "0.736419", "0.6879376", "0.68064624", "0.66134644", "0.65553546", "0.6515525", "0.6498684", "0.6492946", "0.6436828", "0.6393142", "0.6376377", "0.6344093", "0.63422775", "0.62961286", "0.62934136", "0.62801987", "0.62608254", "0.6223276", "0.61976653", "0.6194716", "0.61382735", "0.6126852", "0.6083017", "0.6081737", "0.6076603", "0.60597557", "0.6033142", "0.6023072", "0.6019416", "0.6017664" ]
0.7007727
1
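A minimal sketch of the scorer-selection idea shown in the pruning snippet above (ROC AUC for binary targets, negative MSE otherwise); the helper name and the predict_proba interface are assumptions for illustration, not part of any retrieved document.

import numpy as np
from sklearn.metrics import roc_auc_score, mean_squared_error

def make_scorer_for(y):
    if len(np.unique(y)) == 2:
        # Binary target: score by ROC AUC of the positive-class probability.
        def scorer(estimator, X, y_true):
            return roc_auc_score(y_true, estimator.predict_proba(X)[:, 1])
    else:
        # Regression target: score by negative mean squared error (higher is better).
        def scorer(estimator, X, y_true):
            return -mean_squared_error(y_true, estimator.predict(X))
    return scorer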
If the node has children it will return the (node, attribute, gain) tuple of the child with the highest gain. If the node does not have children and is not pure, it will return the (node, attribute, gain) tuple with itself as the node and the highest heuristic score of splitting on any of its attributes as the gain. If the node is pure, it will return (None, '', 0), as it can no longer be split.
def max_gain(self):
    if self.val1:
        val1_gain_tuple, val0_gain_tuple = self.val1.max_gain(), self.val0.max_gain()
        if val1_gain_tuple.gain > val0_gain_tuple.gain:
            return val1_gain_tuple
        else:
            return val0_gain_tuple
    elif self.attributes:
        filtered_data = filter_data(self.data, self.ancestors)
        max_attribute, max_gain = max([(attribute, self.heuristic(self, attribute)) for attribute in self.attributes], key=lambda x: x[1])
        return gain_tuple(self, max_attribute, max_gain)
    return gain_tuple(None, '', 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_child(self, node):\n ucb_values = []\n for action, child in node.children.items():\n if node.state.player_turn == 1:\n if child.n_visits == 0:\n ucb_max = float('inf')\n else:\n ucb_max = self.calculate_ucb_max(node, action)\n ucb_values.append((ucb_max, action, child))\n else:\n if child.n_visits == 0:\n ucb_min = float('-inf')\n else:\n ucb_min = self.calculate_ucb_min(node, action)\n ucb_values.append((ucb_min, action, child))\n # Sort the list based on the ucb score\n ucb_values.sort(key=lambda t: t[0])\n if node.state.player_turn == 1:\n best_ucb, best_action, best_child = ucb_values[-1]\n else:\n best_ucb, best_action, best_child = ucb_values[0]\n return best_action, best_child", "def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R", "def findBestChild(node, is_exploration):\n best_score = -maxsize\n best_child_node = None\n for child_node in node.getChildrenNodes():\n if is_exploration:\n const_c = 1 / sqrt(2)\n else:\n const_c = 0\n \n score = child_node.getQualityValue() / child_node.getVisitedTimes() + \\\n const_c * sqrt(2 * log(node.getVisitedTimes()) / child_node.getVisitedTimes())\n \n if score > best_score:\n best_score = score\n best_child_node = child_node\n\n return best_child_node", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def _select(node):\n return max(node.children.items(), key=lambda child: Tree._cal_uct(child[1]))", "def calculate_best_child(self):\n if not len(self.children):\n raise Exception(\"No actions can be done.\")\n # Get a list of (child, maxWinStrength) tuples. 
Then sort it.\n childValueTupleList = [(child, child.get_max_win_strength()) for child in self.children]\n childValueTupleList.sort(reverse=True, key=lambda tup: tup[1])\n # Find all children with wStrength equal to the best, then shuffle them to choose a random choice.\n equalButBestTup = [tup for tup in childValueTupleList if tup[1] == childValueTupleList[0][1]]\n shuffle(equalButBestTup)\n bestChild = equalButBestTup[0][0]\n self.bestChild = bestChild", "def get_best_child(self):\n if self.bestChild is None:\n self.calculate_best_child()\n return self.bestChild", "def selection(self):\n bestScore = -10000000.0\n bestChildren = None\n\n for child in self.childNodes:\n score = child.wins / child.visits + np.sqrt(2) * np.sqrt(\n np.log(self.visits) / child.visits)\n if score > bestScore:\n bestChildren = child\n bestScore = score\n return bestChildren", "def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val", "def get_mostest_children(item, props=('npix','f_sum'), mostest='f_sum'):\n item.get_dens = lambda: item.values().sum()/item.get_npix()**(1.5)\n item.get_f_sum = lambda: item.values().sum()\n if item.is_leaf:\n d = dict([(p,[get(item,p)]) for p in props])\n d['branch'] = [item]\n return d\n else:\n brightest = item.children[0]\n brightest.get_dens = lambda: brightest.values().sum()/item.get_npix()**(1.5)\n brightest.get_f_sum = lambda: brightest.values().sum()\n for child in item.children[1:]:\n child.get_dens = lambda: child.values().sum()/item.get_npix()**(1.5)\n child.get_f_sum = lambda: child.values().sum()\n if get(child,mostest) > get(brightest,mostest):\n brightest = child\n brightest_props = get_mostest_children(brightest)\n d = dict([(p,[get(item,p)] + brightest_props[p])\n for p in props])\n d['branch'] = [item] + brightest_props['branch']\n return d", "def max_child(self, index):\n if self.empty():\n return None\n if self._has_left(index):\n left = self._left(index)\n large = left\n if self._has_right(index):\n right = self._right(index)\n if self._data[right] == self._data[left]:\n large = right\n if self._data[right] > self._data[left]:\n large = right\n return large\n return None", "def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + 
Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split", "def deterministical_decide(self):\n children = self.root_node.my_children\n best = children[0]\n for c in children:\n if c.visit_time > best.visit_time:\n best = c\n return best.parent_action", "def choose_split_value(attrs, classes):\n indices = np.argsort(attrs)\n classes = classes[indices]\n attrs = attrs[indices]\n max_gain = 0.0\n max_gain_value = None\n for i in range(len(attrs) - 1):\n if classes[i] != classes[i+1]:\n mean = (attrs[i] + attrs[i+1]) / 2.0\n gain = inform_gain(attrs, classes, mean)\n if gain > max_gain:\n max_gain = gain\n max_gain_value = mean\n return max_gain_value, max_gain", "def find_significant_children(tree, node):\n if node not in tree.children:\n return None\n smax = 1\n c1, c2 = tree.children[node]\n sch = c1, c2\n while tree.population[c1] > 1 or tree.population[c2] > 1:\n if tree.population[c1] >= tree.population[c2]:\n small, big = c2, c1\n else:\n small, big = c1, c2\n if tree.population[small] >= smax:\n smax = tree.population[small]\n sch = small, big\n c1, c2 = tree.children[big]\n return sch", "def best_move(self) -> tuple:\n if self.root_state.winner != GameMeta.PLAYERS['none']:\n return GameMeta.GAME_OVER\n\n # choose the move of the most simulated node breaking ties randomly\n max_value = max(self.root.children.values(), key=lambda n: n.N).N\n max_nodes = [n for n in self.root.children.values() if n.N == max_value]\n bestchild = choice(max_nodes)\n return bestchild.move", "def select(self):\n best_qsa_star_add = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n qsa_star_add = qsa_star + 0.2 * self.c * sqrt(log(self.visits) / c.visits)\n if qsa_star_add > best_qsa_star_add:\n best_qsa_star_add = qsa_star_add\n best_node = c\n return best_node", "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def _best(self, Population_Child_score):\r\n return np.array( Population_Child_score ).argsort()[::-1][:self.ParentsSize]", "def _search(self, node: Node, search_depth: int = 1) -> (float, list):\n if not node.children or search_depth == 0:\n return node.tot_reward / node.num_samples, []\n elif search_depth == 1:\n max_val = -math.inf\n max_actions = []\n for action, child in 
node.children.items():\n node_val = child.tot_reward / child.num_samples\n if node_val > max_val:\n max_val = node_val\n max_actions = [action]\n elif node_val == max_val:\n max_actions.append(action)\n max_action = random.choice(max_actions)\n child = node.children[max_action]\n return child.tot_reward / child.num_samples, [max_action]\n best_reward = -math.inf\n best_act_seq = []\n for action, child in node.children.items():\n child_reward, child_act_seq = self._search(child, search_depth - 1)\n if child_reward > best_reward:\n best_act_seq = [action] + child_act_seq\n best_reward = child_reward\n return best_reward, best_act_seq", "def getBestAction(self, currNode):\n bestScore = None\n bestAction = None\n for child in currNode.children:\n childScore = child.cumulativeScore\n if (bestAction is None or childScore > bestScore):\n bestScore = childScore\n bestAction = child.lastAction\n return bestAction", "def selection_policy(node, c_uct):\n\n if node.is_leaf():\n return node\n\n max_child = None\n max_val = float('-inf')\n total_visit = 1\n\n for edge in node.child_edges:\n total_visit += edge.visit_count\n\n # Determine best child to move to according to UCT\n for edge in node.child_edges:\n if edge.visit_count == 0:\n edge_val = 0\n else:\n edge_val = edge.total_reward / edge.visit_count\n edge_val += c_uct * math.sqrt(2 * math.log(total_visit) / (1 + edge.visit_count))\n\n if edge_val > max_val:\n max_val = edge_val\n max_child = edge.child\n\n return max_child", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = 
right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def select_final(self):\n best_qsa_star = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n if qsa_star > best_qsa_star:\n best_qsa_star = qsa_star\n best_node = c\n return best_node.action", "def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None", "def mt_score_CHILD(signame):\n return ((signame, score(DE, LINCS, signame)))", "def most_visited_child(self):\n return max(self.children, key=lambda c: c.explore_count)", "def selection(self):\n selectednode = self.children.values()[0]\n selectedaction = self.children.keys()[0]\n maxValue = selectednode.toValue()\n \n for child in self.children.items():\n if(child[1].toValue() > maxValue):\n selectednode = child[1]\n maxValue = child[1].toValue()\n selectedaction = child[0]\n return selectednode, selectedaction", "def two_best_children(self,instance):\n\t\tif len(self.tree.children) == 0:\n\t\t\traise Exception(\"No children!\")\n\t\t\n\t\tself.utility.increment_counts(instance)\n\t\tchildren_cu = []\n\t\tfor i in range(len(self.tree.children)):\n\t\t\tself.tree.children[i].utility.increment_counts(instance)\n\t\t\tchildren_cu.append((self.utility.category_utility(self.tree.children[i]),i))\n\t\t\tself.tree.children[i].utility.decrement_counts(instance)\n\t\tself.utility.decrement_counts(instance)\n\t\tchildren_cu.sort(reverse=True)\n\n\t\tif len(self.tree.children) == 1:\n\t\t\treturn children_cu[0], None \n\n\t\treturn children_cu[0], children_cu[1]", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None" ]
[ "0.64669144", "0.63168865", "0.6179859", "0.6137272", "0.6127355", "0.6071936", "0.6051509", "0.6038578", "0.60306424", "0.5946005", "0.59143674", "0.5897368", "0.5833015", "0.5821131", "0.5816206", "0.58110994", "0.5801434", "0.57993925", "0.57883", "0.5769697", "0.57651377", "0.575078", "0.57408524", "0.57112914", "0.5690238", "0.568821", "0.5676862", "0.5673345", "0.5665715", "0.5658177" ]
0.6359626
1
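A small self-contained sketch of an entropy-based information-gain heuristic of the kind max_gain() maximises over attributes; the dict-based row layout and attribute names are assumptions, not the Node class's own data format.

from collections import Counter
import math

def entropy(labels):
    total = len(labels)
    return -sum((c / total) * math.log2(c / total) for c in Counter(labels).values())

def information_gain(rows, attribute, target):
    # rows: list of dicts with 0/1 values; gain = H(target) - sum_v p(v) * H(target | attribute=v)
    gain = entropy([r[target] for r in rows])
    for value in (0, 1):
        subset = [r[target] for r in rows if r[attribute] == value]
        if subset:
            gain -= (len(subset) / len(rows)) * entropy(subset)
    return gain

rows = [{"wind": 1, "play": 1}, {"wind": 1, "play": 1}, {"wind": 0, "play": 0}]
print(information_gain(rows, "wind", "play"))  # ~0.918 for this toy split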
This splits a node on the attribute "attribute"
def split(self, attribute):
    if attribute not in self.attributes:
        raise KeyError('Attribute not present in node')

    self.split_attr = attribute

    # list() is used to make a copy of the list instead of pointing to the same list
    child_attributes = list(self.attributes)
    child_attributes.remove(attribute)
    child1_ancestors = list(self.ancestors)
    child0_ancestors = list(self.ancestors)
    child1_ancestors.append(attribute_value(attribute, 1))
    child0_ancestors.append(attribute_value(attribute, 0))

    self.val1 = Node(child_attributes, child1_ancestors, self.data, self.heuristic)
    self.val0 = Node(child_attributes, child0_ancestors, self.data, self.heuristic)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitAttribute(self, atr, divider=0.5):\n big, lit = DecisionTree(None, self.atr), DecisionTree(None, self.atr)\n for d in self:\n if d[atr] > divider: big.append(d)\n else: lit.append(d)\n return lit, big", "def split_by_attribute(dbsession, group, attr):\n values = []\n for item in group.items:\n if attr in item.attributes and item.attributes[attr]:\n values.extend(item.attributes[attr])\n categories = [\n (v, c) for v, c in Counter(values).most_common() if c < len(group.items) * 0.6666 and c >= 15 # noqa: PLR2004\n ]\n if categories:\n category_values = [v for v, _ in categories]\n has_values = 0\n for item in group.items:\n found = False\n for value in item.attributes[attr]:\n if value in category_values:\n found = True\n break\n if found:\n has_values = has_values + 1\n if has_values / len(group.items) > 0.9: # noqa: PLR2004\n categories.reverse()\n for category in categories:\n new_group = Group(\n value=category[0], label=f\"{group.label} - {category[0]}\", parent=group, split=\"attribute\"\n )\n dbsession.add(new_group)\n for item in list(group.items):\n if category[0] in item.attributes[attr]:\n item.group = new_group\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"attribute\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False", "def split_attrs(s, *a, **kw):\n return split_attrs(s, *a, **kw)", "def on_visit_attribute(self, node: \"CSTNode\", attribute: str) -> None:\n visit_func = getattr(self, f\"visit_{type(node).__name__}_{attribute}\", None)\n if visit_func is not None:\n visit_func(node)", "def on_visit_attribute(self, node: \"CSTNode\", attribute: str) -> None:\n visit_func = getattr(self, f\"visit_{type(node).__name__}_{attribute}\", None)\n if visit_func is not None:\n visit_func(node)", "def on_leave_attribute(self, original_node: \"CSTNode\", attribute: str) -> None:\n leave_func = getattr(\n self, f\"leave_{type(original_node).__name__}_{attribute}\", None\n )\n if leave_func is not None:\n leave_func(original_node)", "def on_leave_attribute(self, original_node: \"CSTNode\", attribute: str) -> None:\n leave_func = getattr(\n self, f\"leave_{type(original_node).__name__}_{attribute}\", None\n )\n if leave_func is not None:\n leave_func(original_node)", "def visit_Attribute(self, node):\n new_node = ast.Name(\"%s.%s\" % (node.value.id, node.attr), node.ctx)\n return ast.copy_location(new_node, node)", "def splitBy(data, attribute_id):\n \n col = getColumn(data, attribute_id)\n values = set(col)\n split_data = [] \n for i in values:\n subset = [row for row in data if row[attribute_id] == i]\n split_data.append(subset)\n \n return split_data", "def visit(self, node):\n super(_GetattrNodeVisitor, self).visit(node)", "def prepare_node_attrs(self):", "def splitmetric(self, dataset, attr, target_attr):\n raise NotImplementedError('Subclass should implement this method')", "def extractTree(nodesList, rootNode):\n if len(nodesList) == 0:\n return\n if nodesList[0] == '!':\n return nodesList[1:]\n\n splitAttribute, splitValue, attributeValue = nodesList[0].strip().split('-')\n nodesList = nodesList[1:]\n \n if splitAttribute != splitValue or splitAttribute != '$' or splitValue != '$':\n rootNode.setSplit((splitAttribute, splitValue))\n else:\n rootNode.setSplit(\"Base case\")\n rootNode.setData(attributeValue)\n return nodesList[2:]\n \n \n leftTree = Tree()\n rightTree = Tree()\n rootNode.setLesser(leftTree)\n rootNode.setGreater(rightTree)\n nodesList = extractTree(nodesList, 
leftTree)\n\n \n \n nodesList = extractTree(nodesList, rightTree)\n\n return nodesList", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def parse_nodeline(nodeline):\n start = '['\n end = ']'\n if not (start in nodeline and end in nodeline):\n return nodeline.replace('\\t', '').replace(' ', ''), {}\n \n attrs = {}\n start_ind = nodeline.find(start)\n end_ind = nodeline.find(end)\n\n node_label = nodeline[:start_ind].replace('\\t', '').replace(' ', '')\n\n attrstr = nodeline[start_ind+1:end_ind]\n attrpairs = lex_dot_attrstr(attrstr)\n\n for name, val in attrpairs:\n attrs[name] = val\n \n return node_label, attrs", "def split(self, X):", "def _parseAttributeString(self, line):\n attribute, value = line.partition(' ')[::2]\n self._setAttribute(attribute, value)", "def test_group_attribute(self):\n\n star = '''\n<sr:StorageUsageRecords xmlns:sr=\"http://eu-emi.eu/namespaces/2011/02/storagerecord\">\n <sr:StorageUsageRecord>\n <sr:RecordIdentity sr:createTime=\"2016-06-09T02:42:15Z\" sr:recordId=\"c698\"/>\n <sr:StorageSystem>test-sys.ac.uk</sr:StorageSystem>\n <sr:SubjectIdentity>\n <sr:Group>ops</sr:Group>\n <sr:Site>EXAMPLE</sr:Site>\n </sr:SubjectIdentity>\n <sr:StorageMedia>disk</sr:StorageMedia>\n <sr:StartTime>2016-06-08T02:42:15Z</sr:StartTime>\n <sr:EndTime>2016-06-09T02:42:15Z</sr:EndTime>\n <sr:FileCount>4630</sr:FileCount>\n <sr:ResourceCapacityUsed>0</sr:ResourceCapacityUsed>\n <sr:ResourceCapacityAllocated>0</sr:ResourceCapacityAllocated>\n <sr:LogicalCapacityUsed>30</sr:LogicalCapacityUsed>\n </sr:StorageUsageRecord>\n <sr:StorageUsageRecord>\n <sr:RecordIdentity sr:createTime=\"2016-06-09T02:42:15Z\" sr:recordId=\"c69b\"/>\n <sr:StorageSystem>test-sys.ac.uk</sr:StorageSystem>\n <sr:SubjectIdentity>\n <sr:Group>cms</sr:Group>\n <sr:GroupAttribute sr:attributeType=\"authority\">/O=Grid/OU=eg.org/CN=host/auth.eg.org</sr:GroupAttribute>\n <sr:Site>EXAMPLE</sr:Site>\n </sr:SubjectIdentity>\n <sr:StorageMedia>disk</sr:StorageMedia>\n <sr:StartTime>2016-06-08T02:42:15Z</sr:StartTime>\n <sr:EndTime>2016-06-09T02:42:15Z</sr:EndTime>\n <sr:FileCount>346298</sr:FileCount>\n <sr:ResourceCapacityUsed>0</sr:ResourceCapacityUsed>\n <sr:ResourceCapacityAllocated>0</sr:ResourceCapacityAllocated>\n <sr:LogicalCapacityUsed>26770352879563</sr:LogicalCapacityUsed>\n </sr:StorageUsageRecord>\n <sr:StorageUsageRecord>\n <sr:RecordIdentity sr:createTime=\"2016-06-09T02:42:15Z\" sr:recordId=\"lc69\"/>\n <sr:StorageSystem>test-sys.ac.uk</sr:StorageSystem>\n <sr:SubjectIdentity>\n <sr:GroupAttribute sr:attributeType=\"role\">cmsphedex</sr:GroupAttribute>\n <sr:Group>cms</sr:Group>\n <sr:Site>EXAMPLE</sr:Site>\n </sr:SubjectIdentity>\n <sr:StorageMedia>disk</sr:StorageMedia>\n <sr:StartTime>2016-06-08T02:42:15Z</sr:StartTime>\n <sr:EndTime>2016-06-09T02:42:15Z</sr:EndTime>\n <sr:FileCount>132742</sr:FileCount>\n <sr:ResourceCapacityUsed>0</sr:ResourceCapacityUsed>\n <sr:ResourceCapacityAllocated>0</sr:ResourceCapacityAllocated>\n <sr:LogicalCapacityUsed>194962053020199</sr:LogicalCapacityUsed>\n </sr:StorageUsageRecord>\n <sr:StorageUsageRecord>\n <sr:RecordIdentity sr:createTime=\"2016-09-08T12:16:10Z\" sr:recordId=\"0715\"/>\n <sr:StorageSystem>test-sys.ac.uk</sr:StorageSystem>\n <sr:SubjectIdentity>\n <sr:GroupAttribute 
sr:attributeType=\"role\">poweruser1</sr:GroupAttribute>\n <sr:Group>atlas</sr:Group>\n <sr:GroupAttribute sr:attributeType=\"subgroup\">uk</sr:GroupAttribute>\n <sr:Site>EXAMPLE</sr:Site>\n </sr:SubjectIdentity>\n <sr:StorageMedia>disk</sr:StorageMedia>\n <sr:StartTime>2016-09-07T12:16:10Z</sr:StartTime>\n <sr:EndTime>2016-09-08T12:16:10Z</sr:EndTime>\n <sr:FileCount>8</sr:FileCount>\n <sr:ResourceCapacityUsed>0</sr:ResourceCapacityUsed>\n <sr:ResourceCapacityAllocated>0</sr:ResourceCapacityAllocated>\n <sr:LogicalCapacityUsed>6000437876</sr:LogicalCapacityUsed>\n </sr:StorageUsageRecord>\n</sr:StorageUsageRecords>\n\n'''\n\n parser = StarParser(star)\n\n for record in parser.get_records():\n if record.get_field('RecordId') == 'c698':\n # No group attributes.\n self.assertEqual(record.get_field('Group'), 'ops')\n elif record.get_field('RecordId') == 'c69b':\n # GroupAttribute authority defined, which goes in record below.\n self.assertEqual(record.get_field('Group'), 'cms')\n elif record.get_field('StarRecordID') == 'c69b':\n # Only authority defined (so GroupAttribute record created).\n self.assertEqual(record.get_field('AttributeType'), 'authority')\n self.assertEqual(record.get_field('AttributeValue'),\n '/O=Grid/OU=eg.org/CN=host/auth.eg.org')\n elif record.get_field('RecordId') == 'lc69':\n # Only role defined.\n self.assertEqual(record.get_field('Group'), 'cms')\n self.assertEqual(record.get_field('SubGroup'), None)\n self.assertEqual(record.get_field('Role'), 'cmsphedex')\n elif record.get_field('RecordId') == '0715':\n # Both subgroup and role defined.\n self.assertEqual(record.get_field('Group'), 'atlas')\n self.assertEqual(record.get_field('SubGroup'), 'uk')\n self.assertEqual(record.get_field('Role'), 'poweruser1')\n else:\n # If it's not in the list, something's gone wrong.\n self.fail(\"Record with ID %s doesn't match test cases\" %\n record.get_field('RecordId'))", "def partition_instances(instances, split_attribute, attribute_domains):\n # this is a group by split_attribute's domain, not by\n # the values of this attribute in instances\n # example: if split_attribute is \"level\"\n attribute_domain = attribute_domains[split_attribute] # [\"Senior\", \"Mid\", \"Junior\"]\n # Build a dictionary\n partitions = {} # key (attribute value): value (list of instances with this attribute value)\n # For loop through attributes in dictionary\n for attribute_value in attribute_domain:\n partitions[attribute_value] = []\n for instance in instances:\n index = int(split_attribute[3:])\n if instance[index] == attribute_value:\n partitions[attribute_value].append(instance)\n return partitions", "def visit_Getattr(self, node):\n self.getattr_nodes.add(node)", "def _pull_child_attribs(node):\n attr = node.attrib\n for child in node:\n attr.update(child.attrib)\n return attr", "def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo", "def cross_link_attribute(self, 
attribute_name, node_list1, node_list2):\n W = self.link_attribute(attribute_name)\n return W[node_list1, :][:, node_list2]", "def split_records(dom):\n return dom.getElementsByTagName(\"record\")", "def csv_attribute_unpacker(self, attribute_tuples: List[Tuple[str, str]], separator: str = \",\") \\\n -> Tuple[List[str], List[str]]:\n\n if not attribute_tuples:\n raise ValueError(\"The list of tuples containing the attributes is missing.\")\n\n join_attributes_set: set = set()\n selection_attributes_set: set = set()\n\n for j_attribute_string, s_attribute_string in attribute_tuples:\n for j_attribute in j_attribute_string.split(separator):\n join_attributes_set.add(j_attribute.strip())\n\n for operator in self.operators:\n s_attribute_string = s_attribute_string.replace(separator + operator + separator, operator)\n\n for s_attribute in s_attribute_string.split(separator):\n for operator in self.operators:\n if operator in s_attribute:\n s_attribute = s_attribute.split(operator)[0].strip()\n selection_attributes_set.add(s_attribute)\n break\n\n return list(join_attributes_set), list(selection_attributes_set)", "def mineral_attr(attribute):\n return attribute[0]", "def _get_attribute(self):\n return self.split_text[1] if len(self.split_text) > 1 else \"\"", "def remove_attribute(self, attribute: str) -> None:\n attr_index = self.__attr_index(attribute)\n if attr_index is not None:\n self.yaml_node.value.pop(attr_index)", "def visit_Attribute(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, ast.Str(node.attr) ]\n return to_call(to_name('getattr'), args)\n return node", "def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node" ]
[ "0.5715175", "0.5710266", "0.5545711", "0.55031914", "0.55031914", "0.5446711", "0.5446711", "0.54354566", "0.5370533", "0.53344154", "0.5330768", "0.5245361", "0.52420187", "0.5240586", "0.5158735", "0.5151447", "0.5148021", "0.5079045", "0.50706065", "0.50669575", "0.5060434", "0.50595826", "0.50372094", "0.5028784", "0.5015953", "0.5004996", "0.49860206", "0.49803564", "0.49738145", "0.4966806" ]
0.77221286
0
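An illustrative sketch of the row partitioning that split(attribute) induces through its ancestor filters; the dict-based rows and the attribute names are assumptions for demonstration only.

def partition(rows, attribute):
    # Split dict-rows into the attribute == 1 and attribute == 0 subsets.
    ones = [r for r in rows if r[attribute] == 1]
    zeros = [r for r in rows if r[attribute] == 0]
    return ones, zeros

rows = [{"wind": 1, "play": 1}, {"wind": 0, "play": 0}, {"wind": 1, "play": 0}]
val1_rows, val0_rows = partition(rows, "wind")
print(len(val1_rows), len(val0_rows))  # 2 1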
sort and retrieve top rows of df
def get_top_recipes(df, sort_params=None, count=10):
    if not sort_params:
        logging.warning("Column names to sort by are not defined.")
        return df

    return df.sort_values(sort_params["names"], ascending=sort_params["order"]).head(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_topn(df, top_n=25):\n assert df.columns.str.contains(\"ranking\").any(), \"select_topn failed. Missing 'ranking' column.\"\n \n # top-n by ranking\n topn_idx = df.groupby(\"ranking\").value_normalized.nlargest(top_n).droplevel(0).index\n \n return df.loc[topn_idx, : ]", "def analyse_and_sort(self, df):\n if (type(df) is pd.Series):\n df = df.to_frame(\"score\")\n elif (type(df) is pd.DataFrame):\n df.columns = [\"score\"]\n df = self._filter(df)\n df = self._argrelmax(df)\n df = self._drop_close_extrema(df) # by = [deb1[0]]\n return df.sort_values(by=[\"score\"])[::-1]", "def get_top_10(df):\n\n grouped_df = df.groupby(\"country\").max()\n\n # Confirmed cases\n print(grouped_df.sort_values(\"confirmed\",\n ascending=False)[\"confirmed\"][:10])\n\n # Deaths\n print(grouped_df.sort_values(\"deaths\", ascending=False)[\"deaths\"][:10])\n\n # Recoveries\n print(grouped_df.sort_values(\"recovered\",\n ascending=False)[\"recovered\"][:10])\n\n a = grouped_df.sort_values(\"recovered\", ascending=False)[\"recovered\"][:10]\n print(a.to_markdown())", "def top_products(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['product_name']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set", "def top_n_rows(dataframe, rownumber):\n df = dataframe.head(rownumber)\n return df", "def get_top_k_items(\n dataframe, col_user=DEFAULT_USER_COL, col_rating=DEFAULT_RATING_COL, k=DEFAULT_K\n):\n # Sort dataframe by col_user and (top k) col_rating\n top_k_items = (\n dataframe.groupby(col_user, as_index=False)\n .apply(lambda x: x.nlargest(k, col_rating))\n .reset_index(drop=True)\n )\n # Add ranks\n top_k_items[\"rank\"] = top_k_items.groupby(col_user, sort=False).cumcount() + 1\n return top_k_items", "def top_groups():\n groups = Group.objects.filter(country='PT').order_by('-members')[:10]\n df = pd.DataFrame.from_records(groups.values())\n return df", "def filter_rows_by_max_abs_val(df, max_=MAX_NUM_ROWS):\n df_temp = df.abs()\n top_rows = df_temp.max(axis=1).nlargest(max_)\n return df.ix[top_rows.index]", "def topCountries(top=10):\r\n #top 10 deadly countries\r\n countries = agg('country')[:top].index\r\n #grab aggregated data for these countries\r\n dataOfTop10 = agg(['year','country']).query(\"country in @countries\")### interesting...\r\n #unstack data\r\n dataOfTop10 = dataOfTop10.unstack(1)\r\n #remove multiindexes\r\n dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose()\r\n #sort by year\r\n dataOfTop10.sort_index(inplace=True)\r\n return dataOfTop10", "def selectTop(dfProcess,period,periodNumber,ranking):\n #creating new columns\n dfProcess['timeCreated'] = pd.to_datetime(dfProcess['timeCreated'], unit='s')\n if period == 'week':\n dfProcess['weekNumber'] = dfProcess['timeCreated'].dt.week\n dfProcess = dfProcess[dfProcess.weekNumber == periodNumber]\n elif period == 'month':\n dfProcess['monthNumber'] = dfProcess['timeCreated'].dt.month\n dfProcess = dfProcess[dfProcess.monthNumber == periodNumber]\n else:\n print(\"Period parameter is unknown\")\n #select useful columns\n columns_name = ['id','commentCount','likeCount','playCount','shareCount']\n dfProcess = dfProcess[columns_name]\n #calculating score\n dfProcess = dfProcess.apply(lambda x: x/x.max() if x.name in columns_name[1:] else x) #normalisation\n if ranking == 'trending':\n score = (35/100 * dfProcess['likeCount'] 
+ 20/100*dfProcess['playCount'] + 35/100* dfProcess['shareCount']\n + 10/100*dfProcess['commentCount'])*100\n elif ranking == 'share':\n score = dfProcess['shareCount']\n elif ranking == 'like':\n score = dfProcess['likeCount']\n elif ranking == 'view':\n score = dfProcess['playCount']\n else:\n print('ranking unknown')\n score = dfProcess['playCount']\n dfProcess['score'] = score\n #selecting top\n dfProcess = dfProcess.sort_values('score',ascending=False)\n dfProcess = dfProcess.head(50)\n return dfProcess", "def __get_top(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productid', keep=\"first\")\n print(result)\n result = result[:top].sort_values(by=\"final_score\", ascending=False).productid\n\n return list(result)", "def filter_rows_by_highest_abs_val_mean(df, max_=MAX_NUM_ROWS):\n top_rows = numpy.abs(df.mean(axis=1)).nlargest(max_)\n return df.ix[top_rows.index]", "def top_five_customers(data_frame):\n data_set = data_frame.groupby(['customer_id']).agg({'quantity_rented': 'sum'})\n data_set = data_set.nlargest(5, 'quantity_rented')\n return data_set", "def get_popularity_based_topk(self, top_k=10, sort_top_k=False):\n\n test_scores = np.array([self.item_frequencies])\n\n logger.info('Getting top K')\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n return pd.DataFrame(\n {\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )", "def closest_row(dataframe, column, value):\n sort = dataframe.iloc[(dataframe[column]-value).abs().argsort()[:1]]\n return sort", "def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', keep=\"first\")[\n :top]\n\n return result", "def get_top_most_successfuls(df, field, top=10, click_thld=30):\n df_clk = get_field_click_success_df(df, field)\n # Keep only values above threshold\n df_clk = df_clk[df_clk.clicked > click_thld]\n if len(df_clk) < 1:\n print '\\nError: not enough clicks to satisfy threshold conditions\\n'\n return None\n top_successful = df_clk.index[:top].tolist()\n\n # Get all the results statistically compatible with the top results\n df_clk['succ_minus_std'] = df_clk['success'] - df_clk['success_std']\n df_clk['succ_plus_std'] = df_clk['success'] + df_clk['success_std']\n lowest_compatible = df_clk[:top]['succ_minus_std'].min()\n stat_compatible = df_clk[df_clk['succ_plus_std'] >= lowest_compatible].index.tolist()\n # Exclude elements in the top list\n stat_compatible = [x for x in stat_compatible if x not in top_successful]\n return {'top_successful': top_successful, 'stat_compatible': stat_compatible}", "def top(self):", "def get_item_based_topk(self, items, top_k=10, sort_top_k=False):\n\n # convert item ids to indices\n item_ids = items[self.col_item].map(self.item2index)\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = 
user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()", "def top_indices(preds, num):\n sort_preds = np.sort(preds, 1)\n sort_preds = np.flip(sort_preds)\n sort_index = np.argsort(preds, 1)\n sort_index = np.flip(sort_index)\n\n print(f\"Top {num} results:\")\n for i in range(num):\n print(sort_index[0][i], sort_preds[0][i])\n\n return 0", "def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops", "def _sort_rows(matrix, num_rows):\n tmatrix = array_ops.transpose(matrix, [1, 0])\n sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]\n return array_ops.transpose(sorted_tmatrix, [1, 0])", "def sort_processes(ps_df: pd.DataFrame, user_sort: list) -> pd.DataFrame:\n result_df = ps_df.sort_values(by=user_sort, ascending=True)\n return result_df", "def top_k(m, k):\n ml = m.tolil()\n ms = [_top_k(d, r, k) for d, r in zip(ml.data, ml.rows)]\n return zip(*ms)", "def best_validation_rows(log_df, valid_col='valid_accuracy', second_criterion='iterations_done'):\n return log_df.sort_values([valid_col,second_criterion],ascending=False).drop_duplicates(['log'])", "def get_top10(dataset, contrib_type):\n return dataset.order_by('-{0}'.format(contrib_type))[:10]", "def top_ten_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set", "def filter_rows_by_variance(df, max_=MAX_NUM_ROWS):\n top_rows = df.var(axis=1).nlargest(max_)\n return df.ix[top_rows.index]", "def row_major_sort(dataframe):\n t1_column = dataframe.columns.get_loc(\"t1\")\n winding_column = dataframe.columns.get_loc(\"winding\")\n t_array = np.flip(dataframe.iloc[:,t1_column:winding_column].values, axis=1)\n n_ts = t_array.shape[1]\n for column in range(n_ts):\n sorted_args = np.argsort(t_array[:,column], kind = \"mergesort\")\n dataframe = dataframe.iloc[sorted_args,:]\n t_array = t_array[sorted_args]\n return dataframe", "def get_user_top_choices( self, user_id, n = 5 ):\t\n\t\tuser_df = ( self.df[ self.df['user_id'] == user_id ][[ 'business_id', 'stars' ]]\n\t\t\t\t\t.sort_values( ['stars'], ascending = False )\n\t\t\t\t\t.head(n) )\n\t\treturn user_df" ]
[ "0.68262124", "0.6588031", "0.6443308", "0.6321405", "0.6281697", "0.62319", "0.6184617", "0.6135005", "0.6129716", "0.6061375", "0.6024057", "0.60073507", "0.59771293", "0.59485084", "0.5929012", "0.5920936", "0.5898448", "0.5891113", "0.5877641", "0.5877335", "0.5831138", "0.5824702", "0.582148", "0.5821212", "0.5770579", "0.5757015", "0.57282275", "0.5690332", "0.5683321", "0.56796277" ]
0.6937662
0
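A usage sketch for a get_top_recipes-style call; the sort_params keys ("names", "order") mirror the document above, while the DataFrame columns and concrete values are assumptions.

import pandas as pd

df = pd.DataFrame({
    "name": ["a", "b", "c"],
    "ratingsCount": [10, 50, 30],
    "favoritesCount": [1, 5, 3],
})
params = {"names": ["ratingsCount", "favoritesCount"], "order": [False, False]}
# Equivalent to get_top_recipes(df, sort_params=params, count=2): sort descending, keep the top rows.
top = df.sort_values(params["names"], ascending=params["order"]).head(2)
print(top)  # rows b and c, ordered by ratingsCount then favoritesCount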
1. parse the json object and extract name, headline, prepTime, ratingsCount, favoritesCount, nutrition and export to a csv file 2. retrieve top 10 recipes based on ratingsCount, favoritesCount and export to a csv file
def read_recipes(year, week):
    # read config file
    cp = ConfigParser()
    cp.read("config.ini")

    # load menu data
    fname_json = cp["other"]["json_out_fname"]
    if not os.path.exists(fname_json):
        logging.error("JSON file not found.")
        return
    with open(fname_json) as f:
        menu = json.load(f)

    # read recipes: items >> [courses] >> [recipes]
    recipes = []
    for item in menu["items"]:
        for course in item["courses"]:
            recipes.append(course["recipe"])
    logging.info("%d recipes found", len(recipes))

    data = []
    for recipe in recipes:
        recipe_data = []
        recipe_data.append(recipe["name"])
        recipe_data.append(recipe["headline"])
        recipe_data.append(recipe["prepTime"])
        recipe_data.append(recipe["ratingsCount"])
        recipe_data.append(recipe["favoritesCount"])
        # nutritions
        for i in range(7):
            recipe_data.append(recipe["nutrition"][i]["amount"])
        data.append(recipe_data)

    column_names = ["Name", "Headline", "PrepTime", "RatingsCount", "FavoritesCount",
                    "Nutrition-Energy(KJ)", "Nutrition-Fat", "Nutrition-of which saturates",
                    "Nutrition-Carbohydrate", "Nutrition-of which sugars",
                    "Nutrition-Protein", "Nutrition-Sodium"]
    df_recipes = pd.DataFrame(data, columns=column_names)

    # save recipe data into csv
    fname_csv = str(year) + "_" + str(week) + "_menu.csv"
    df_recipes.to_csv(fname_csv, index=False)
    logging.info("recipes exported to csv.")

    # extract top 10 recipes based on RatingsCount and FavoritesCount
    params = {"names": ["RatingsCount", "FavoritesCount"], "order": [False, False]}
    df_top_recipes = get_top_recipes(df_recipes, sort_params=params, count=10)

    # save top 10 recipes into csv
    fname_out = str(year) + "_" + str(week) + "_TOP_10.csv"
    df_top_recipes.to_csv(fname_out, index=False)
    logging.info("top 10 recipes exported to csv.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTopMovies(endpoint, date, count=10):\n\n try:\n response = urlreq.urlopen(endpoint.format(date))\n soup = BeautifulSoup(response.read(), \"html.parser\")\n table = soup.find('table', border=\"0\", cellpadding=\"5\", cellspacing=\"1\")\n tdata = []\n\n for i, row in enumerate(table.find_all('tr')[1:], start=1):\n if i > count:\n break\n\n cells = row.find_all('td')\n tdict = {}\n\n tdict['rank'] = i\n tdict['title'] = cells[2].text.strip()\n tdict['daily_gross'] = int(re.sub(r'[^\\d]', '', cells[4].text))\n tdict['theaters'] = int(re.sub(r'[^\\d]', '', cells[7].text))\n tdict['todate_gross'] = int(re.sub(r'[^\\d]', '', cells[9].text))\n tdict['release_day'] = int(cells[10].text)\n\n tdata.append(tdict)\n\n tdata = pd.DataFrame(tdata)\n tdata['gross_date'] = date\n return tdata\n\n except urlerr.URLError as err:\n print(\"\\nThere was an error retrieving daily revenue information\")\n print(err)\n return None\n except Exception:\n print(\"\\nThere's something wrong with the BOMojo daily revenue page\")\n return None", "def fetch_per_category(n, path=os.path.join('data', 'yelp_academic_dataset_review.json.zip')):\n\n subsample = []\n counts = {}\n\n # Read zipped JSON\n with zipfile.ZipFile(path, 'r') as z:\n for filename in z.namelist():\n with z.open(filename) as f:\n\n # Iterate over the reviews\n for line in f:\n review = json.loads(line.decode('utf-8'))\n\n # Collect records and update the count\n if review['stars'] not in counts:\n subsample.append(review)\n counts[review['stars']] = 1\n elif counts[review['stars']] < n:\n subsample.append(json.loads(line.decode('utf-8')))\n counts[review['stars']] += 1\n\n # Break when n records are gathered for all star ratings\n if all(c == n for c in counts.values()) == n:\n break\n\n return subsample", "def process(document):\n df = document\n df['freshness'] = df.iloc[:, 0].str.split(' - ').str.get(0)\n df['review'] = df.iloc[:, 0].str.split(' - ').str.get(1)\n df = df.loc[df['review'].str.len() >= 18]\n df = df.loc[:, ['freshness', 'review']]\n\n num_to_keep = (df.shape[0] - df.freshness.astype(np.int32).sum()) // 10_000 * 10_000\n rotten = df.loc[df.freshness == '0'].sample(num_to_keep)\n fresh = df.loc[df.freshness == '1'].sample(num_to_keep)\n\n df = pd.concat([rotten, fresh], axis=0, sort=False)\n df = df.sample(frac=1).reset_index(drop=True)\n\n df.to_csv('all_rotten_tomatoes_reviews.csv', index=False)\n\n print('\\nThe web scraper has finished.',\n '\\nCheck your directory: {}'.format(os.getcwd()),\n '\\nThe file with all reviews is named: all_rotten_tomatoes_reviews.csv')\n return df", "def top_artists_from_API(api_results):\r\n df = pd.DataFrame(api_results[\"items\"])\r\n cols = [\"name\",\"id\",\"genres\",\"popularity\",\"uri\"]\r\n return df[cols]", "def top_ten(subreddit):\n import requests\n\n url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n\n r = requests.get(url)\n json_content = r.json()\n\n if (r.status_code == 200):\n count = 0\n for post in json_content['data']['children']:\n print(post['data']['title'])\n count += 1\n if count == 10:\n break\n else:\n print(None)", "def top_ten(subreddit):\n url = \"https://www.reddit.com/r/\" + subreddit + \"/hot.json?limit=10\"\n identify = {\"User-Agent\": \"Requests library from Python\",\n \"From\": \"[email protected]\"}\n to_print = []\n hot = requests.get(url, headers=identify, allow_redirects=False)\n if hot.status_code == 404:\n print(\"None\")\n return 0\n if hot.status_code == 200:\n hot = hot.json()\n hot = hot[\"data\"]\n hot = hot[\"children\"]\n for 
items in hot:\n del items[\"kind\"]\n for data in hot:\n to_print.append(data[\"data\"])\n hot = to_print\n to_print = []\n for dictio in hot:\n to_print.append(dictio[\"title\"])\n for itera in to_print:\n print(itera)", "def top_ten(subreddit):\n\n if subreddit is None or not isinstance(subreddit, str):\n print(\"None\")\n\n user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}\n params = {'limit': 10}\n url = 'https://www.reddit.com/r/{}/hot/.json'.format(subreddit)\n\n response = get(url, headers=user_agent, params=params)\n all_data = response.json()\n\n try:\n raw1 = all_data.get('data').get('children')\n\n for i in raw1:\n print(i.get('data').get('title'))\n\n except:\n print(\"None\")", "def parse(self, response):\r\n recipes = json.loads(response.text)['response']['results']\r\n # test json data\r\n # fp = open(\"./food.json\", \"w\", encoding=\"utf-8\")\r\n # json.dump(recipes, fp=fp, ensure_ascii=False)\r\n for recipe in recipes:\r\n if recipe['record_type'] == 'Recipe':\r\n item = RecipespidersItem()\r\n\r\n self.recipe_count += 1\r\n item['id'] = self.recipe_count\r\n item['name'] = recipe['main_title']\r\n item['description'] = recipe['main_description']\r\n\r\n item['rating_num'] = int(recipe['main_num_ratings'])\r\n item['rating_star'] = int(recipe['main_rating_mapping'])\r\n item['rating_score'] = float(recipe['main_rating'])\r\n\r\n item['total_time'] = int(recipe['recipe_totaltime'])\r\n\r\n if recipe.get('recipe_photo_url') is None:\r\n continue\r\n else:\r\n item['photo_url'] = recipe['recipe_photo_url']\r\n\r\n item['record_url'] = recipe['record_url']\r\n\r\n yield scrapy.Request(url=recipe['record_url'], callback=self.parse_detail, meta={'item': item})\r\n\r\n # process remaining pages\r\n if self.page_num <= 21000:\r\n print(self.page_num)\r\n new_url = format(self.base_url % self.page_num)\r\n self.page_num += 1\r\n\r\n yield scrapy.Request(url=new_url, callback=self.parse)", "def populate(json_response: list) -> list:\n output = []\n for movie in json_response:\n imdb_id = movie[\"imdbID\"]\n output.append(dict(\n title = movie[\"Title\"],\n imdbID = imdb_id,\n rottenTomatoesPercentage = rotten_rate_fetcher(imdb_id)\n ))\n return output", "def scrapeSpotify():\n # Set Spotify authentication token \n token = util.prompt_for_user_token(username, scope, clientid, clientsecret, redirecturi)\n \n if token: # Authenticate with Spotify\n # Store dictionary of scraped values from scraping function\n if debugging == True:\n cities = DataCollection.test() # DEBUGGING ONLY\n #cities = DataCollection.scrape_spotify_info(limiting, limit_cities) \n #return jsonify(cities)\n else:\n cities = DataCollection.scrape_spotify_info(limiting, limit_cities) # THE REAL THING\n\n # Loop through all cities in dataset\n i = 0\n for city in cities:\n # Exit out of for loop at 2 if we are limiting city loop iterations\n if limiting == True and i == limit_cities:\n break \n #\n # Begin Spotify analysis (e.g., determine popularity for each artist in city list, top track)\n #\n sp = spotipy.Spotify(auth=token)\n\n # Loop through the top artists for this city, and determine the popularity values\n i = 0\n top_artists = []\n artist_names = []\n for top_artist in city[\"top_artists\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting artist iterations\n if limiting == True and i == limit_artists:\n break\n\n i += 1\n # *** Example artist value in dictionary ***\n # { 'artist': 'Bobby Pulido',\n # 'tracks': ['spotify:track:1tg7ZzCAkjDNENdWL7WuIr',\n # 
'spotify:track:2JJSGhPpATm8lXeYjD95fw',\n # 'spotify:track:5iuGn3RXvfvHIyIe8fyxBE'\n # ],\n # 'popularity': 99 <--------- *** BEING ADDED ***\n # }\n # Get info about the first artist track\n urn = top_artist[\"tracks\"][0]\n track = sp.track(urn)\n\n # Get the artist's Spotify URI & name\n artist_uri = track['artists'][0]['uri']\n artist_name = track['artists'][0]['name']\n\n # Set the artist name to the first artist attributed to the song\n top_artist[\"artist\"] = artist_name\n\n # Get the artist popularity, and add it to their 'top_artist' item\n artist_info = sp.artist(artist_uri)\n artist_popularity = artist_info[\"popularity\"]\n top_artist[\"popularity\"] = artist_popularity\n\n # Get the artist genres, and add it to their 'top_artist' item\n artist_genres = artist_info[\"genres\"]\n top_artist[\"genres\"] = artist_genres\n\n # If not already added, append updated top_artist object to master collection\n if artist_name not in artist_names:\n top_artists.append(top_artist)\n \n # Track current artists in flat list to avoid duplicates\n artist_names.append(artist_name) \n\n # Sort 'top_artists' by popularity in descending order, update the field in the city object\n top_artists.sort(key=lambda x: x[\"popularity\"], reverse=True)\n city[\"top_artists\"] = top_artists\n\n # Artist & song popularity logic:\n # Build 'top_5_artists' list: grab top 5 (by popularity) from 'top_artists' \n top_10_artists = []\n i_art = 0\n for art in top_artists:\n if i_art < 10:\n top_10_artists.append(art[\"artist\"])\n \n i_art += 1\n \n # Update 'top_5_artists' field in the city object\n city[\"top_5_artists\"] = top_10_artists[:5]\n\n # Loop through all tracks for this city, and create a new list of objects with the track popularity\n # BEFORE: [trk1, trk2, trk3, ...]\n # AFTER: [\n # {'track': trk1, 'popularity': pop1, 'name': 'El Baile de Gorila', 'artist': 'Mossino'}\n # {'track': trk2, 'popularity': pop2}\n # ...\n # ] \n i = 0\n tracks = []\n highest_popularity = 0\n most_popular_track = \"\"\n for trk in city[\"track_ids\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting track iterations\n if limiting == True and i == limit_tracks:\n break\n\n i += 1\n # Get Spotify track metadata \n track = sp.track(trk)\n \n # Get the track name, artist, and popularity -- and add it to the object\n current_track_name = track['name']\n current_track_artist = track['artists'][0]['name']\n current_track_popularity = track['popularity']\n track_info = { \n \"track\": trk, \n \"popularity\": current_track_popularity,\n \"artist\": current_track_artist,\n \"name\": current_track_name\n }\n \n # Append updated object to track_ids array\n tracks.append(track_info)\n\n # For the top 10 artists, determine the song with the highest popularity\n if current_track_artist in top_10_artists:\n # Determine most popular track\n if highest_popularity < current_track_popularity:\n most_popular_track = trk\n highest_popularity = current_track_popularity\n most_popular_artist = current_track_artist\n most_popular_track_name = current_track_name \n \n #print(\"most popular track: \" + most_popular_track)\n #print(\"highest popularity: \" + str(highest_popularity))\n #print(\"current track: \" + trk )\n \n # Update current city value with updated 'tracks' array info\n city[\"track_ids\"] = tracks\n\n # Update current city's 'top_track' field with the most popular track info\n mostpopular_track_info = { \n \"track\": most_popular_track, \n \"popularity\": highest_popularity,\n \"artist\": most_popular_artist,\n 
\"name\": most_popular_track_name\n }\n city[\"top_track\"] = mostpopular_track_info\n\n if debugging == True:\n # **** Print out resulting object (TESTING ONLY) ****\n pprint.pprint(city)\n else:\n # **** Insert the current city record into the MongoDB collection ****\n db = connectToMongo()\n db.Cities.update( { \"city\": city[\"city\"] }, \n city,\n upsert=True\n )\n \n # Iterate counter\n i += 1\n else: \n print(\"Connection to Spotify API failed - token invalid.\")\n\n return getJSON(wrapGeoJSON(cities))", "def get_popularity(rest_data, item_dict):\n max_review_count = rest_data.review_count.max()\n min_review_count = rest_data.review_count.min()\n result = np.zeros((len(rest_data), 2))\n for i in range(len(rest_data)):\n result[i, 0] = item_dict[rest_data.business_id[i]]\n result[i, 1] = (((rest_data.review_count[i] - min_review_count)/(max_review_count - min_review_count))*4 + 1)\n result = result[result[:, 0].argsort()]\n return result", "def top_ten(subreddit):\n header = {\"User-Agent\": \"Holberton\"}\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n response = requests.get(url, headers=header, allow_redirects=False)\n if response.status_code == 200:\n\n for item in response.json().get(\"data\", None).get(\"children\", None):\n print(item.get(\"data\", None).get(\"title\", None))\n else:\n print(None)\n return", "def top_ten(subreddit):\n\n user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; \\\nIntel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) \\\nChrome/39.0.2171.95 Safari/537.36'}\n\n res = requests.get('https://www.reddit.com/r/{}/hot.json?limit=10'.format(\n subreddit), headers=user_agent)\n\n if res.status_code == 404:\n print(None)\n\n else:\n for sub in res.json().get(\"data\").get(\"children\"):\n print(sub.get(\"data\").get(\"title\"))", "def top_ten(subreddit):\n\n limit = \"10\"\n\n url = \"https://www.reddit.com/r/{}/hot.json?limit={}\".format(subreddit,\n limit)\n\n user_agent = {\"User-Agent\": \"Python\"}\n response = requests.get(url, headers=user_agent, allow_redirects=False)\n if response.status_code >= 300:\n print(\"None\")\n else:\n for elem in response.json().get(\"data\").get(\"children\"):\n print(elem.get(\"data\").get(\"title\"))", "def top_ten(subreddit):\n headers = {\"User-Agent\": \"Holberton\"}\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(\n subreddit)\n req = requests.get(url, headers=headers)\n\n if req.status_code != 200:\n print(None)\n return\n redit = req.json().get(\"data\").get(\"children\")\n for chil in redit:\n print(chil.get(\"data\").get(\"title\"))", "def read_tweets(data_path):\n\n json_list = []\n with open(data_path, 'r') as json_file_:\n for line in json_file_:\n json_file = json.dumps(ast.literal_eval(line))\n json_list += json_file,\n \n header = ['tweet_id', 'tweet', 'date', 'lang_twitter', 'retweeted', 'user_id']\n required_cols = itemgetter(*header)\n\n #with open(data_path) as f_input, open('out/'+data_path[:-4]+'.csv', 'w', newline='') as f_output:\n output = data_path.split(\"/\")[-1]\n output = 'out/{}.csv'.format(output[:-4])\n with open(output, 'w', newline='') as f_output:\n csv_output = csv.writer(f_output)\n csv_output.writerow(header)\n for row in json_list:\n if row.strip():\n tweet = json.loads(row)\n tweet['tweet_id'] = tweet['id_str']\n tweet['tweet'] = tweet['extended_tweet']['full_text'] if (\"extended_tweet\" in tweet or \"full_text\" in tweet) and bool(tweet[\"truncated\"]) else tweet['text']\n tweet['date'] = tweet['created_at']\n 
tweet['lang_twitter'] = tweet['lang']\n tweet['user_id'] = tweet['user']['id_str']\n csv_output.writerow(required_cols(tweet))\n \n return True", "def top_ten(subreddit):\n url = 'https://api.reddit.com/r/{}/hot.json'.format(subreddit)\n header = {'User-agent': 'your bot 0.1'}\n subred = requests.get(url, headers=header, allow_redirects=False)\n\n if subred.status_code != 200:\n print(None)\n return None\n\n try:\n theme = subred.json()\n except:\n print(\"Not a valid JSON\")\n return 0\n\n try:\n maindata = theme.get(\"data\")\n children = maindata.get(\"children\")\n for child in children[:10]:\n data = child.get(\"data\")\n print(data.get(\"title\"))\n except:\n return None", "def parse_json( file ):\n \n reviews = []\n ratings = []\n count = 0\n words = 0\n with open( file, 'r' ) as f:\n start = time.time()\n for line in f.readlines():\n j = json.loads( line )\n words += len( j[ 'reviewText' ].split( ' ' ) )\n reviews.append( j[ 'reviewText' ] )\n ratings.append( int( j[ 'overall' ] ) )\n count += 1\n if count % 10000 == 0:\n sys.stdout.write( '\\r processed: {}/{} reviews in {}s'.format( count, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\r processed: {}/{} reviews in {}s\\n'.format( count, NO_REVIEWS, time.time() - start ) )\n\n print( 'total number of words:', words )\n print( 'avg words/sample:', words / NO_REVIEWS )\n\n return reviews, ratings", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = (sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n 
return(header_results)", "def get_popularHashtags_unitedairlines():\n fields = ['hashtags'] # files to read\n # get any desird day as you want\n date_Day = ['04', '05', '06', '07', '08', '09', '10', '11', '12', '13']\n #\n popularity_cols=[[] for i in range(10)]\n dateCol=[]\n for date_DD in xrange(10):\n csv_input = pd.read_csv('United_Airlines_'+ \"2017-04-\" + \n date_Day[date_DD] + '.csv', skipinitialspace=True, usecols=fields)\n dateCol.append(\"2017-04-\" + date_Day[date_DD])\n print 'processing file: '+ 'United_Airlines_'+ \"2017-04-\" + date_Day[date_DD] + '.csv'\n \n # add all hashtags of all tweets from all files to list\n hashtags = []\n for htext in csv_input.hashtags:\n hstr = htext.translate(None, string.punctuation)\n for hashtag in hstr.split():\n hashtags.append(hashtag)\n \n c=Counter(hashtags)\n\n # take most popular 10 per day\n for i in range(10):\n popularity_cols[i].append(c.most_common(10)[i])\n\n # add dates\n popularity_cols.insert(0,dateCol)\n # headers\n headers=['date']\n for i in range(9,-1,-1):\n headers.append(i)\n \n # to dataframe and csv\n df = pd.DataFrame(popularity_cols)\n df=df.transpose()\n df.columns = headers\n\n df.to_csv('United_Airlines_Popular_Hashtags_'+ \"2017-04-\" + \n date_Day[0] +\"_to_2017-04-\"+date_Day[len(date_Day)-1] + '.csv', index=False)", "def top_ten(subreddit):\n h = requests.utils.default_headers()\n h.update({'User-Agent': 'My User Agent 1.0'})\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n r = requests.get(url, headers=h).json()\n result = r.get('data', {}).get('children', [])\n if not result:\n print(None)\n for i in result:\n print(i.get('data').get('title'))", "def main() -> None:\r\n\r\n with open('main/NLP/LDA/IHE_RESULTS/scopus_prediction_results.json', 'r') as f:\r\n results = json.load(f)\r\n\r\n lst = {}\r\n cols = []\r\n for i in range(20):\r\n lst[str(i)] = []\r\n cols.append(str(i))\r\n\r\n for i in range(20):\r\n for doi, vals in results.items():\r\n if vals[str(i)] >= THRESHOLD:\r\n lst[str(i)].append(doi)\r\n\r\n generate_csv(lst, cols, \"main/NLP/LDA/IHE_RESULTS/pub_analyse_20.csv\")", "def get_reddit_data(target_items, total_threads):\n if not os.path.exists('refresh_token.txt'):\n f = open('refresh_token.txt', 'x')\n f.close()\n print('Please fill in required information in \\'refresh_token.txt\\'')\n sys.exit()\n if not os.path.exists('bot.txt'):\n f2 = open('bot.txt', 'x')\n f2.close()\n print('Please fill in required information in \\'bot.txt\\'')\n sys.exit()\n\n # Authenticate to Reddit\n refresh_token_manager = FileTokenManager('refresh_token.txt') # Refer to praw documentation for obtaining a refresh token from reddit here: https://praw.readthedocs.io/en/latest/getting_started/authentication.html\n reddit = praw.Reddit(token_manager=refresh_token_manager, user_agent=open('bot.txt', 'r').read()) # Get bot token\n\n # Scrape Reddit data\n posts = []\n target_reddit = reddit.subreddit(target_reddit_str)\n for post in target_reddit.hot(limit=total_threads): # Search from top posts in 'hot' category in specified subreddit, limit based on user specification.\n posts.append(\n [post.title,\n post.score,\n post.num_comments,\n post.url,\n post.id,\n post.created])\n posts = pd.DataFrame(posts, columns=['title', 'score', 'num_comments', 'url', 'id', 'created']) # Build a pandas dataframe\n # Parse useful stuff in the dataframe\n\n # Type of product\n titles = []\n for i in range(posts.shape[0]): # df.shape[0] = number of rows\n titles.append(posts.at[i, 'title'])\n part_type = 
[]\n for i in range(len(titles)): # Get only the part types from title of post\n name = titles[i]\n index = -1\n for j in range(len(name)):\n if name[j] == '[' and index > -2:\n index = j+1\n elif name[j] == ']' and index > -1:\n part_type.append(name[index:j].lower())\n index = -2 # Prevents string from getting screwed up while parsing extra ]\n if index == -1:\n part_type.append('')\n # Certain part types require additional parsing for formatting. Ex. 'm.2 ssd' can be parsed to just 'ssd'.\n for i in range(len(part_type)):\n for j in range(len(formatted_strings)):\n if formatted_strings[j] in part_type[i]:\n part_type[i] = formatted_strings[j]\n\n # Certain part types aren't always labelled correctly. Go through terms and set them to term[0] (see redefined_terms definition for more info)\n for i in range(len(part_type)):\n for j in range(len(redefined_terms)):\n for k in range(len(redefined_terms[j])):\n if redefined_terms[j][k] in part_type[i]:\n part_type[i] = redefined_terms[j][0]\n\n posts['part_type'] = part_type # add part types to dataframe\n\n # Price range\n prices = []\n found = False\n for i in range(len(titles)):\n skip_rest = False\n for j in range(len(titles[i])):\n if titles[i][j] == '$' and not skip_rest:\n found = True\n skip_rest = True\n prices.append(titles[i][j:])\n if not found:\n prices.append('')\n\n posts['prices'] = prices # add prices to dataframe\n # posts = posts[2:] # remove the top posts on the subreddit pinned by moderators\n # posts.to_csv('posts.csv')\n\n # Get target products\n target_nums = []\n for i in range(len(part_type)):\n for j in range(len(target_items)):\n if part_type[i] == target_items[j]:\n target_nums.append(i)\n # print(target_nums)\n # Make a new dataframe with just target products\n targets = pd.DataFrame(columns=['title', 'score', 'num_comments', 'url', 'id', 'part_type', 'prices'])\n if len(target_nums) > 0:\n for i in range(len(target_nums)):\n targets.loc[posts.index[target_nums[i]]] = posts.iloc[target_nums[i]] # Copy everything with target numbers over to new dataframe\n # Change indexing of new dataframe to be 0-n\n size = targets.shape[0]\n indicies = [i for i in range(size)]\n targets['index'] = indicies\n targets.set_index('index', inplace=True)\n posts = posts[2:] # remove the top posts on the subreddit pinned by moderators\n else:\n sys.exit() # No products to show\n\n # Get urls to original posts\n\n post_urls = []\n # print(targets.shape[0])\n # print(targets)\n for i in range(targets.shape[0]):\n # post_urls.append(targets.at[i, 'id'])\n post_urls.append('https://www.reddit.com/r/' + target_reddit_str + '/comments/' + targets.at[i, 'id'] + '/')\n targets['post_url'] = post_urls # add post urls to dataframe\n\n # Calculate the SAVR score. 
Function of 1000 + comment_weight * comments + upvote_weight * upvotes\n scores = []\n for i in range(targets.shape[0]):\n scores.append(math.floor(1000 + targets.at[i, 'score'] * savr_score_upvote_weight + targets.at[i, 'num_comments'] * savr_score_comment_weight))\n targets['scores'] = scores\n targets = targets.sort_values(by=['scores'], ascending=False) # Sort the dataframe by scores determined by the program.\n # targets.to_csv('targets.csv')\n return targets", "def top_ten(subreddit):\n url = \"https://api.reddit.com/r/{}/hot?limit=10\".format(subreddit)\n response = requests.get(url, headers={\"User-Agent\": \"Python3\"})\n if str(response) != \"<Response [200]>\": # response.status_code != 200\n print(None)\n return\n response = response.json()\n child = response[\"data\"][\"children\"]\n for tittle in child:\n print(tittle[\"data\"][\"title\"])", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def Pull_Relevant(flist, DateRange, TermList, OutFile):\n\n TweetCount=0\n for Filename in flist:\n Tweetset_Current = \"Start\"\n print(Filename)\n input_file = open(Filename, 'r')\n raw_batch = islice(input_file, None)\n with open(OutFile, 'a') as f: # append the batch, and close file each time.\n for current_line in raw_batch:\n tweet_item = json.loads(current_line)\n if RelevantTweet(tweet_item, TermList, DateRange):\n f.write(json.dumps(tweet_item))\n f.write('\\n')\n TweetCount=TweetCount+1\n return(TweetCount)", "def create_dataset(df, genre_count, image_count, output_fname):\n\n counts = df['Class'].value_counts()\n filter = list(counts[counts > image_count].index)\n\n # filter for genres that appear image_count times or more\n more_than_img_count = (df[df['Class'].isin(filter)])\n topn = more_than_img_count.groupby('Class').head(image_count)\n topn.sort_values(by='Class', inplace=True, ascending=True)\n topn = topn.head(genre_count*image_count)\n\n topn['Path'] = os.getenv('dataset_location') + \"/\" + topn['Painting']\n\n topn.to_csv(os.path.join(os.getenv('dataset_location'), output_fname), sep=';', index=False)", "def get_top_ratings(self, DataType,UserId, item_count):\n users = self.df[DataType-1].select(self.als.getUserCol())\n #temp = self.ratingsdf.select(self.ratingsdf.GameId,self.ratingsdf.Title)\n #temp.show()\n users = users.filter(users.UserId == UserId)\n userSubsetRecs = self.model[DataType-1].recommendForUserSubset(users, item_count)\n userSubsetRecs = userSubsetRecs.withColumn(\"recommendations\", explode(\"recommendations\"))\n userSubsetRecs = userSubsetRecs.select(func.col('recommendations')['GameId'].alias('GameId')).drop('recommendations')\n \n #userSubsetRecs = userSubsetRecs.join(temp, (\"GameId\"), 'inner')\n userSubsetRecs = userSubsetRecs.toPandas()\n userSubsetRecs = userSubsetRecs.to_json()\n return userSubsetRecs", "def top_ten(subreddit):\n\n settings = 
{'allow_redirects': False, 'headers': {'User-agent': ''}}\n url = \"https://www.reddit.com/r/{}/hot.json\".format(subreddit)\n\n try:\n responses = get(url, **settings).json().get('data').get('children')\n for post in responses[:10]:\n print(post['data']['title'])\n except:\n print(\"None\")", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )" ]
[ "0.5909842", "0.57680744", "0.5694507", "0.56098086", "0.5597891", "0.556882", "0.551984", "0.5511607", "0.550861", "0.547752", "0.54691005", "0.5456482", "0.54545885", "0.54326177", "0.54270613", "0.54165226", "0.54148346", "0.5412836", "0.5410096", "0.53659177", "0.5352422", "0.5339919", "0.5326568", "0.5325788", "0.5323614", "0.5309625", "0.530348", "0.5283091", "0.52655303", "0.52638966" ]
0.6317173
0
Check whether the test has passed by comparing its stdout to what is expected.
def check_test(self, test):
    (stdout, stderr) = (out.decode('ascii').strip() for out in test.process.communicate())
    self.assertEqual(stderr, "")
    self.assertEqual(stdout, EXPCT_RESULTS[test.number], "Test {} failed".format(test.number))
    print("Test {} passed".format(test.number))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_stdout(self, expected: str):\n assert self._std_out is not None, f\"You first need to `execute` the program before checking stdout!\"\n self._test.assertEqual(self._std_out.strip(), expected.strip())", "def testStdoutAndStderr(self):\n with self.OutputCapturer():\n print('foo')\n print('bar', file=sys.stderr)\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)", "def check_cot_output(self, expected):\n sys.stdout = StringIO.StringIO()\n output = None\n try:\n self.instance.run()\n except (TypeError, ValueError, SyntaxError, LookupError):\n self.fail(traceback.format_exc())\n finally:\n output = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n self.maxDiff = None\n self.assertMultiLineEqual(expected.strip(), output.strip())", "def test_runSuccess(self):\n builder = BookBuilder()\n self.assertEquals(\n builder.run([\n sys.executable, '-c',\n 'import sys; '\n 'sys.stdout.write(\"hi\\\\n\"); '\n 'sys.stdout.flush(); '\n 'sys.stderr.write(\"bye\\\\n\"); '\n 'sys.stderr.flush()']),\n \"hi\\nbye\\n\")", "def test_capture_stdout():\n\n sys.stdout.write('Print to stdout')\n\n assert False", "def MayPassTest(self):\n session.console.info('Test results for output volume %r: %r',\n self._output_volumes[self._output_volume_index],\n self._test_results[self._output_volume_index])\n if self._test_results[self._output_volume_index]:\n return True\n return False", "def run_test_tool(self, cmd, expected_status=0 ):\n\n status, output = self.target.run(cmd)\n self.assertEqual(status, expected_status, msg='\\n'.join([cmd, output]))", "def test_subprocess_calls(self):\n self.assertTrue(uut.get_stdout(['echo', 'this']) == 'this\\n')\n self.assertTrue(\n uut.get_outstreams(['python', 'test/str_in_stdout_stderr.py']) ==\n [\"In stdout.\\n\", \"In stderr.\\n\", 17])", "def success(self):\n return self.status == 0 and self.stdout", "def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)", "def CheckExpectedOutput(output, expected):\n failures = 0\n for ex in expected:\n match = re.search(ex, output)\n if not match:\n print('Test match failed:')\n print('Searching for regex:', ex)\n failures += 1\n if failures:\n print('output:\\n', output)\n return failures", "def test_capture_both():\n\n sys.stdout.write('Print to stdout')\n sys.stderr.write('Print to stderr')\n\n assert False", "def evaluate(self, expected_output, actual_output, command=''):\n\n expected_lines = strip_text(expected_output)\n actual_lines = strip_text(actual_output)\n\n if expected_lines != actual_lines:\n print(f'\\nTest \\'{command}\\' failed.\\nDiff:')\n diff = difflib.Differ().compare(expected_lines, actual_lines)\n print('\\n'.join(diff))\n\n self.fail('Test failed.')", "def out_test(self, func, arg, expect):\n std_out = StringIO()\n sys.stdout = std_out\n func(arg)\n output = std_out.getvalue()\n self.assertEqual(output, expect + '\\n')\n return output", "def verify_output(self, output):\n return output == self.output", "def test_capture_output(capsys):\n print(\"hello 
world\")\n out, err = capsys.readouterr()\n assert out == \"hello world\\n\"\n assert err == \"\"", "def check_output(self, cmd, nonzero_e = tc.error_e):\n _exitcode, stdoutf, _stderrf = self.run(cmd, nonzero_e = nonzero_e)\n return stdoutf.read()", "def assert_console_output(self, *output, **kwargs):\r\n self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))", "def assertOutput(self, toExec, argList, expectedStdout=None, \n\t\t\texpectedStderr=\"\", expectedRetcode=0, input=None,\n\t\t\tstdoutStrings=None):\n\t\tfor name in [\"output.stderr\", \"output.stdout\"]:\n\t\t\ttry:\n\t\t\t\tos.unlink(name)\n\t\t\texcept os.error:\n\t\t\t\tpass\n\n\t\tif isinstance(toExec, basestring):\n\t\t\tp = subprocess.Popen([toExec]+argList, executable=toExec, \n\t\t\t\tstdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\telse:\n\t\t\tp = ForkingSubprocess([\"test harness\"]+argList, executable=toExec, \n\t\t\t\tstdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tout, err = p.communicate(input=input)\n\t\tretcode = p.wait()\n\n\t\ttry:\n\t\t\tself.assertEqual(expectedRetcode, retcode)\n\n\t\t\tif isinstance(expectedStderr, basestring):\n\t\t\t\tself.assertEqual(err, expectedStderr)\n\t\t\telse:\n\t\t\t\tself.failUnless(expectedStderr(err))\n\t\texcept AssertionError:\n\t\t\twith open(\"output.stdout\", \"w\") as f:\n\t\t\t\tf.write(out)\n\t\t\twith open(\"output.stderr\", \"w\") as f:\n\t\t\t\tf.write(err)\n\t\t\traise\n\n\t\ttry:\n\t\t\tif isinstance(expectedStdout, basestring):\n\t\t\t\tself.assertEqual(out, expectedStdout)\n\t\t\telif expectedStdout is not None:\n\t\t\t\tself.failUnless(expectedStdout(out))\n\t\t\tif stdoutStrings:\n\t\t\t\tfor s in stdoutStrings:\n\t\t\t\t\tself.failIf(s not in out, \"%s missing\"%s)\n\t\texcept AssertionError:\n\t\t\twith open(\"output.stdout\", \"w\") as f:\n\t\t\t\tf.write(out)\n\t\t\traise", "def test_main_minimal(self, capsys):\n UI.main(**self.args)\n captured = capsys.readouterr().out\n assert self.stdout_output in captured", "def assert_console_output(self, *output, **kwargs):\n self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))", "def test_execute_or_bail_ok(self):\n with self.assertLogs(level=\"INFO\") as cm:\n with etl.commands.execute_or_bail(\"unittest\"):\n pass\n self.assertEqual(len(cm.output), 1)\n self.assertTrue(\"finished successfully\" in cm.output[0])", "def test_output_interception(self):\n expected_output = 'testing, 1, 2, 3 ..'\n actual_output = capture(['echo', expected_output])\n assert actual_output.strip() == expected_output.strip()", "def print_test_results(func_tested, expected, actual):\r\n\r\n if not callable(func_tested):\r\n raise Exception(\"{} is not a function\".format(func_tested))\r\n\r\n func_name = func_tested.__name__\r\n desc = func_tested.__doc__\r\n\r\n if expected == actual:\r\n print \"PASSED: {}\".format(func_name)\r\n else:\r\n print \"FAILED: {}\".format(func_name)\r\n print \"Expect: {}\".format(expected)\r\n print \"Actual: {}\".format(actual)\r\n print \"Desc: {}\".format(desc)\r\n\r\n print \"\"", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n 
self.assertFalse(out.returncode)", "def testStdoutReadDuringCapture(self):\n with self.OutputCapturer():\n print('foo')\n self.AssertOutputContainsLine('foo')\n print('bar')\n self.AssertOutputContainsLine('bar')\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar')", "def check_for_output_match(output, test_suite):\n\n # requirement for this challenge\n if len(test_suite) != 1:\n raise Exception('ERROR IN TEST SUITE. Number of test cases not equal to 1')\n test_case = test_suite[0]\n\n correct_outputs = test_case['output'].splitlines()\n exe_outputs = output.splitlines()\n\n if len(exe_outputs) != len(correct_outputs):\n return None # the user printed something\n\n for exe_output, correct_output in zip(exe_outputs, correct_outputs):\n # check if exe_output has format \"RESULT: <integer>\"\n prefix = \"RESULT: \"\n if (not exe_output.startswith(prefix)):\n return None # the user printed something\n exe_value = exe_output[len(prefix):]\n try:\n int(exe_value)\n except ValueError:\n return None # the user printed something\n \n if (correct_output != exe_value):\n return [False]\n\n return [True]", "def run_tests(self):\n total_tests = len(self.tests)\n this_test_passed = True\n\n with self.board as board:\n board.repl.session = b\"\"\n\n for test in self.tests:\n # we likely had a REPL reset, so make sure we're\n # past the \"press any key\" prompt.\n board.repl.execute(b\"\\x01\", wait_for_response=True)\n\n this_test_passed = True\n\n self.log.write(f\"Starting test: {test.test_file}\")\n\n test_file_path = os.path.join(test.test_dir, test.test_file)\n test_cmds = []\n\n with open(test_file_path, 'r') as current_test:\n test_cmds = current_test.readlines()\n\n for line_no, line in enumerate(test_cmds, start=1):\n if line == \"\\n\":\n continue\n\n self.log.write(\n \"running line: ({0}) {1}\".format(line_no,\n line.rstrip('\\n'))\n )\n\n try:\n if line_no in test.interactions:\n action = test.interactions[line_no][\"action\"]\n value = test.interactions[line_no][\"value\"]\n #print(f\"ACTION: {action}; VALUE: {value}\")\n if action == \"output\":\n self.log.write(\n f\"- Testing for output of: {value}\"\n )\n\n try:\n result = exec_line(board, line)\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n result = str(result,\n encoding=\"utf-8\").rstrip(\"\\r\\n\")\n if result != value:\n this_test_passed = False\n\n self.log.write(\" - Passed!\")\n\n elif action == \"input\":\n self.log.write(f\"- Sending input: {value}\")\n\n try:\n exec_line(board, line, echo=False)\n exec_line(board, value, input=True)\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n elif action == \"verify\":\n self.log.write(f\"- Verifying with: {value}\")\n\n try:\n # import the referenced module\n module_name, func_name = value.split(\".\")\n imprt_stmt = [\".verifiers.\", module_name]\n verifier = importlib.import_module(\n \"\".join(imprt_stmt),\n package=\"rosiepi.rosie\"\n )\n\n # now get the function object using inspect\n # so that we can dynamically run it.\n ver_func = [\n func[1] for func in\n inspect.getmembers(verifier)\n if func[0] == func_name\n ][0]\n #self.log.write(ver_func)\n\n exec_line(board, line)\n result = ver_func(board)\n if not result:\n raise pyboard.CPboardError(\n f\"'{value}' test failed.\"\n )\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n self.log.write(\" - Passed!\")\n\n else:\n board.repl.execute(line)\n\n except pyboard.CPboardError as line_err:\n this_test_passed = 
False\n err_args = [str(arg) for arg in line_err.args]\n err_msg = [\n \"Test Failed!\",\n \" - Last code executed: '{}'\".format(line.strip('\\n')),\n f\" - Line: {line_no}\",\n f\" - Exception: {''.join(err_args)}\",\n ]\n self.log.write(\"\\n\".join(err_msg))\n break\n\n if this_test_passed != True:\n break\n\n test.test_result = this_test_passed\n self.tests_run += 1\n test.repl_session = board.repl.session\n #print(board.repl.session)\n self.log.write(\"-\"*60)\n board.repl.reset()\n\n for test in self.tests:\n if test.test_result == None:\n continue\n elif test.test_result == True:\n self.tests_passed += 1\n elif test.test_result == False:\n self.tests_failed += 1\n\n end_msg = [\n f\"Ran {self.tests_run} of {total_tests} tests.\",\n f\"Passed: {self.tests_passed}\",\n f\"Failed: {self.tests_failed}\",\n ]\n self.log.write(\"\\n\".join(end_msg))", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno\n if did_pass:\n msg = 'Test at line {0} ok.'.format(linenum)\n else:\n msg = 'Test at line {0} FAILED.'.format(linenum)\n print(msg)" ]
[ "0.80546683", "0.7428795", "0.7355473", "0.717167", "0.7111023", "0.7000859", "0.6972296", "0.6900554", "0.682078", "0.6813413", "0.67746073", "0.67362183", "0.67312825", "0.6682966", "0.6635578", "0.66335154", "0.66038454", "0.6588386", "0.65542763", "0.65454364", "0.65435594", "0.65344733", "0.6509144", "0.65088624", "0.6504758", "0.6495158", "0.64903986", "0.647746", "0.64749163", "0.6465074" ]
0.78012776
1
Start the next test.
def start_next_test(self):
    next_test_num = self.test_numbers.popleft()
    self.tests.append(
        self.TEST(
            process=Popen(COMMANDS[next_test_num], stdout=PIPE, stderr=PIPE),
            number=next_test_num))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startTestRun(self):", "def startTest(self, test):\n self._timer = time()", "def test_run_started(self):", "def startTestRun(self, test):\n self.runTime= time.time()\n self.logger.debug(\"\\nBeginning ForceBalance test suite at %s\\n\" % time.strftime('%x %X %Z'))", "def startTest(asset):", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "def run_self_test(self):\n par_values = list(SELF_TEST_LIST)\n\n\n if True:\n # Only ERASynth+ and ERASynth++ have this functionality\n par_values += [(\"reference_tcxo_ocxo\", \"tcxo\")]\n\n num_tests = len(par_values)\n for i, (name, val) in enumerate(par_values):\n print(f\"\\r[{i+1:2d}/{num_tests}] Running...\", end=\"\")\n self.set(name, val)\n\n print(\"\\nDone!\")", "def test_start(self):\n self.fail(\"write a test\")", "def test_start_scan(self):\n pass", "def runTests(self):\n \n pass", "def start(self):\n for trial in self._trials:\n self._run(trial)", "def start_test(self, request):\n request.worker.start_test(request.message.test_id)\n\n return SuccessReply()", "def startTestHook(self):", "def startTestRun(self):\n self.startTime = time.time()\n # Really verbose information\n if self.verbose > 2:\n self.stream.writeln(self.colors.bold(pretty_version() + \"\\n\"))", "def resumeTests(self):\n self.setState('running')\n self.after(100, self.runOneTest)\n return", "def runtest(self):", "def test_begin(self):", "def run(self):\n self.speed_test.start()", "def start_test_mode():\n \n test_string = get_manual_test_string()\n\n while test_string != \"q\":\n test_string = args.bot_name + \" \" + test_string # mimic a reddit comment requester\n new_message = testing_message.Manual_Testing_Message(test_string)\n manage_message(testing_message.Manual_Testing_Message(test_string))\n print(new_message.get_result())\n\n test_string = get_manual_test_string()\n\n print (\"Done testing...\")\n sys.exit()", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def begin(self, tests):\r\n raise NotImplementedError", "def TestOneStep(self):\n pass", "def start_new_test_thread(self):\n GObject.timeout_add(100, self.on_timeout, None)\n def run_tests():\n with self.lock:\n for l, t in zip(self.liststore, self.liststore.tests):\n if l[2]:\n t.test()\n for l, t in zip(self.liststore, self.liststore.tests):\n if l[2]:\n print(t.results)\n \n if not self.test_thread:\n self.test_thread = threading.Thread(target=run_tests)\n self.test_thread.start()", "def next_run(self):\n self.load_run(run=self.run+1)", "def startTest(self, test):\n test = proto_test(test)\n self.start_time = time.time()\n self.reinitialize()\n if 
self.start_callback:\n self.start_callback(test)", "def start(self, total: int, name: str = None):\n\n # Clean the run\n self.test_run = RunElements()\n\n if name is not None:\n self.test_run.name = name\n\n self.test_run.total = total\n\n # Init the start run date\n from datetime import datetime\n self.test_run.date = datetime.now().strftime(\"%d-%m-%Y (%H:%M)\")\n\n self.__send_all()", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def startTest(self, test):\n\n super(ForceBalanceTestResult, self).startTest(test)\n self.logger.debug(\"\\n>>> Starting %s\\n\" % test.id())\n self.logger.info(\">>> \" + test.shortDescription())", "def process_test_start(self, config, results, result_id, db):\n pass", "def start_prime_test():" ]
[ "0.7740946", "0.71101433", "0.7070812", "0.70663023", "0.7035438", "0.6931316", "0.6830204", "0.6806938", "0.6774849", "0.6727763", "0.669437", "0.66846573", "0.6676877", "0.6663228", "0.66558033", "0.6652603", "0.6652206", "0.66263163", "0.6597661", "0.6591751", "0.6574502", "0.6558498", "0.6557169", "0.65321344", "0.6519005", "0.6507112", "0.6445289", "0.6445147", "0.64383745", "0.6433389" ]
0.8345263
0
Poll tests for completion. When one finishes, start another one if there are more to run. Stop when all are finished.
def poll_tests(self):
    for i, test in enumerate(self.tests):
        if test.process.poll() is not None:
            self.check_test(test)
            self.tests.pop(i)
            if self.test_numbers:
                self.start_next_test()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wait_for_all_operations_done(self):\n while self._test_names_to_processes:\n time.sleep(10)\n running_test_names = list(self._test_names_to_processes.keys())\n for test_name in running_test_names:\n running_proc = self._test_names_to_processes.get(test_name)\n return_code = running_proc.poll()\n if return_code is not None:\n test_case_state = self._test_names_to_test_states.get(test_name)\n self._handle_failure(running_proc, test_case_state.running_test)\n del self._test_names_to_processes[test_name]\n print('Started validating: {}'.format(test_name))\n test_case_state.running_test.validate_result()\n self._run_test(test_case_state.remaining_tests)", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_job(self):\r\n while not self._coordinator.should_stop():\r\n with self._condition:\r\n self._condition.wait_for(self._true_if_testing)\r\n if self._coordinator.should_stop():\r\n break\r\n should_stop = False\r\n try:\r\n should_stop = self.do_full_test()\r\n except:\r\n traceback.print_exc()\r\n self._is_testing = False\r\n if should_stop is True:\r\n break\r\n logger.debug('Exiting thread %s' % threading.current_thread().name)", "def resumeTests(self):\n self.setState('running')\n self.after(100, self.runOneTest)\n return", "def finished_tests(self):\n self.testing = 0", "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def test_is_finished(self):\n experiment = Experiment(TasksMock())\n self.assertEquals(False, experiment.is_finished())\n for _ in range(0, 17):\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(True, experiment.is_finished())", "def __execute_tests(self, lst_tests):\n tests_pass = tests_fail = 0\n queue_of_result = multiprocessing.Queue()\n for test in lst_tests:\n process = multiprocessing.Process(\n target=TestRunner.__helper_execute_test,\n kwargs={\"test_cls\": test,\n \"time_out\": self.__args.timeout,\n \"channel\": queue_of_result})\n process.start()\n process.join()\n temp_result = {}\n if not queue_of_result.empty():\n temp_result = queue_of_result.get_nowait()\n\n if \"status\" in temp_result:\n if temp_result[\"status\"] == result.Status.PASSED:\n tests_pass += 1\n else:\n tests_fail += 1\n\n if \"json_path\" in temp_result:\n self.__lst_json_files.append(temp_result[\"json_path\"])\n\n if \"log_path\" in temp_result:\n self.__lst_log_files.append(temp_result[\"log_path\"])\n\n return tests_pass, tests_fail", "def finished_tests(self):\n self.testing = 0\n if not self.closing:\n self.enable_menus(1)\n self.parent.finished_tests()", "def test_concurrent_test_runs(self):\n num_passing_tests = 20\n num_failing_tests = 20\n num_error_tests = 20\n total_num_tests = num_passing_tests + num_failing_tests + num_error_tests\n\n times = [0] + [i for i in range(2 * total_num_tests)\n ] + [2 * total_num_tests - 1]\n result = self._make_result(times)\n threads = []\n names = []\n result.startTestRun()\n for i in range(num_passing_tests):\n name = 'passing_concurrent_test_%s' % i\n 
names.append(name)\n test_name = '__main__.MockTest.%s' % name\n # xml_reporter uses id(test) as the test identifier.\n # In a real testing scenario, all the test instances are created before\n # running them. So all ids will be unique.\n # We must do the same here: create test instance beforehand.\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_passing_test, args=(test, result)))\n for i in range(num_failing_tests):\n name = 'failing_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_failing_test, args=(test, result)))\n for i in range(num_error_tests):\n name = 'error_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_error_test, args=(test, result)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n result.stopTestRun()\n result.printErrors()\n tests_not_in_xml = []\n for tn in names:\n if tn not in self.xml_stream.getvalue():\n tests_not_in_xml.append(tn)\n msg = ('Expected xml_stream to contain all test %s results, but %s tests '\n 'are missing. List of missing tests: %s' % (\n total_num_tests, len(tests_not_in_xml), tests_not_in_xml))\n self.assertEqual([], tests_not_in_xml, msg)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def Waive(self):\n self.waived = True\n\n for test in self.subtests:\n test.Waive()\n\n if self.GetState().status == TestState.FAILED:\n self.UpdateState(status=TestState.FAILED_AND_WAIVED)", "def test_run_ended(self):", "def test_all(self, func):\n passes = 0\n fails = []\n start = time.time()\n futures = {}\n # open an executor\n with getattr(concurrent.futures, self.executor)(max_workers=self.workers) as exec:\n # walk through datasets\n for pdir, sdir, files in os.walk(self.DATA_DIR):\n for file in files:\n # if the file needs processing, submit it into the queue\n filepath = osp.join(pdir, file)\n if self.file_should_be_processed(filepath):\n future = exec.submit(func, filepath)\n futures[future] = filepath\n\n # return results\n for test_num, future in enumerate(concurrent.futures.as_completed(futures)):\n stuff_to_print = [test_num, future.result()]\n if future.result() == 'Success':\n passes += 1\n if self.print_success_path:\n stuff_to_print.append(futures[future])\n else:\n fails += [futures[future]]\n print(*stuff_to_print)\n\n end = time.time() - start\n print('Processing of {} files took {:3.1f}s ({:3.2f}s/item). 
{} passed; {} failed.'.format(test_num, end, end/test_num, passes, len(fails)))\n if len(fails) > 0:\n pprint.pprint(\"Failures: {}\".format(fails))\n if self.write_failures_to_file:\n with open('failures_{}.txt'.format(osp.basename(self.DATA_DIR)), mode='w') as f:\n for file in fails:\n f.write(file + '\\n')\n print(\"Failures written to file\")", "def _RunTests(tests: List[_Test], parallelism: int) -> bool:\n running_tests = set()\n finished_tests = set()\n tests_to_run = sorted(tests, reverse=True)\n while tests_to_run or running_tests:\n time.sleep(0.2) # 200ms\n updated_finished = set(t for t in running_tests if t.Finished())\n running_tests = running_tests - updated_finished\n while tests_to_run and len(running_tests) < parallelism:\n t = tests_to_run.pop()\n t.Run()\n running_tests.add(t)\n\n newly_finished = updated_finished - finished_tests\n finished_tests.update(updated_finished)\n for test in newly_finished:\n logging.info(\"%s\\t%s\\t%.1fs\", test,\n \"PASSED\" if test.Succeeded() else \"FAILED\",\n test.finish_time - test.begin_time)\n if newly_finished:\n logging.flush()\n\n failed_tests = sorted([t for t in tests if not t.Succeeded()])\n logging.info(\"Ran %d tests. %d failed.\", len(tests), len(failed_tests))\n logging.flush()\n\n for ft in failed_tests:\n ft.PrintLogs()\n\n return not failed_tests", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def testRunnerClose(runfixture, vagrant_box, runner_class, channel_class, gw):\n\n # Make some sleepy jobs\n for j in runfixture.jobs:\n with open(os.path.join(j.path, \"job_files\", \"runjob\"), \"a\") as runjob:\n runjob.write(\"sleep 1200\\n\")\n\n runner = _createRunner(runner_class, runfixture, vagrant_box, None)\n indyrunner = _createRunner(runner_class, runfixture, vagrant_box, None)\n\n f1 = runner.runBatch(runfixture.jobs[:6])\n f2 = runner.runBatch(runfixture.jobs[6:8])\n\n # Create a second runner to make sure that closing one runner doesn't affect the other.\n if3 = indyrunner.runBatch(runfixture.jobs[8:])\n\n assert gevent.wait(\n [\n gevent.spawn(waitcb, f1),\n gevent.spawn(waitcb, f2),\n gevent.spawn(waitcb, if3),\n ],\n 60,\n )\n\n jr1 = f1._submittedQSRecords[0]\n jr2 = f2._submittedQSRecords[0]\n ij3 = if3._submittedQSRecords[0]\n\n assert jr1.submit_event.wait(60)\n assert jr1.jobId\n\n assert jr2.submit_event.wait(60)\n assert jr2.jobId\n\n assert ij3.submit_event.wait(60)\n assert ij3.jobId\n\n gevent.sleep(0)\n\n\n # Spin up a pbs_channel and check we can see the two jobs\n ch = channel_class(gw, \"check_channel\", nocb=True)\n try:\n\n def qsel():\n ch.send({\"msg\": \"QSELECT\"})\n msg = next(ch)\n assert \"QSELECT\" == msg.get(\"msg\", None)\n running_pbsids = set(msg[\"job_ids\"])\n return running_pbsids\n\n pbsids = set([jr1.jobId, jr2.jobId])\n running_pbsids = qsel()\n assert pbsids.issubset(running_pbsids)\n assert pbsids != running_pbsids\n\n # Check the job directories exist\n for j in f1.jobs:\n assert _remoteIsDir(gw, j.remotePath)\n\n for j in f2.jobs:\n assert _remoteIsDir(gw, j.remotePath)\n\n for j in if3.jobs:\n assert _remoteIsDir(gw, j.remotePath)\n\n # Now close the runner\n closevent = runner.close()\n assert closevent.wait(60)\n attempts = 5\n delay = 5\n for i in range(5):\n try:\n assert qsel() == set([ij3.jobId])\n except AssertionError:\n if i == attempts - 1:\n raise\n else:\n gevent.sleep(delay)\n delay *= 2.0\n\n # Check the job directories exist\n for j in f1.jobs:\n assert not _remoteIsDir(gw, j.remotePath)\n\n for j in f2.jobs:\n assert not 
_remoteIsDir(gw, j.remotePath)\n\n for j in if3.jobs:\n assert _remoteIsDir(gw, j.remotePath)\n\n # Now close the first batch\n closevent = f1.terminate()\n assert closevent.wait(60)\n attempts = 5\n delay = 1\n for i in range(5):\n try:\n assert qsel() == set([ij3.jobId])\n except AssertionError:\n if i == attempts - 1:\n raise\n else:\n gevent.sleep(delay)\n delay *= 2.0\n\n # Check the job directories exist\n for j in f1.jobs:\n assert not _remoteIsDir(gw, j.remotePath)\n\n for j in f2.jobs:\n assert not _remoteIsDir(gw, j.remotePath)\n\n for j in if3.jobs:\n assert _remoteIsDir(gw, j.remotePath)\n\n try:\n runner._inner._pbschannel.send({\"msg\": \"QSELECT\"})\n assert False, \"IOError not raised\"\n except IOError:\n pass\n\n finally:\n ch.send(None)", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def test_serial_runs(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_delayed, use_instances=True)\n\n run_info = TrialInfo(config=2, instance=\"test2\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n run_info = TrialInfo(config=3, instance=\"test3\", seed=0, budget=0.0)\n runner.submit_trial(run_info)\n\n results = runner.iter_results()\n\n first = next(results, None)\n assert first is not None\n\n second = next(results, None)\n assert second is not None\n\n # To make sure runs launched serially, we just make sure that the end time of a run\n # is later than the other # Results are returned in left to right\n _, first_run_value = first\n _, second_run_value = second\n assert int(first_run_value.endtime) <= int(second_run_value.starttime)", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "async def test_timings(hass: ha.HomeAssistant, skip_setup):\n\n for fname in glob.glob(test_config_dir + \"timing_*.yaml\"):\n print(f\"Processing: {fname}\")\n config = CONFIG_SCHEMA(load_yaml_config_file(fname))\n\n if ha.DOMAIN in config:\n await async_process_ha_core_config(hass, config[ha.DOMAIN])\n coordinator = IUCoordinator(hass).load(config[DOMAIN])\n\n next_time = dt_util.utcnow()\n interval = coordinator.track_interval()\n while not coordinator.tester._initialised or coordinator.tester.is_testing:\n await coordinator._async_timer(next_time)\n next_time += interval\n\n assert (\n coordinator.tester.total_events\n == coordinator.tester.total_checks\n == coordinator.tester.total_results\n )\n assert coordinator.tester.total_errors == 0\n print(\n \"Finished: {0}; events: {1}; checks: {2}; errors: {3}; time: {4:.2f}s\".format(\n fname,\n coordinator.tester.total_events,\n coordinator.tester.total_checks,\n coordinator.tester.total_errors,\n coordinator.tester.total_time,\n )\n )", "def runTestSuites(self):\n \n self.testsuitesToXML()\n \n\n tss = []\n jobStatus = {}\n for t in self.testsuites:\n d = t.testsuitedir\n runner = os.path.join(self.basepath, 'testSuiteRunner.py')\n tdir = os.path.join(d, 'testsuite.out')\n cmd = 'python %s %s>& %s' % (runner, d,tdir)\n #print 'about to popen the cmd: %s' % cmd\n tss.append((t.name, popen2.Popen3(cmd)))\n jobStatus[t.name] = ('running', nowSecs())\n ntests = len(tss)\n printJobStatus(jobStatus)\n\n while tss:\n toRemove = [p for p in tss if p[1].poll() != -1]\n if toRemove:\n [tss.remove(p) for p in toRemove]\n for p in toRemove:\n jobStatus[p[0]] = ('completed', nowSecs())\n\n printJobStatus(jobStatus)\n time.sleep(10)\n\n print 'all %d tests have completed' % 
ntests", "def start_next_test(self):\n next_test_num = self.test_numbers.popleft()\n self.tests.append(\n self.TEST(\n process=Popen(COMMANDS[next_test_num],\n stdout=PIPE,\n stderr=PIPE),\n number=next_test_num))", "def test_concurrent_add_and_delete_pending_test_case_result(self):\n result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,\n None)\n def add_and_delete_pending_test_case_result(test_name):\n test = MockTest(test_name)\n result.addSuccess(test)\n result.delete_pending_test_case_result(test)\n\n for i in range(50):\n add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)\n self.assertEqual(result.pending_test_case_results, {})", "def runTillVerdict(self):\n while True:\n #we wait for a maximum of 60 seconds for each test to complete\n i = self.localShell.expect_list(self.logPatterns, 60)\n if i == 0:\n testCaseNumber = int(self.localShell.match.group(1))\n \n if (self.results['testIndex'] != testCaseNumber):\n print 'Warning: Missing tests between %s and %s' % (self.results['testIndex'], testCaseNumber)\n \n self.results['testIndex'] = testCaseNumber+1\n \n print '\\ttest %s completed' % testCaseNumber\n self.qmtTestEndHook(testCaseNumber)\n \n elif i == 1:\n print 'End reached:%s:' % self.localShell.match.group()\n self.results['verdict'] = 'PASS'\n break\n \n else:\n print 'EOF or timeout'\n self.results['verdict'] = 'FAIL'\n break\n\n return self.results", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def run_tests():\n passed_tests = 0\n failed_tests = 0\n for case in TEST_CASES:\n start_date, end_date = [datetime.strptime(x, \"%d/%m/%Y\") for x in case[0]]\n experiment = Experiment(start_date, end_date)\n if experiment.duration() == case[1]:\n result = \"passed\"\n passed_tests += 1\n else:\n result = \"failed\"\n failed_tests += 1\n print(f\"\"\"{\"-\".join(case[0])}, {case[1]} days: Test {result}\"\"\")\n\n print(\n f\"All tests completed\\n\"\n f\"Number of tests passed: {passed_tests}\\n\"\n f\"Number of tests failed: {failed_tests}\"\n )" ]
[ "0.73554826", "0.674941", "0.65526724", "0.6549344", "0.6529588", "0.64772254", "0.64764065", "0.6411808", "0.6316884", "0.62365717", "0.6232344", "0.6232344", "0.6232344", "0.6232344", "0.6192078", "0.6168625", "0.6148079", "0.6129336", "0.60931456", "0.609054", "0.6083866", "0.60397404", "0.6016598", "0.6011115", "0.6008898", "0.6004731", "0.5997826", "0.59812635", "0.59662956", "0.59332925" ]
0.7962783
0
Parse the tests to be run. These may be given as a single number, a comma-separated list or two numbers separated by a dash.
def parse_tests(tests_input):
    if '-' in tests_input:
        limits = tests_input.partition('-')
        tests = list(range(int(limits[0]), int(limits[2]) + 1))
    else:
        tests = [int(t) for t in tests_input.split(',')]
    return tests
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_parser():\n return parser(\"Testing\", \"Use this from a test\", \"\")", "def test_multiple_series(self):\n assert parse_command('test{{A,B}}{{1,2}}') == [\n ('testA1', {}), ('testA2', {}), ('testB1', {}), ('testB2', {})]", "def parse(lines):\n num_tests = int(lines.next())\n tests = [TreasureTest(lines) for _i in range(num_tests)]\n return tests", "def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])", "def tests() -> None:\n assert input_parser(\"123\") == '123'\n assert input_parser(\"(add 12 12)\") == '24'\n assert input_parser(\"(add 0 (add 3 4))\") == '7'\n assert input_parser(\"(add 3 (add (add 3 3) 3))\") == '12'\n assert input_parser(\"(multiply 3 (multiply (multiply 3 3) 3))\") == '81'\n assert input_parser(\"(multiply 2 (multiply 3 4))\") == '24'\n assert input_parser(\"(multiply 0 (multiply 3 4))\") == '0'\n\n assert input_parser(\"(add 4 1)\") == '5'\n assert input_parser(\"(multiply 4 1)\") == '4'\n \n assert input_parser(\"(add 4 (add 1 8))\") == '13'\n assert input_parser(\"(add (add 1 8) 4)\") == '13'\n assert input_parser(\"(multiply (multiply 1 2) 12)\") == '24'\n assert input_parser(\"(multiply 4 (multiply 8 12))\") == '384'\n\n assert input_parser(\"(add (multiply 4 5) (multiply 10 10))\") == '120'\n assert input_parser(\"(add (multiply (add 4 (add 3 (add 3 (add 3 (add 1 (multiply 4 5)))))) 5) (multiply 10 10))\") == '270'\n \n assert input_parser(\"(add (multiply 4 5) (multiply 10 10) (add 1 2 3 4 5 6 7 (add 4 4) 9) (multiply 4 5))\") == '185'\n\n assert input_parser('(subtract 2 1)') == '1'\n assert input_parser(\"(divide 55 5)\") == '11'", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def test_parse_example1(example1):\n assert example1 == [12, 14, 1969, 100756]", "def run_tests():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Suite or test name\")\n parser.add_argument('-b','--bin-dir',help=\"Directory where Firebird binaries tools are\")\n parser.add_argument('-d','--db-dir',help=\"Directory to use for test databases\")\n parser.add_argument('--archive',action='store_true',help=\"Save last run results to archive\")\n parser.add_argument('--rerun',action='store_true',help=\"Run only tests that don't PASSed in last run\")\n parser.add_argument('--untested',action='store_true',help=\"Run only tests that were UNTESTED in last run\")\n parser.add_argument('-v','--verbose',action='store_true',help=\"Be more verbose\")\n parser.add_argument('--verbosity',type=int,choices=[0,1,2],default=1,help=\"Set verbosity; --verbosity=2 is the same as -v\")\n parser.add_argument('-q','--quiet',action='store_true',help=\"Be less verbose\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Provides test results also in the standard XUnit XML format\")\n parser.add_argument('-e','--expect',type=str,metavar=\"FILENAME\",help=\"Test results file to be 
used as expeted outcomes\")\n if rpyc_available:\n parser.add_argument('--remote',action='store_true',help=\"Connect to remote fbtest server\")\n\n parser.add_argument('-u','--update',action='store_true',help=\"Update last run results with re-run results\")\n parser.add_argument('-w','--password',help=\"SYSDBA password\")\n parser.add_argument('-o','--host',help=\"Remote Firebird or fbtest host machine identification\")\n parser.add_argument('-p','--person',help=\"QA person name\")\n parser.add_argument('-a','--arch',help=\"Firebird architecture: SS, CS, SC, EM\")\n parser.add_argument('-s','--sequence',type=int,help=\"Run sequence number for this target\")\n parser.add_argument('-k','--skip',help=\"Suite or test name or name of file with suite/test names to skip\")\n parser.add_argument('-c','--client',help=\"Use specified Firebird client library\")\n parser.set_defaults(rerun=False,untested=False,update=False,server=False,register=False,\n remote=False,host='localhost',password='masterkey',\n sequence=1,arch='SS',person=UNKNOWN)\n\n script_runner.run_tests(parser.parse_args())", "def test_args(self):\n self.assertEqual(self.parser.N_particles, 500)\n self.assertEqual(self.parser.reduced_T, 0.9)\n self.assertEqual(self.parser.reduced_rho, 0.9)\n self.assertEqual(self.parser.n_steps, 1000000)\n self.assertEqual(self.parser.freq_ener, 1000)\n self.assertEqual(self.parser.freq_traj, 1000)\n self.assertEqual(self.parser.max_d, 0.1)\n self.assertEqual(self.parser.energy, 'UnitlessLJ')", "def run_and_parse(test_description: Tuple[str, str, List[str]]):\n test_executable, test_name, performance_counters = test_description\n try:\n test_output = run_test(test_executable, test_name, performance_counters)\n print(f'Finished running test {test_name}', file=sys.stderr)\n return (test_name, parse_perf_stat_output(test_output,\n performance_counters))\n except RuntimeError:\n return None", "def test_basic_parsers():", "def run_tests():\r\n p = Parser(b\"0About prices\\tPrices/aboutus\\tserver.example.com\\t70\\r\\n\".decode(\"ascii\"))\r\n print(p._parse_dir())", "def test_simple_series(self):\n assert parse_command('test{{A,B}}') == [('testA', {}), ('testB', {})]", "def test_smoke():\n raise SkipTest\n parse('[8]')\n parse('[show \"hey\"]')\n parse('[frob thing with thong]')\n parse('[\"this\" \"thing\"]')\n parse('[[] []]')\n parse('[key: value key2: value2 orphan]')\n parse('[1 + (2 + 3)]')\n parse('[funcs: [term/on-red 8 \"foo\"]]')", "def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' 
tests failed')\n if failing_tests != []:\n sys.exit(1)", "def test_numbers(number):\n print(\"\\nRunning test_numbers with {}\".format(number))", "def _run_unittest(tests, verbose, use_gdb, gtest_filter, gtest_list_tests):\n failed_tests = []\n unfound_tests = []\n for test in tests:\n index = 1\n while True:\n test_info = _read_test_info('%s.%d.json' % (test, index))\n if not test_info:\n # The format of test info file is [test name].[index].json, where index\n # is one of consecutive numbers from 1. If the test info file for index\n # 1 is not found, that means the corresponding test does not exist.\n if index == 1:\n unfound_tests.append(test)\n break\n command = _construct_command(test_info, gtest_filter, gtest_list_tests)\n if verbose:\n print 'Running:', command\n args = shlex.split(command)\n if use_gdb:\n unittest_util.run_gdb(args)\n else:\n returncode = subprocess.call(args)\n if returncode != 0:\n print 'FAILED: ' + test\n failed_tests.append('%s.%d' % (test, index))\n index += 1\n if unfound_tests:\n print 'The following tests were not found: \\n' + '\\n'.join(unfound_tests)\n if failed_tests:\n print 'The following tests failed: \\n' + '\\n'.join(failed_tests)\n if unfound_tests or failed_tests:\n return -1\n return 0", "def run_tests(tests):\n return [test(t) for t in tests]", "def parse_input(command_input=None):\n parser = argparse.ArgumentParser(\n prog='python3 ok',\n description=__doc__,\n usage='%(prog)s [--help] [options]',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n testing = parser.add_argument_group('running tests')\n testing.add_argument('-q', '--question', type=str, action='append',\n help=\"run tests for a specific question\")\n testing.add_argument('--suite', type=str, default=None,\n help=\"run cases from a specific suite\")\n testing.add_argument('--case', type=str, action='append',\n help=\"run specific cases\")\n testing.add_argument('-u', '--unlock', action='store_true',\n help=\"unlock tests interactively\")\n testing.add_argument('-i', '--interactive', action='store_true',\n help=\"start the Python interpreter after a failed test\")\n testing.add_argument('-v', '--verbose', action='store_true',\n help=\"show all tests (not just passing tests) up to failing line (if any)\")\n testing.add_argument('-t', '--testing', nargs='?', type=str, const='mytests.rst',\n help='run tests from rst file (default: mytests.rst)')\n testing.add_argument('--all', action='store_true',\n help=\"run tests for all questions in config file\")\n testing.add_argument('--submit', action='store_true',\n help=\"submit the assignment\")\n testing.add_argument('--backup', action='store_true',\n help=\"attempt to reliably backup your work\")\n testing.add_argument('--revise', action='store_true',\n help=\"submit composition revision\")\n testing.add_argument('--timeout', type=int, default=10,\n help=\"set the timeout duration (in seconds) for running tests\")\n testing.add_argument('-cov', '--coverage', action='store_true',\n help=\"get suggestions on what lines to add tests for\")\n testing.add_argument('--autobackup', action='store_true',\n help=\"back up your work every minute in the background\")\n # runs an autobackup in the foreground. 
Used by `--autobackup`,\n # if you specify this other options will be ignored.\n testing.add_argument('--autobackup-actual-run-sync', action='store_true',\n help=argparse.SUPPRESS)\n # Debugging\n debugging = parser.add_argument_group('debugging tools for students')\n\n debugging.add_argument('--trace', action='store_true',\n help=\"trace code and launch python tutor\")\n debugging.add_argument('--trace-print', action='store_true',\n help=\"print the trace instead of visualizing it\")\n\n # Experiments\n experiment = parser.add_argument_group('experiment options')\n experiment.add_argument('--no-experiments', action='store_true',\n help=\"do not run experimental features\")\n experiment.add_argument('--hint', action='store_true',\n help=\"give a hint (if available)\")\n experiment.add_argument('--style', action='store_true',\n help=\"run AutoStyle feedback system\")\n experiment.add_argument('--collab', action='store_true',\n help=\"launch collaborative programming environment\")\n\n # Debug information\n debug = parser.add_argument_group('ok developer debugging options')\n debug.add_argument('--version', action='store_true',\n help=\"print the version number and exit\")\n debug.add_argument('--tests', action='store_true',\n help=\"display a list of all available tests\")\n debug.add_argument('--debug', action='store_true',\n help=\"show debugging output\")\n\n # Grading\n grading = parser.add_argument_group('grading options')\n grading.add_argument('--lock', action='store_true',\n help=\"lock the tests in a directory\")\n grading.add_argument('--score', action='store_true',\n help=\"score the assignment\")\n grading.add_argument('--score-out', type=str,\n nargs='?', const=None, default=None,\n help=\"write scores to a file\")\n grading.add_argument('--config', type=str,\n help=\"use a specific configuration file\")\n grading.add_argument('--ignore-empty', action='store_true',\n help=\"ignore empty doctests\")\n\n # Encrypt\n crypt = parser.add_argument_group('encryption')\n crypt.add_argument('--generate-encryption-key', type=str,\n help='generates a JSON file containing a list of [(file, key)] pairs. Path is a keyfile')\n crypt.add_argument('--encrypt', type=str,\n help='encrypt each problem. 
provide a path to a keyfile generated by --generate-encryption-key')\n crypt.add_argument('--encrypt-padding', type=int,\n help='If provided, pads all plaintexts to this size in bytes.'\n 'Errors if any of the files are longer than this')\n crypt.add_argument('--decrypt', type=str, nargs='*',\n help='decrypt all problems where the given keys apply')\n\n # Server parameters\n server = parser.add_argument_group('server options')\n server.add_argument('--local', action='store_true',\n help=\"disable any network activity\")\n server.add_argument('--nointeract', action='store_true',\n help=\"disable prompts to user\")\n server.add_argument('--server', type=str,\n default='okpy.org',\n help=\"set the server address\")\n server.add_argument('--authenticate', action='store_true',\n help=\"authenticate, ignoring previous authentication\")\n server.add_argument('--no-browser', action='store_true',\n help=\"do not use a web browser for authentication\")\n server.add_argument('--get-token', action='store_true',\n help=\"get ok access token\")\n server.add_argument('--insecure', action='store_true',\n help=\"use http instead of https\")\n server.add_argument('--no-update', action='store_true',\n help=\"do not check for ok updates\")\n server.add_argument('--update', action='store_true',\n help=\"update ok and exit\")\n # used in faded-parsons-frontend repo\n server.add_argument('--parsons', action='store_true', \n help=\"run parsons problems in browser\") \n\n return parser.parse_args(command_input)", "def run_tests():\n passed_tests = 0\n failed_tests = 0\n for case in TEST_CASES:\n start_date, end_date = [datetime.strptime(x, \"%d/%m/%Y\") for x in case[0]]\n experiment = Experiment(start_date, end_date)\n if experiment.duration() == case[1]:\n result = \"passed\"\n passed_tests += 1\n else:\n result = \"failed\"\n failed_tests += 1\n print(f\"\"\"{\"-\".join(case[0])}, {case[1]} days: Test {result}\"\"\")\n\n print(\n f\"All tests completed\\n\"\n f\"Number of tests passed: {passed_tests}\\n\"\n f\"Number of tests failed: {failed_tests}\"\n )", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def alltests(opts):\n \n print \"API Root: %s\" % options.apiroot\n print \"Token: %s\" % options.token\n print \"Output dir: %s\" % options.output\n print \"Running %d%% of tests\" % options.percent\n print\n \n # need to use DEPT-001, not ID#\n coursehistory_tests = [\n # basic tests:\n \"cis-120\", \"math-114\", \"engl-101\", \"econ-001\",\n # miscellaneously somewhat problematic:\n \"engl-016\", \"law-205\", \"hpr-612\", \"rels-414\", \"nurs-322\",\n \"writ-030\", \"be-310\", \"psci-010\", \"psci-136\",\n # crosslistings:\n \"engl-135\", \"writ-135\", \"fnar-264\", \"cogs-001\", \"russ-048\", \"hist-048\",\n # no reviews?:\n \"afam-271\", \"ames-071\", \"slav-532\", \"afam-285\", \"prtg-213\", \"slav-533\",\n # errors:\n \"99999\", \"moo\",\n ]\n\n instructor_tests = [\n # basic tests:\n \"403\", \"631\", \"1883\", \"2217-FERNANDO-C--PEREIRA\", \"1602-BENJAMIN-PIERCE\",\n # crosslistings:\n \"1034-LYLE-H-UNGAR\", \"2709-DAVID-P--COMBERG\",\n # miscellaneously somewhat problematic:\n \"1040-DAVID-FOX\", \"4268-BART-GERARD-C-DE-JONGHE\",\n # the instructors w/ the most sections\n \"1883\", \"1619\", \"2869\", \"942\", \"1644\", \"541\", \"767\", \"434\",\n # concerned citizens:\n \"1759-MAX-C--CAVITCH\", \"2824-TIMOTHY-CORRIGAN\",\n \"1763-EMILY-R-STEINER\", \"1624-VALERIE-ROSS\",\n # no reviews?:\n \"416-LUDO-ROCHER\", \"715-ELIZABETH-ANN-POLLARD\", 
\"1094-MARIA-A-COWLES\",\n \"1500-ANDREW-GALLIA\", \"1888-RUSSELL-DILEO\",\n \"1450-SORMANE-PEREIRA-GOMES\", \"2188-HUI-YI-CHEN\", \"1165-DOMENIC-VITIELLO\",\n \"2359-CLAUDIA-CANCINO\", \"2737-SHEN-WANG\", \"3229-BERLE-WHITBY\",\n # errors:\n \"99999\", \"moo\",\n ]\n\n dept_tests = [\n #fast\n \"CSE\", \"LAW\", \"ANAT\", \"KORN\", \"LATN\", \"COGS\", \"MSCI\", \"GAS\",\n #medium\n \"CIS\", \"MATH\", \"FNAR\", \"ACCT\", \"FNCE\", \"BE\", \"MUSC\", \"OPIM\",\n #slow\n #\"SPAN\", \"NURS\", \"ENGL\",\n #error\n \"EROR\"]\n\n index_tests = [\"\", \"instructors\", \"coursehistories\", \"depts\",\n \"semesters\", \"semesters/2010c\"]\n\n course_tests = [] # filled in by coursehistory_tests\n\n for t in fraclist(index_tests, options.percent):\n test(opts, t)\n \n for t in fraclist(coursehistory_tests, options.percent):\n obj = test(opts, \"coursehistories/%s\" % t)\n test(opts, \"coursehistories/%s/reviews\" % t)\n\n # now \"find\" some courses\n course_tests.append(\"2010c-%s\" % t)\n try:\n courseid = sorted(obj[\"result\"][\"courses\"])[0][\"id\"]\n course_tests.append(courseid)\n except (TypeError, KeyError, IndexError):\n pass\n \n for t in course_tests: # don't fraclist an autogenerated list\n # Some of the autogenerated courses don't exist, so ignore errors.\n root_success = test(opts, \"courses/%s\" % t, lderror_ok=True)\n if root_success:\n # Course exists, don't expect errors.\n test(opts, \"courses/%s/reviews\" % t)\n test(opts, \"courses/%s/coursehistories/\" % t)\n test(opts, \"courses/%s/sections\" % t)\n \n if test(opts, \"courses/%s/sections/001\" % t, lderror_ok=True):\n test(opts, \"courses/%s/sections/001/reviews\" % t) \n if '-' in str(t): # if we have a yyyys-dept-num test\n test(opts, \"sections/%s-001\" % t)\n # not tested: sections/001/reviews/instructor-id\n test(opts, \"courses/%s/sections/401\" % t, lderror_ok=True)\n \n for t in fraclist(instructor_tests, options.percent):\n test(opts, \"instructors/%s\" % t)\n test(opts, \"instructors/%s/sections\" % t)\n test(opts, \"instructors/%s/reviews\" % t)\n \n for t in fraclist(dept_tests, options.percent):\n test(opts, \"depts/%s\" % t)\n test(opts, \"depts/%s/reviews\" % t)\n test(opts, \"semesters/2010c/%s\" % t)", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_spec(\"test_cmd_parser\"))\n return testSuite", "def run_test_cases(self):\n line = (\n '{reindeer} can fly {speed} km/s for {time} seconds'\n ', but then must rest for {rest} seconds.'\n )\n inputs = (\n line.format(reindeer='Comet', speed=14, time=10, rest=127),\n line.format(reindeer='Dancer', speed=16, time=11, rest=162),\n line.format(reindeer='Vixen', speed=18, time=12, rest=207),\n line.format(reindeer='Prancer', speed=20, time=13, rest=264),\n )\n test_cases = (\n solver.TestCase('\\n'.join(inputs[:1]), 2660, 2503),\n solver.TestCase('\\n'.join(inputs[:2]), 2660, 1564),\n solver.TestCase('\\n'.join(inputs[:3]), 2660, 1101),\n solver.TestCase('\\n'.join(inputs), 2660, 994),\n solver.TestCase('\\n'.join(inputs[1:]), 2640, 1201),\n solver.TestCase('\\n'.join(inputs[2:]), 2592, 1517),\n solver.TestCase('\\n'.join(inputs[3:]), 2540, 2503),\n )\n for test_case in test_cases:\n self._run_test_case(test_case)", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = 
test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Discover and run your tests\")\n parser.add_argument('test_path', metavar='test_path', type=str, nargs='+',\n help='one or more space-delimited strings indicating where to search for tests')\n parser.add_argument(\"--collect-only\", action=\"store_true\", help=\"display collected tests, but do not run\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"pipe more verbose test output to stdout\")\n parser.add_argument(\"--exit-first\", action=\"store_true\", help=\"exit after first failure\")\n\n args = parser.parse_args()\n return args", "def parse_args() -> argparse.Namespace:\n desc = 'run integration tests.'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\n '--only', dest='regex', type=regex,\n default='.*', help=\"Regular expression to filter which tests to run\"\n )\n parser.add_argument('-j', '--jobs', type=int, dest=\"jobs\",\n default=multiprocessing.cpu_count(),\n help='max number of concurrent jobs')\n parser.add_argument('-v', '--verbose', default=False, dest=\"verbose\",\n help='enable verbose output')\n return parser.parse_args()", "def test_parse_arguments1():\n args = []\n parsed_args = parse_arguments.parse_arguments(args)\n assert parsed_args.logging_level == logging.ERROR\n assert parsed_args.group_size == defaults.DEFAULT_GRPSIZE\n assert parsed_args.students_file == defaults.DEFAULT_CSVFILE\n assert (parsed_args.grouping_method == group_random) is False", "def describe_tests(remit, sourcelist):\n print('* run tests with \"%s\"' % remit)\n print('* tests to run: ')\n for line in pretty_list(sourcelist, 7):\n print(' ' + line)\n print('* writers to test: ')\n for line in pretty_list(spec.TEST['writer'], 7):\n print(' ' + line)\n print('* options to test: ')\n for line in pretty_list(spec.TEST['pandoc_options'], 7):\n print(' ' + line)\n print('* blacklisted: ')\n for config in spec.BLACKLIST:\n command = make_command(remit=remit,\n source='TEST',\n writer=config['writer'],\n pandoc_options=config['pandoc_options'],\n extension=config['extension'])\n print(' ' + ' '.join(command))\n print(' (' + config['comment'] + ')')", "def parse_arguments():\n parser = ArgumentParser(description=\"Run tests in parallel.\")\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug logging\"\n )\n 
parser.add_argument(\n \"-l\", \"--layer\", help=\"Greedy match test layer name.\", action=\"append\"\n )\n parser.add_argument(\n \"-m\", \"--module\", help=\"Greedy match module name.\", action=\"append\"\n )\n return parser.parse_args()" ]
[ "0.64865124", "0.64598006", "0.64436996", "0.624886", "0.62173283", "0.61449796", "0.61414963", "0.61308944", "0.6120973", "0.6089176", "0.6052001", "0.60171705", "0.59748244", "0.5970603", "0.59605867", "0.59500784", "0.5947035", "0.59386694", "0.5920873", "0.59173906", "0.58935297", "0.5875736", "0.5839464", "0.5835992", "0.5815541", "0.579288", "0.5788493", "0.57684124", "0.5757857", "0.5752266" ]
0.7508628
0
Find which aggregator will be used, according to the CLI args
def setup(self, args):
        for key, ags in self._mapp.items():
            arg = args.get(key)
            if arg:
                # if the arg exists, mark the aggregator as active and
                # return a new instance of the matching aggregator class
                self.active = True
                return ags(arg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregator_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"aggregator_name\")", "def aggregator_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregator_name\")", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--process_queue\", action='store_true',\n dest=\"process_queue\",\n help=\"Process also schedd queue (Running/Idle/Pending jobs)\")\n parser.add_argument(\"--feed_es\", action='store_true',\n dest=\"feed_es\",\n help=\"Feed to Elasticsearch\")\n parser.add_argument(\"--feed_es_for_queues\", action='store_true',\n dest=\"feed_es_for_queues\",\n help=\"Feed queue data also to Elasticsearch\")\n parser.add_argument(\"--feed_amq\", action='store_true',\n dest=\"feed_amq\",\n help=\"Feed to CERN AMQ\")\n\n parser.add_argument(\"--schedd_filter\", default='',\n type=str, dest=\"schedd_filter\",\n help=(\"Comma separated list of schedd names to process \"\n \"[default is to process all]\"))\n parser.add_argument(\"--skip_history\", action='store_true',\n dest=\"skip_history\",\n help=\"Skip processing the history. (Only do queues.)\")\n parser.add_argument(\"--read_only\", action='store_true',\n dest=\"read_only\",\n help=\"Only read the info, don't submit it.\")\n parser.add_argument(\"--dry_run\", action='store_true',\n dest=\"dry_run\",\n help=(\"Don't even read info, just pretend to. (Still \"\n \"query the collector for the schedd's though.)\"))\n parser.add_argument(\"--keep_full_queue_data\", action='store_true',\n dest=\"keep_full_queue_data\",\n help=\"Drop all but some fields for running jobs.\")\n parser.add_argument(\"--amq_bunch_size\", default=5000,\n type=int, dest=\"amq_bunch_size\",\n help=(\"Send docs to AMQ in bunches of this number \"\n \"[default: %(default)d]\"))\n parser.add_argument(\"--es_bunch_size\", default=250,\n type=int, dest=\"es_bunch_size\",\n help=(\"Send docs to ES in bunches of this number \"\n \"[default: %(default)d]\"))\n parser.add_argument(\"--query_queue_batch_size\", default=50,\n type=int, dest=\"query_queue_batch_size\",\n help=(\"Send docs to listener in batches of this number \"\n \"[default: %(default)d]\"))\n parser.add_argument(\"--upload_pool_size\", default=8,\n type=int, dest=\"upload_pool_size\",\n help=(\"Number of parallel processes for uploading \"\n \"[default: %(default)d]\"))\n parser.add_argument(\"--query_pool_size\", default=8,\n type=int, dest=\"query_pool_size\",\n help=(\"Number of parallel processes for querying \"\n \"[default: %(default)d]\"))\n\n parser.add_argument(\"--es_hostname\", default='es-cms.cern.ch',\n type=str, dest=\"es_hostname\",\n help=\"Hostname of the elasticsearch instance to be used \"\n \"[default: %(default)s]\")\n parser.add_argument(\"--es_port\", default=9203,\n type=int, dest=\"es_port\",\n help=\"Port of the elasticsearch instance to be used \"\n \"[default: %(default)d]\")\n parser.add_argument(\"--es_index_template\", default='cms',\n type=str, dest=\"es_index_template\",\n help=(\"Trunk of index pattern. 
\"\n \"Needs to start with 'cms' \"\n \"[default: %(default)s]\"))\n parser.add_argument(\"--log_dir\", default='log/',\n type=str, dest=\"log_dir\",\n help=\"Directory for logging information [default: %(default)s]\")\n parser.add_argument(\"--log_level\", default='WARNING',\n type=str, dest=\"log_level\",\n help=\"Log level (CRITICAL/ERROR/WARNING/INFO/DEBUG) \"\n \"[default: %(default)s]\")\n parser.add_argument(\"--email_alerts\", default=[], action='append',\n dest=\"email_alerts\",\n help=\"Email addresses for alerts [default: none]\")\n\n args = parser.parse_args()\n set_up_logging(args)\n\n # --dry_run implies read_only\n args.read_only = args.read_only or args.dry_run\n\n main_driver(args)", "def __init__(__self__, *,\n aggregator_name: pulumi.Input[str],\n description: pulumi.Input[str],\n aggregator_accounts: Optional[pulumi.Input[Sequence[pulumi.Input['AggregatorAggregatorAccountArgs']]]] = None,\n aggregator_type: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"aggregator_name\", aggregator_name)\n pulumi.set(__self__, \"description\", description)\n if aggregator_accounts is not None:\n pulumi.set(__self__, \"aggregator_accounts\", aggregator_accounts)\n if aggregator_type is not None:\n pulumi.set(__self__, \"aggregator_type\", aggregator_type)", "def aggregation(cls, *args):\n return cls.path_finder('aggregation', *args)", "def aggregator_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"aggregator_name\")", "def aggregator_name(self) -> str:\n return pulumi.get(self, \"aggregator_name\")", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Aggregate Elasticsearch Log data.')\n parser.add_argument(\n '--host',\n default='https://logging-es',\n type=str,\n action='store',\n help='Host name or IP of the Elasticsearch server.'\n )\n parser.add_argument(\n '--port',\n default=9200,\n type=int,\n action='store',\n help='Port number of the Elasticsearch server.'\n )\n parser.add_argument(\n '--ca_certs',\n default='secret/admin-ca',\n type=str,\n action='store',\n help='Path to the CA certificates file'\n )\n parser.add_argument(\n '--cert',\n default='secret/admin-cert',\n type=str,\n action='store',\n help='Path to the client certificate file'\n )\n parser.add_argument(\n '--key',\n default='secret/admin-key',\n type=str,\n action='store',\n help='Path to the client key file'\n )\n\n return parser.parse_args()", "def get_argparser():\n\n parser = argparse.ArgumentParser(\"garc\")\n parser.add_argument('command', choices=commands)\n parser.add_argument('query', nargs='?', default=None)\n parser.add_argument(\"--log\", dest=\"log\",\n default=\"garc.log\", help=\"log file\")\n parser.add_argument(\"--user_account\",\n default=None, help=\"Gab account name\")\n parser.add_argument(\"--user_password\",\n default=None, help=\"Gab account password\")\n parser.add_argument('--config',\n help=\"Config file containing Gab account info\")\n parser.add_argument('--profile', default='main',\n help=\"Name of a profile in your configuration file\")\n parser.add_argument('--warnings', action='store_true',\n help=\"Include warning messages in output\")\n parser.add_argument(\"--connection_errors\", type=int, default=\"0\",\n help=\"Number of connection errors before giving up\")\n parser.add_argument(\"--http_errors\", type=int, default=\"0\",\n help=\"Number of http errors before giving up\")\n parser.add_argument(\"--output\", action=\"store\", default=None,\n dest=\"output\", help=\"write output to file path\")\n parser.add_argument(\"--format\", 
action=\"store\", default=\"json\",\n dest=\"format\", choices=[\"json\"],\n help=\"set output format\")\n parser.add_argument(\"--search_type\", action=\"store\", default=\"date\",\n dest=\"search_type\", choices=[\"date\"],\n help=\"set search type\")\n parser.add_argument(\"--number_gabs\", action=\"store\", type=int, default=-1,\n dest=\"number_gabs\",\n help=\"approximate number of gabs to return\")\n parser.add_argument(\"--gabs_after\", action=\"store\", default=\"2000-01-01\",\n dest=\"gabs_after\",\n help=\"approximate date of earliest gab you wish to collect\")\n\n\n return parser", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n aggregator_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AggregatorAggregatorAccountArgs']]]]] = None,\n aggregator_name: Optional[pulumi.Input[str]] = None,\n aggregator_type: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def parseargs() -> argparse.ArgumentParser:\n\n parser = worker.parseargs(\"ACT hybrid-analysis.com Client\")\n\n parser.add_argument(\n \"--feed\", action=\"store_true\", help=\"Download the public feed only, no lookup\"\n )\n\n parser.add_argument(\n \"--apikey\", default=\"\", help=\"community apikey for hybrid-analysis.com\"\n )\n\n parser.add_argument(\n \"--user-agent\", default=\"Falcon Sandbox\", help=\"User agent while talking to API\"\n )\n\n parser.add_argument(\n \"--no-check-certificate\",\n action=\"store_true\",\n help=\"Do not check SSL certificate\",\n )\n\n return parser", "def __init__(self, arg_list):\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n \"catalogs\",\n nargs=\"+\",\n help=\"Path to input catalog(s)\"\n )\n\n parser.add_argument(\n \"-a\", \"--aggregate\",\n dest=\"aggregate\",\n action=\"store_true\",\n help=\"Produce NcML aggregations and add OPeNDAP endpoints\"\n )\n parser.add_argument(\n \"-w\", \"--wms\",\n dest=\"wms\",\n action=\"store_true\",\n help=\"Add WMS and WCS endpoint for aggregations\"\n )\n parser.add_argument(\n \"-o\", \"--output-dir\",\n dest=\"output_dir\",\n default=\"output_catalogs\",\n help=\"Directory to write modified catalog(s) to [default: %(default)s]\"\n )\n parser.add_argument(\n \"-n\", \"--ncml-dir\",\n dest=\"ncml_dir\",\n default=\"aggregations\",\n help=\"Directory to write NcML aggregations to if using --aggregate \"\n \"[default: %(default)s]\"\n )\n parser.add_argument(\n \"-s\", \"--server\",\n dest=\"thredds_server\",\n default=\"cci-odp-data.ceda.ac.uk\",\n help=\"The hostname of the THREDDS server on which the data will \"\n \"hosted. 
This is required to construct URLs to THREDDS \"\n \"catalogs in global attributes in aggregations \"\n \"[default: %(default)s]\"\n )\n parser.add_argument(\n \"--remote-agg-dir\",\n default=\"/usr/local/aggregations/\",\n help=\"Directory under which NcML aggregations are stored on the \"\n \"THREDDS server [default: %(default)s]\"\n )\n parser.add_argument(\n \"--data-dir\",\n dest=\"data_dir\",\n default=\"/neodc/esacci\",\n help=\"Directory under which data is stored, so that the THREDDS \"\n \"dataset root can be translated to give the real path on \"\n \"disk [default: %(default)s]\"\n )\n\n self.args = parser.parse_args(arg_list)\n\n if self.args.wms and not self.args.aggregate:\n parser.error(\"Cannot add WMS/WCS aggregations without --aggregate\")", "def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. 
This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. 
If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def parse_args():\n parser = MyParser(description='Data processing and analytics library \\\n for OpenStack Browbeat perf data')\n\n parser.add_argument('-s', '--summary', dest=\"days\", type=int, default=-1,\n help='-s N summary of last N days of results')\n\n parser.add_argument('--summary-uuid', dest=\"summary_uuid\", type=str,\n default=None,\n help='--summary-uuid UUID summary of a specific uuid')\n\n parser.add_argument('--short-summary', dest=\"short_days\", type=int,\n default=-1,\n help='--short-summary N gives \\\n summary of last N days of results but uses cockroach \\\n db so only provides with basic summary')\n\n parser.add_argument('--upload-timesummary', dest=\"timeseries_uuid\",\n type=str, default=None,\n help='--upload-timesummary UUID \\\n uploads the features computed from data obtained from\\\n graphite. ')\n\n parser.add_argument('--upload-logsummary', dest=\"loggin_uuid\",\n type=str, default=None,\n help='--upload-logsummary UUID \\\n uploads the log summary to crdb \\\n currently just summarizes over entire timeperiod. ')\n\n parser.add_argument('-u', '--update-db', dest='update', type=bool,\n default=False,\n help='-u True pushes data to cockroach db')\n\n parser.add_argument('--update-clf', dest=\"clf_days\", type=int,\n default=-1,\n help='--update-clf 60 will update all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days')\n\n parser.add_argument('--test-clf', dest=\"test_days\", type=int,\n default=-1,\n help='--test-clf 60 will train all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days and then test it \\\n and display metrics')\n\n parser.add_argument('-v', '--osp-version', dest='version', type=str,\n default=None,\n help='-v 11-tripleo only returns hits for that \\\n OpenStack version, \\\n only supported by summary right now')\n\n parser.add_argument('-c', '--config', dest='config', type=str,\n default=pkg_resources.resource_filename('bml',\n \"config.yml\"),\n help='-c <config file path> use custom config file')\n\n args = parser.parse_args()\n return args", "def get_args():\n parser = argparse.ArgumentParser('Find a featureclass, database, mxd, or service in ArcGIS Server',\n epilog='For search strings inlcuding spaces, enclose the query in double-quotes')\n parser.add_argument('name', help='string for which to search (blank returns info on all services)',\n nargs='?', default='')\n parser.add_argument('-q', '--quiet', help='only display service names and URLs', action='store_true')\n parser.add_argument('-qq', '--veryquiet', help='only display service URLs, comma delimited', action='store_true')\n parser.add_argument('-cs', '--configstore', help='explicitly provide full path to config store', action='store')\n parser.add_argument('-csv', '--tocsv', help='create csv output', action='store_true')\n parser.add_argument('-md', '--markdown', help='create Markdown output', 
action='store_true')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def __init__(self, aggregator: af.Aggregator):\r\n self.aggregator = aggregator", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(usage='Creates an ensemble of classifiers based on majority voting.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def aggregator_id(self) -> str:\n return pulumi.get(self, \"aggregator_id\")", "def aggregator_id(self) -> str:\n return pulumi.get(self, \"aggregator_id\")", "def aggregator_id(self) -> str:\n return pulumi.get(self, \"aggregator_id\")", "def parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='ml-1m', help='which dataset to use')\n args = parser.parse_args()\n main(args)", "def getargs(parser: argparse.ArgumentParser) -> argparse.Namespace:\n parser.add_argument(\n '-servers', type=str, default='',\n help=\"\"\"\n Hostname or IP and port of Kafka broker producing stream.\n [KAFKA_IPPORT/KAFKA_IPPORT_SIM]\n \"\"\")\n parser.add_argument(\n '-topic', type=str, default='',\n help=\"\"\"\n Name of Kafka topic stream to read from.\n [KAFKA_TOPIC/KAFKA_TOPIC_SIM]\n \"\"\")\n parser.add_argument(\n '-schema', type=str, default='',\n help=\"\"\"\n Schema to decode the alert. Should be avro file.\n [FINK_ALERT_SCHEMA]\"\"\")\n parser.add_argument(\n '-startingoffsets_stream', type=str, default='',\n help=\"\"\"From which stream offset you want to start pulling data when\n building the raw database: latest, earliest, or custom.\n [KAFKA_STARTING_OFFSET]\n \"\"\")\n parser.add_argument(\n '-online_data_prefix', type=str, default='',\n help=\"\"\"Path prefix to store online data, e.g. /path/to/online.\n This would then contain automatically {raw, science}/year=/month=/day=\n [ONLINE_DATA_PREFIX]\n \"\"\")\n parser.add_argument(\n '-agg_data_prefix', type=str, default='',\n help=\"\"\"Path prefix to store archive data, e.g. /path/to/archive.\n This would then contain automatically {raw, science}/year=/month=/day=\n [AGG_DATA_PREFIX]\n \"\"\")\n parser.add_argument(\n '-science_db_name', type=str, default='',\n help=\"\"\"\n The name of the HBase table\n [SCIENCE_DB_NAME]\n \"\"\")\n parser.add_argument(\n '-science_db_catalogs', type=str, default='',\n help=\"\"\"\n The path for HBase table catalogs. Must exist.\n [SCIENCE_DB_CATALOGS]\n \"\"\")\n parser.add_argument(\n '-log_level', type=str, default='',\n help=\"\"\"\n The minimum level of log: OFF, DEBUG, INFO, WARN, ERROR, CRITICAL\n [LOG_LEVEL]\n \"\"\")\n parser.add_argument(\n '-finkwebpath', type=str, default='',\n help=\"\"\"\n Folder to store UI data for display.\n [FINK_UI_PATH]\n \"\"\")\n parser.add_argument(\n '-tinterval', type=int, default=0,\n help=\"\"\"\n Time interval between two monitoring. 
In seconds.\n [FINK_TRIGGER_UPDATE]\n \"\"\")\n parser.add_argument(\n '-tinterval_kafka', type=float, default=0.0,\n help=\"\"\"\n Time interval between two messages are published. In seconds.\n [TIME_INTERVAL]\n \"\"\")\n parser.add_argument(\n '-exit_after', type=int, default=None,\n help=\"\"\"\n Stop the service after `exit_after` seconds.\n This primarily for use on Travis, to stop service after some time.\n Use that with `fink start service --exit_after <time>`. Default is None.\n \"\"\")\n parser.add_argument(\n '-datasimpath', type=str, default='',\n help=\"\"\"\n Folder containing simulated alerts to be published by Kafka.\n [FINK_DATA_SIM]\n \"\"\")\n parser.add_argument(\n '-poolsize', type=int, default=5,\n help=\"\"\"\n Maximum number of alerts to send. If the poolsize is\n bigger than the number of alerts in `datapath`, then we replicate\n the alerts. Default is 5.\n [POOLSIZE]\n \"\"\")\n parser.add_argument(\n '-distribution_servers', type=str, default='',\n help=\"\"\"\n Kafka bootstrap servers for alert redistribution\n [DISTRIBUTION_SERVERS]\n \"\"\")\n parser.add_argument(\n '-distribution_topic', type=str, default='',\n help=\"\"\"\n Kafka topic for Alert redistribution\n [DISTRIBUTION_TOPIC]\n \"\"\")\n parser.add_argument(\n '-distribution_schema', type=str, default='',\n help=\"\"\"\n The path where the avro schema for alert distribution is stored\n [DISTRIBUTION_SCHEMA]\n \"\"\")\n parser.add_argument(\n '-startingOffset_dist', type=str, default='',\n help=\"\"\"From which offset(timestamp) you want to start the\n distribution service.\n Options are: latest, earliest or a custom timestamp\n [DISTRIBUTION_OFFSET]\n \"\"\")\n parser.add_argument(\n '-checkpointpath_dist', type=str, default='',\n help=\"\"\"\n The path of file in which to store the offset for distribution service.\n This file will store the timestamp up-till which the science db is\n scanned and alerts have been distributed.\n [DISTRIBUTION_OFFSET_FILE]\n \"\"\")\n parser.add_argument(\n '-distribution_rules_xml', type=str, default='',\n help=\"\"\"\n The path to distribution-rules.xml which stores user defined rules to\n filter the distribution stream\n [DISTRIBUTION_RULES_XML]\n \"\"\")\n parser.add_argument(\n '-slack_channels', type=str, default='',\n help=\"\"\"\n Text file with list of slack channels to which automatic alerts\n must be sent for e.g. based on cross-match type\n [SLACK_CHANNELS]\n \"\"\")\n parser.add_argument(\n '-night', type=str, default='',\n help=\"\"\"\n YYYYMMDD night\n [NIGHT]\n \"\"\")\n parser.add_argument(\n '-fs', type=str, default='',\n help=\"\"\"\n Filesystem: local or hdfs.\n [FS_KIND]\n \"\"\")\n parser.add_argument(\n '-datapath', type=str, default='',\n help=\"\"\"\n Directory on disk for saving temporary alert data.\n [DATA_PREFIX]\n \"\"\")\n parser.add_argument(\n '--save_science_db_catalog_only', action='store_true',\n help=\"\"\"\n If True, save only the catalog on disk and do not push\n data on HBase. Default is False.\n [SAVE_SCIENCE_DB_CATALOG_ONLY]\n \"\"\")\n parser.add_argument(\n '-index_table', type=str, default='',\n help=\"\"\"\n Name of the rowkey for index table\n [INDEXTABLE]\n \"\"\")\n parser.add_argument(\n '-tns_folder', type=str, default='',\n help=\"\"\"\n Folder to store logs and keys for TNS submission\n [TNS_FOLDER]\n \"\"\")\n parser.add_argument(\n '--tns_sandbox', action='store_true',\n help=\"\"\"\n If True, push to TNS sandbox. 
Default is False.\n [TNS_SANDBOX]\n \"\"\")\n parser.add_argument(\n '-substream_prefix', type=str, default='fink_',\n help=\"\"\"\n Prefix for outgoing substreams\n [SUBSTREAM_PREFIX]\n \"\"\")\n parser.add_argument(\n '-fink_fat_output', type=str, default='',\n help=\"\"\"\n Folder that contains fink-fat output parquet files\n [FINK_FAT_OUTPUT]\n \"\"\")\n parser.add_argument(\n '-producer', type=str, default='ztf',\n help=\"\"\"\n Name of the alert producer. Currently available: ztf, elasticc, sims\n [PRODUCER]\n \"\"\")\n parser.add_argument(\n '-noscience', type=bool, default=False,\n help=\"\"\"\n Disable execution of science modules\n \"\"\")\n parser.add_argument(\n '-tns_raw_output', type=str, default='',\n help=\"\"\"\n Folder that contains raw TNS catalog\n [TNS_RAW_OUTPUT]\n \"\"\")\n args = parser.parse_args(None)\n return args", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"summary.py\"\"\")\n parser.add_argument(\n \"--maf\",\n required=True,\n default=None,\n help=\"\"\"The path to the directory containing maf file(s)\"\"\"\n )\n parser.add_argument(\n \"--db\",\n required=True,\n default=None,\n help=\"\"\"The name of the output SQLITE database to hold results\"\"\"\n )\n parser.add_argument(\n \"--metadata-key\",\n required=True,\n dest=\"metadata\",\n type=str,\n help=\"\"\"The primary species in the alignment (e.g. the one on top in the MAF file)\"\"\"\n )\n parser.add_argument(\n \"--alignment-length\",\n dest=\"align\",\n type=int,\n default=25,\n help=\"\"\"The minimum acceptable alignment length\"\"\"\n )\n parser.add_argument(\n \"--consensus-length\",\n dest=\"consensus\",\n type=int,\n default=25,\n help=\"\"\"The minimum acceptable consensus length\"\"\"\n )\n return parser.parse_args()", "def main():\n header = \"\"\"\n###############################################################################\n# #\n# Obtain data for the lookup execution time estimator #\n# #\n# --------------------------------------------------------------------------- #\n# #\n# Import execution times to mongodb from #\n# 1. mongodb_log via recorded blackboard skiller calls #\n# 2. 
samples of a mixed gaussian distribution #\n# #\n###############################################################################\n\"\"\"\n parser = argparse.ArgumentParser(\n description=textwrap.dedent(header),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n common = argparse.ArgumentParser(add_help=False)\n group = common.add_argument_group(\"Mongodb options\")\n group.add_argument(\n \"--mongodb-uri\",\n type=str,\n help=\"The MongoDB URI of the execution time estimator lookup database (default: %(default)s)\",\n default=\"mongodb://localhost:27017/\",\n )\n group.add_argument(\n \"--db\",\n type=str,\n help=textwrap.dedent(\"\"\"name of the lookup database (default: %(default)s)\"\"\"),\n default=\"skills\",\n )\n group.add_argument(\n \"--dry-run\",\n \"-d\",\n action=\"store_true\",\n help=\"only create samples without uploading them to mongodb\",\n )\n group.add_argument(\n \"--collection\",\n \"-c\",\n type=str,\n help=\"name of the lookup collection (default: %(default)s)\",\n default=\"exec_times\",\n )\n group.add_argument(\n \"--drop-collection-first\",\n \"-dc\",\n action=\"store_true\",\n help=\"clear all old data from the collection\",\n )\n subparsers = parser.add_subparsers(\n help=\"Source of the execution time data\", dest=\"subparser\"\n )\n bb_parser = subparsers.add_parser(\n \"bblog\",\n parents=[common],\n description=textwrap.dedent(\n header\n + \"\"\"\\\n# #\n# Selected option 1 #\n# #\n###############################################################################\n\"\"\"\n ),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n bb_parser.set_defaults()\n random_parser = subparsers.add_parser(\n \"generate\",\n parents=[common],\n description=textwrap.dedent(\n header\n + \"\"\"\\\n# #\n# Selected option 2 #\n# #\n###############################################################################\n\"\"\"\n ),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n random_parser.set_defaults()\n bb_sanity = bb_parser.add_argument_group(\"Sanity checks to avoid faulty entries\")\n bb_sanity.add_argument(\n \"--lower-bound\",\n \"-l\",\n type=float,\n default=0,\n help=\"ignore entries with duration smaller than this\",\n )\n bb_sanity.add_argument(\n \"--upper-bound\",\n \"-u\",\n type=float,\n default=float(\"inf\"),\n help=\"ignore entries with duration smaller than this\",\n )\n bb_log = bb_parser.add_argument_group(\"Blackboard log information\")\n bb_log.add_argument(\n \"--src-uri\",\n type=str,\n help=\"The MongoDB URI of the blackboard log connection (default: %(default)s)\",\n default=\"mongodb://localhost:27017/\",\n )\n bb_log.add_argument(\n \"--src-db\",\n type=str,\n help=\"The name of the blackboard log database (default: %(default)s)\",\n default=\"fflog\",\n )\n bb_log.add_argument(\n \"--src-col\",\n type=str,\n help=\"The name of the blackboard log collection (default: %(default)s)\",\n default=\"SkillerInterface.Skiller\",\n )\n bb_log.add_argument(\n \"--drop-src-col\",\n type=bool,\n help=\"Delete the skiller blackboard log collection afterwards\",\n default=False,\n )\n\n skill = random_parser.add_argument_group(\"Skill information\")\n skill.add_argument(\n \"--quantity\",\n \"-n\",\n type=int,\n help=\"number of entries to generate\",\n required=True,\n )\n skill.add_argument(\n \"--skill-name\",\n \"-s\",\n type=str,\n help=\"skill name to generate entries for\",\n required=True,\n )\n skill.add_argument(\n \"--skill-args\",\n \"-a\",\n type=str,\n nargs=\"+\",\n action=\"append\",\n help=textwrap.dedent(\n \"\"\"skill arguments. 
usage -a <arg-name> <val1> <val2> ...\n where val<i> are the possible values of the argument that will be chosen from at random\n * (placeholder value) if no values are given\n \"\"\"\n ),\n )\n\n gauss = random_parser.add_argument_group(\"Mixed gaussian distribution\")\n gauss.add_argument(\n \"--gauss-params\",\n \"-g\",\n type=float,\n help=\"mean and standard deviation (in that order) of a gaussian, repeat this option to add more gaussians\",\n nargs=2,\n required=True,\n action=\"append\",\n )\n gauss.add_argument(\n \"--dist-weights\",\n \"-w\",\n type=float,\n default=[],\n help=\"Weight of each gauss distribution (default 1)\",\n nargs=\"+\",\n )\n gauss.add_argument(\n \"--lower-bound\",\n \"-l\",\n type=float,\n default=0,\n help=\"clip distribution to a lower bound\",\n )\n gauss.add_argument(\n \"--upper-bound\",\n \"-u\",\n type=float,\n default=float(\"inf\"),\n help=\"clip distribution to an upper bound\",\n )\n\n visual = random_parser.add_argument_group(\"Visualization options\")\n visual.add_argument(\n \"--bin-size\",\n \"-b\",\n type=int,\n help=\"number of bins to display sampled durations (default: %(default)s)\",\n default=50,\n )\n visual.add_argument(\n \"--non-interactive\",\n \"-y\",\n action=\"store_true\",\n help=\"skip drawing the sample range\",\n )\n parser.epilog = (\n \"--- Arguments common to all sub-parsers ---\"\n + common.format_help().replace(common.format_usage(), \"\")\n )\n random_parser.epilog = \"\"\"\nexample call: ./mongodb_skillsim_lookup.py generate -d -n \\\n1000 -g 10 2 -g 20 3 -w 1 5 -s test -a arg1 value1 value2 -a arg2\n \"\"\"\n args = parser.parse_args(args=None if sys.argv[1:] else [\"--help\"])\n # validate inputs\n if args == None:\n parser.exit(1)\n\n mongoIf = MongoInterface(args.mongodb_uri, args.db, args.collection, args.dry_run)\n if args.drop_collection_first and not args.dry_run:\n print(\"Drop collection before uploading...\")\n drop_collection(args.mongodb_uri, args.db, args.collection)\n if args.subparser == \"bblog\":\n mongoIf.transform(\n args.src_uri, args.src_db, args.src_col, args.lower_bound, args.upper_bound\n )\n if args.drop_src_col:\n drop_collection(args.src_mongodb_uri, args.src_db, args.src_col)\n elif args.subparser == \"generate\":\n sampler = GaussSampler(\n args.quantity,\n args.dist_weights,\n args.gauss_params,\n args.upper_bound,\n args.lower_bound,\n )\n if not args.non_interactive:\n sampler.display(args.bin_size)\n mongoIf.upload(sampler.samples, args.skill_name, args.skill_args)\n else:\n print(\"unrecognized mode\")", "def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. 
The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\"cat_stats.py\")\n parser.add_argument(\"folder\", \n help = \"folder where all the stats files are located\")\n return parser.parse_args()", "def process_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"srcDir\", type=str, help=\"Directory containing Unit Hydrograph grids to be aggregated\")\n parser.add_argument(\"gridFile\", type=str, help=\"Input netCDF target grid\")\n parser.add_argument(\"--remapDir\", type=str, help=\"Directory containing Output Unit Hydrograph grids\")\n parser.add_argument(\"--aggDir\", type=str, help=\"Directory where to store aggregated files (before remap)\")\n parser.add_argument(\"--inPrefix\", type=str, help=\"Input Unit Hydrograph File Prefix (default=UH_)\",default='UH_')\n parser.add_argument(\"--outPrefix\", type=str, help=\"Output Unit Hydrograph File Prefix (default=Agg_UH_)\", default=\"Agg_UH_\")\n parser.add_argument(\"--time\", type=str, help=\"Input Unit Hydrograph time variable name (default=time)\",default='time')\n parser.add_argument(\"--lon\", type=str, help=\"Input Unit Hydrograph longitude variable name (default=lon)\",default='lon')\n parser.add_argument(\"--lat\", type=str, help=\"Input Unit Hydrograph latitude variable name (default=lat)\",default='lat')\n parser.add_argument(\"--fraction\", type=str, help=\"Input Unit Hydrograph fraction variable name (default=fraction)\",default='fraction')\n parser.add_argument(\"--unit_hydrograph\",type=str, help=\"Input unit hydrograph variable name (default=unit_hydrograph)\",default='unit_hydrograph')\n parser.add_argument(\"--xc\", type=str, help=\"Input target grid longitude variable (default=xc)\",default='xc')\n parser.add_argument(\"--yc\", type=str, help=\"Input target grid latitude variable (default=yc)\",default='yc') \n parser.add_argument(\"--testAgg\",help=\"Do a test aggregation, where all inpoint points are aggregated into one file, remapping can be done afterwards using the --remap flag\",action=\"store_true\")\n parser.add_argument(\"--cdoDebug\",help=\"Enable CDO debuging (prings each step to screen)\",action=\"store_true\")\n parser.add_argument(\"--cdoForce\",help=\"Enable CDO force output (will overwrite existing files during remap)\",action=\"store_true\")\n parser.add_argument(\"--verbose\",help=\"Make script verbose\",action=\"store_true\")\n parser.add_argument(\"--remap\",help=\"Remap the aggregated Unit Hydrographs to outDir and put the aggregated files in the tempDir\",action='store_true')\n parser.add_argument(\"--agg\",help=\"Aggregate the input files onto the targetGrid (gridFile)\",action='store_true')\n parser.add_argument(\"--fill_value\",type=float,help=\"value to use as masked value (default=9.96920996839e+36)\",default = 9.96920996839e+36)\n parser.add_argument(\"--pad\",type=int,help=\"Set number of empty cells to include around each aggregated basin (default=10)\",default=10)\n parser.add_argument(\"--resolution\",type=float,help=\"Set resolution of input Unit Hydrographs (default=1/16.)\",default=1/16.)\n parser.add_argument(\"--clean\",help=\"Clean up aggregated Unit Hydrograph grids if 
remapping\", action='store_true')\n parser.add_argument(\"--dryrun\",help=\"Do the mapping between the source and target grid based on the files in the input directory, return the performance stats for the run\", action='store_true')\n args = parser.parse_args()\n\n options = {}\n paths = {}\n # parse the basics\n Rvars = (args.time,args.lon,args.lat,args.fraction,args.unit_hydrograph)\n Cvars = (args.yc,args.xc)\n paths['srcDir'] = args.srcDir\n paths['gridFile'] = args.gridFile\n\n if args.aggDir:\n paths['aggDir'] = args.aggDir\n else:\n paths['aggDir'] = os.path.join(paths['srcDir'],'../aggregated/')\n if not os.path.exists(paths['aggDir']):\n os.makedirs(paths['aggDir'])\n\n options['verbose'] = args.verbose\n options['fill_value'] = args.fill_value\n options['pad'] = args.pad\n options['resolution'] = args.resolution\n options['inPrefix'] = args.inPrefix\n options['outPrefix'] = args.outPrefix\n options['dryrun'] = args.dryrun\n options['testAgg'] = args.testAgg\n options['clean']=args.clean\n options['remap']=args.remap\n options['agg']=args.agg\n \n if options['remap']:\n cdo.debug=args.cdoDebug\n cdo.forceOutput=args.cdoForce\n if args.remapDir:\n paths['remapDir'] = args.remapDir\n else:\n paths['remapDir'] = os.path.join(paths['srcDir'],'../remaped/')\n if not os.path.exists(paths['remapDir']):\n os.makedirs(paths['remapDir'])\n print paths['remapDir'] \n\n return Rvars,Cvars,paths,options" ]
[ "0.623255", "0.60802627", "0.5820678", "0.5737905", "0.57234126", "0.57187593", "0.5651868", "0.55634636", "0.5494111", "0.5488788", "0.54820406", "0.53977966", "0.5384751", "0.5330864", "0.5323734", "0.5302474", "0.52866167", "0.5259916", "0.52444696", "0.52374387", "0.52364254", "0.52364254", "0.52364254", "0.5216306", "0.52087843", "0.5203598", "0.5197078", "0.5195497", "0.5166971", "0.5163054" ]
0.6193614
1
Used by the Crawler class; appends a line to the aggregator instance that has been set up.
def append(self, line): self.ag.append(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_add_node(self, line=''):\n self.fibbing.add_node()", "def add(self, line):\n self.body.append(line)", "def connectionMade(self):\n self.output = DelayedStartupLineLogger()\n self.output.makeConnection(self.transport)\n self.output.tag = self.name", "def _augment_pipeline_cfg(self):", "def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)", "def add(name, **spider_args):", "def __init__(self):\n super(GithubCollector, self).__init__()\n config_file = ('collectors.cfg')\n log_file = self.config['Github']['log_file']\n logging.config.fileConfig(config_file,\n defaults={'GithubCollector': log_file}\n )\n self.logger = logging.getLogger('GithubCollector')\n self.elasticsearch = Elasticsearch(['localhost:9200'])\n self.redis = redis.Redis(host='127.0.0.1', port=6379, password='')\n self.timestamp = datetime.date.today().isoformat()", "def setup(self):\n self.ca_lines = []\n self.ca_lines.append(self.build_initial_line())\n self.set_display_from_lines()", "def introduce(self):\n print(f\"Hi, I am {self.name}!\")", "def __call__(self, this):\n if self.logging is True:\n self.trace += '{Now} {Host} {Proc} {Event}\\n'.format(\n Now=time.strftime('%c', time.localtime()),\n Host=node(),\n Proc=self.tag,\n Event=this,\n )", "def write(self, line, *, preprocessor):\n preprocessor.insert_lines((line + \"\\n\",))", "def test_add_processor(caplog):\n\n testapp = holocron.Application()\n marker = None\n\n def processor(app, items):\n nonlocal marker\n marker = 42\n yield from items\n\n testapp.add_processor(\"processor\", processor)\n\n for _ in testapp.invoke([{\"name\": \"processor\"}]):\n pass\n\n assert marker == 42\n assert len(caplog.records) == 0", "def __init__(self, aggregator: af.Aggregator):\r\n self.aggregator = aggregator", "def updateOutliner(self):\n\n pass", "def record_meta(spider):\n pass", "def append(self, item: Operation): # type: ignore\n if self.enabled:\n logger.debug(\n f'Appended {item.description} to the operations queue')\n super().append(item)\n else:\n loginfo('- ' + item.long_description)\n item()", "def publish_line(self, stream, line):\n pass", "def __processOutputLine(self, line):\n if (\n line.startswith(\"--- \") or\n line.startswith(\"+++ \")\n ):\n self.__processFileLine(line)\n \n self.__output.append(line)", "def test_add_processor_override(caplog):\n\n testapp = holocron.Application()\n marker = None\n\n def processor_a(app, items):\n nonlocal marker\n marker = 42\n yield from items\n\n def processor_b(app, items):\n nonlocal marker\n marker = 13\n yield from items\n\n testapp.add_processor(\"processor\", processor_a)\n testapp.add_processor(\"processor\", processor_b)\n\n for _ in testapp.invoke([{\"name\": \"processor\"}]):\n pass\n\n assert marker == 13\n\n assert len(caplog.records) == 1\n assert caplog.records[0].message == \"processor override: 'processor'\"", "def _populate_output(self):\n pass", "def _add_produce(self, name):\n self._produces.append(\"- {name}\\n\")\n pass", "def add_collector_imports(self):\n with open(self.filename, \"r+\") as code_file:\n content = code_file.read()\n if not content.startswith(self.IMPORT_COLLECTOR_LINE):\n logger.debug(\n \"Adding import lines, please do not remove while generating yml.\"\n )\n code_file.seek(0, 0)\n code_file.write(\n f\"{self.IMPORT_COLLECTOR_LINE}\\n{self.EXPLICIT_DECLARATION_IMPORTS_LINE}\\n\\n{content}\"\n )", "def __on_group_created(self, logger, *args):", "def __add_pipeline__(self,pipeline):\n if not re.search('Pipeline',pipeline.obj_type):\n raise 
Exception(\"Trying to add non-pipeline key to flowcell statistics reports\")\n if not self.pipelines is None:\n self.pipelines += ';'\n self.pipelines += str(pipeline.key) + \":\" + pipeline.obj_type\n else:\n self.pipelines = str(pipeline.key) + \":\" + pipeline.obj_type", "def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)", "def post_build(self):", "def setup(self) -> None:\n self.logger.info(\"ML Train task: setup method called.\")", "def printme(self, line):\n self.otag.printme(line)", "def __enter__(self) -> None:\n self.log = super().__enter__()", "def decorate(self, content: StringList) -> None:\n super().decorate(content)\n for line in content[:20]:\n if line.startswith(TITLE_MARKER):\n title = line[len(TITLE_MARKER) :].strip()\n fence = \"=\" * len(title)\n content.insert(0, \"\", \"<generated>\", 0)\n content.insert(0, fence, \"<generated>\", 0)\n content.insert(0, title, \"<generated>\", 0)\n content.insert(0, fence, \"<generated>\", 0)" ]
[ "0.56161034", "0.5413999", "0.5366023", "0.5317709", "0.531319", "0.52674985", "0.52674633", "0.52596486", "0.52380824", "0.5237493", "0.52317125", "0.5214208", "0.5210514", "0.51949674", "0.51943386", "0.51818913", "0.5181332", "0.5165727", "0.51442534", "0.5141288", "0.5126247", "0.5115031", "0.5098946", "0.50897545", "0.5087846", "0.5085947", "0.5080262", "0.50789416", "0.5075054", "0.50696385" ]
0.6032251
0
Release module to PyPI
def release_pypi(): local('python setup.py clean sdist register upload')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def upload():\n sh('python setup.py register sdist upload')", "def release_package_to_repository(self, version: str) -> None:\n logger.info(f\"Uploading the package [{version}]\")\n pass", "def source_release(request, new_package):\n\n new_module, pkg_root = new_package\n source_label = random_str(40)\n source_url = \"http://{}.com/{}\".format(random_str(7), random_str(12))\n with open(os.path.join(new_module, META_NAME), \"w\") as openmeta:\n openmeta.write((\n '{{\"packages\": [\"find_packages()\"], \"source_label\": \"{}\", '\n '\"source_url\": \"{}\"}}'\n ).format(source_label, source_url))\n\n request.addfinalizer(module_cleanup)\n return new_module, source_label, source_url", "def release(ctx, sdist=True, wheel=True, sign=True, dry_run=False):\n # Build docs first. Use terribad workaround pending invoke #146\n ctx.run(\"inv docs\", pty=True, hide=False)\n # Move the built docs into where Epydocs used to live\n target = 'docs'\n rmtree(target, ignore_errors=True)\n # TODO: make it easier to yank out this config val from the docs coll\n copytree('sites/docs/_build', target)\n # Publish\n publish(ctx, sdist=sdist, wheel=wheel, sign=sign, dry_run=dry_run)\n # Remind\n print(\"\\n\\nDon't forget to update RTD's versions page for new minor \"\n \"releases!\")", "def _provision_package(self):", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def upload_package(conn, module, remotepath = None, chunk_size = 16000):\n if remotepath is None:\n site = conn.modules[\"distutils.sysconfig\"].get_python_lib()\n remotepath = conn.modules.os.path.join(site, module.__name__)\n localpath = os.path.dirname(inspect.getsourcefile(module))\n upload(conn, localpath, remotepath, chunk_size = chunk_size)", "def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)", "def develop():\n# Install package in development mode\n sh('python setup.py develop')", "def create_release(self, alias=\"devel\"):\n self.create_package(alias)\n\n self.upload_package()\n\n logger.info(\"Creating release {0}\".format(self.hash_release))\n\n response_code = self.aws_lambda.update_function_code(\n FunctionName=self.function_selected,\n S3Bucket=self.function_config['Code']['S3Bucket'],\n S3Key=self.s3_filename,\n Publish=True\n )\n\n logger.info(\"Created revision {0}\".format(response_code['Version']))\n\n self.update_or_create_alias(response_code['Version'], self.hash_release)\n self.update_or_create_alias(response_code['Version'], alias)\n\n logger.info(\"If config wash changed, remember to update function \"\n \"configuration\")", "def upload_package(self, __contents):\n raise NotImplementedError", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py 
sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def release(c, dry_run=False):\n tox_args = \"--skip-pkg-install -e py37\" if not dry_run else \"\"\n c.run(f\"tox {tox_args}\")\n dry = \"--dry-run\" if dry_run else \"\"\n c.run(f\"bump2version {dry} --verbose patch\")\n\n if not dry_run:\n c.run(\"git push --tags\")", "def create_package(self, release_tag=''):\n\n code_directory = self.function_config['Code']['Directory']\n package_name = self.function_selected\n hash_release = _get_git_release()\n logger.info(\"Creating package with git release {0}\".format(hash_release))\n\n lp = self.runtime['packager'](\n package_name,\n hash_release + release_tag,\n code_directory,\n target_directory='.')\n\n lp.build_and_save()\n\n self.hash_release = hash_release\n self.local_filename = lp.filename", "def dist(context):\n context.run(\"python setup.py sdist\")\n context.run(\"python setup.py bdist_wheel\")", "def upload(version=minv.__version__, release=\"1\"):\n version = version or minv.__version__\n put(\n join(\n env.builder_path,\n \"build/RPMS/minv-%s-%s.noarch.rpm\" % (version, release)\n ), \"\"\n )\n put(\"minv/package/minv_install_postgresql.sh\", \"\")\n sudo(\"chmod a+x minv_install_postgresql.sh\")\n with lcd(env.ink_path):\n for rpm in RPMS:\n put(rpm, \"\")", "def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))", "def deploy():\n build()\n copy()\n install()", "def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)", "def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')", "def install():\n execute(generate)\n execute(upload)", "def package_software(self, version: str) -> None:\n logger.info(f\"Generating a release package [{version}]\")\n pass", "def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")", "def ship():\n cotton.git_push()\n 
cotton.install_python_dependencies()\n\n # Deploy the secrets module to the remote project root\n spath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'secrets'))\n put(spath, env.project_root)\n\n cotton.upload_template_and_reload('cron')", "def perform_register(path, file_name):\n subprocess.call(\n [sys.executable, 'setup.py', 'sdist', 'bdist_wheel'], cwd=path)\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '.tar.gz')])\n subprocess.call(['twine', 'register', '-r', 'pypi', os.path.join(\n path, 'dist', file_name + '-py3-none-any.whl')])", "def test_pydroid_pip_install_python_api(self):\n\n module_name = 'simplekv'\n package_dir = os.path.join(site_packages_dir(), module_name)\n self.assertFalse(os.path.exists(package_dir))\n pydroid_pip_install.pydroid_pip_install([module_name])\n self.assertTrue(os.path.exists(package_dir))", "def sdist():\n pass" ]
[ "0.67773676", "0.6478244", "0.63994586", "0.6243652", "0.62267685", "0.62066156", "0.6148887", "0.60896164", "0.60631406", "0.6047037", "0.60178316", "0.5989916", "0.59694237", "0.5946053", "0.58958906", "0.5860913", "0.5859919", "0.58371365", "0.58328044", "0.5812867", "0.5787837", "0.5773149", "0.5768149", "0.57560974", "0.5748894", "0.5724356", "0.5703695", "0.5694783", "0.5664385", "0.56603324" ]
0.77637976
0
Pylint and PEP8 QA report generator. We use subprocess instead of local because pylint and pep8 don't return a zero exit code, and this behaviour is incompatible with fabric...
def release_qa(): lines = StringIO.StringIO(local('find . -name "*.py"', capture=True)) for line in lines.readlines(): print "PYLINT CHECK" print "-----------------------" pyfile = os.path.normpath(line).replace("\n","").replace("\r","") reportfilename = pyfile.replace("./", "").replace("/", "_").replace(".py", ".txt") reportpath = os.path.join("qa", "pylint", reportfilename) options = {"pyfile":pyfile, "reportpath": reportpath} command = "pylint %(pyfile)s > %(reportpath)s" % options _subexec(command) print "PEP8 CHECK" print "-----------------------" reportpath = os.path.join("qa", "pep8", reportfilename) options['reportpath'] = reportpath command = "pep8 %(pyfile)s > %(reportpath)s" % options _subexec(command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lint(to_lint):\n exit_code = 0\n for linter, options in (('pyflakes', []), ('pep8', pep8_options)):\n try:\n output = local[linter](*(options + to_lint))\n except commands.ProcessExecutionError as e:\n output = e.stdout\n\n if output:\n exit_code = 1\n print \"{0} Errors:\".format(linter)\n print output\n\n output = hacked_pep257(to_lint)\n if output:\n exit_code = 1\n print \"Docstring Errors:\".format(linter.upper())\n print output\n\n sys.exit(exit_code)", "def lint(context):\n context.run(\" \".join([\n \"autopep8\",\n \"--recursive\",\n \"--jobs 0\",\n \"--in-place\",\n \"--aggressive\",\n \"-v\",\n PACKAGE_NAME,\n \"tests\"\n ]))\n context.run(\"pylint %s\" % PACKAGE_NAME)", "def pylint(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_pylint(session)", "def run():\n args = parse_args(sys.argv[1:])\n fnames = args.fnames\n runner = PylintRunner(args)\n runner.run(fnames)", "def test_2():\n try:\n import flake8 # noqa: F401\n except ImportError:\n return None\n\n cwd = os.getcwd()\n os.chdir(PACKAGE_DIR)\n try:\n subprocess.check_call(['flake8'])\n os.chdir(cwd)\n except CalledProcessError:\n os.chdir(cwd)\n raise CalledProcessError", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def test_pylint(self):\n files_list = []\n\n for root, dirnames, filenames in os.walk(PROJECT_DIR):\n if ignore(root):\n continue\n\n for filename in fnmatch.filter(filenames, '*.py'):\n files_list.append(os.path.join(root, filename))\n\n for file in files_list:\n # (pylint_stdout, pylint_stderr) = epylint.py_run(\n # command_options=\"{} --errors-only\".format(file),\n # return_std=True)\n\n # print(pylint_stdout.getvalue())\n # print(pylint_stderr.getvalue())\n\n call([\n 'pylint',\n '--errors-only',\n file])", "def lint():\n load_env_vars('dev')\n from tools.static_code_analysis import Lint\n pylint = Lint()\n score = pylint.run_test()\n pylint.create_badge(score)", "def pep8():\n import pycodestyle\n # pycode requires full paths\n exclude = [\"{cwd}/{pattern}\".format(\n cwd=cwd,\n pattern=pattern\n ) for pattern in _pep8_excluded_patterns]\n style = pycodestyle.StyleGuide(config_file='./tox.ini', exclude=exclude)\n report = style.check_files('.')\n print(\"Errors: {}\".format(report.get_count()))\n # this ensures we can programmatically determine failures\n if report.get_count() > 0:\n sys.exit(1)", "def lint(self):\n return {\n \"actions\": [\n (create_dir, [\"build/lint\"]),\n TaskCreator.get_flake8() + \" \" + self.project_name_sc + \" | tee build/lint/flake8.log\",\n TaskCreator.get_pylint() + \" --output-format=parseable --reports=no \" + self.project_name_sc + \" | tee build/lint/pylint.log\"\n ],\n \"verbosity\": 2\n }", "def commands_lint():\n lint()", "def codeqa():\n\n try:\n sh('flake8 h5_validator')\n except BuildFailure:\n pep8_fail = True\n else:\n pep8_fail = False\n\n try:\n sh(\"pydocstyle h5_validator\")\n except BuildFailure:\n docstring_fail = True\n else:\n docstring_fail = False\n\n if pep8_fail or docstring_fail:\n raise BuildFailure('Code Quality checks failed')", "def pylint(config, python_file):\n try:\n cmd = [config.pylint_exe]\n\n if config.pylint_params:\n cmd += config.pylint_params.split()\n if '--rcfile' not in config.pylint_params:\n cmd.append('--rcfile={}'.format(config.pylintrc))\n else:\n cmd.append('--rcfile={}'.format(config.pylintrc))\n\n cmd += ['--reports=y', '--persistent=n', python_file]\n res = 
command.execute(cmd)\n LOGGER.debug(cmd)\n return res.stdout\n except OSError:\n print \"\\nAn error occurred. Is pylint installed?\"\n sys.exit(1)", "def test():\n # subprocess.check_output() has been introduced in Python 2.7\n if sys.version_info < (2, 7):\n print(\"Python version too old, skipping test.\")\n return 2\n\n try:\n subprocess.check_output(['shellcheck', '--version'])\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n print(\"shellcheck not found, aborting test.\")\n return 2\n raise\n except subprocess.CalledProcessError:\n print(\"shellcheck on an simple stream exited with error\")\n return 1\n\n retval = 0\n basedir = os.path.join(os.path.dirname(__file__), os.path.pardir)\n for dirpath, _, files in os.walk(basedir):\n # Ignore files in .git directory\n if '/.git' in dirpath:\n continue\n # Ignore collected hardware data files\n if '/specific/collected_hwdata' in dirpath:\n continue\n\n for filename in sorted(files):\n # Skip .toprc, which is not UTF-8\n if filename == 'toprc' and dirpath.endswith('dotfiles'):\n continue\n\n filepath = os.path.join(dirpath, filename)\n\n # If there is no extension and the file is known not to be a\n # shell script, the first line is used to find shell scripts\n if '.' not in filename:\n is_shellconfig = (\n dirpath.endswith('/dotfiles/shell') or\n '/dotfiles/bash' in filepath or\n '/dotfiles/profile' in filepath or\n '/dotfiles/zsh' in filepath)\n if not is_shellconfig:\n with open(filepath, 'r') as cur_file:\n firstline = cur_file.readline().rstrip()\n if 'sh' not in firstline:\n continue\n elif not filename.lower().endswith('.sh'):\n continue\n\n exclwarns = get_excluded_warnings(filepath[len(basedir):])\n exitcode = subprocess.call(\n ['shellcheck', '-e', ','.join(exclwarns), filepath])\n if exitcode != 0:\n retval = 1\n\n if not custom_lint_rules(filepath):\n retval = 1\n return retval", "def task_lint():\n return {\"actions\": [[\"prospector\"]], \"verbosity\": 1}", "def lint(\n command,\n):\n print(\n \"\"\"\nRunning flakeheaven, a Python code linter\n===================================\n\"\"\"\n )\n command.run(\"flakeheaven lint\", echo=True, pty=POSIX)", "def lint_command(argv) -> CommandResult:\n app = FlakeHellApplication(program=NAME, version=VERSION)\n try:\n app.run(argv)\n app.exit()\n except SystemExit as exc:\n return int(exc.code), ''\n raise RuntimeError('unreachable')", "def run_quality():\r\n\r\n # Directory to put the diff reports in.\r\n # This makes the folder if it doesn't already exist.\r\n dquality_dir = get_or_make_dir(os.path.join(Env.REPORT_DIR, \"diff_quality\"))\r\n\r\n # Generage diff-quality html report for pep8, and print to console\r\n # If pep8 reports exist, use those\r\n # Otherwise, `diff-quality` will call pep8 itself\r\n\r\n pep8_files = []\r\n for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):\r\n for f in files:\r\n if f == \"pep8.report\":\r\n pep8_files.append(os.path.join(subdir, f))\r\n\r\n pep8_reports = u' '.join(pep8_files)\r\n\r\n sh(\r\n \"diff-quality --violations=pep8 --html-report {dquality_dir}/\"\r\n \"diff_quality_pep8.html {pep8_reports}\".format(\r\n dquality_dir=dquality_dir, pep8_reports=pep8_reports)\r\n )\r\n\r\n sh(\r\n \"diff-quality --violations=pep8 {pep8_reports}\".format(\r\n pep8_reports=pep8_reports)\r\n )\r\n\r\n # Generage diff-quality html report for pylint, and print to console\r\n # If pylint reports exist, use those\r\n # Otherwise, `diff-quality` will call pylint itself\r\n\r\n pylint_files = []\r\n for subdir, _dirs, files in 
os.walk(os.path.join(Env.REPORT_DIR)):\r\n for f in files:\r\n if f == \"pylint.report\":\r\n pylint_files.append(os.path.join(subdir, f))\r\n\r\n pylint_reports = u' '.join(pylint_files)\r\n\r\n pythonpath_prefix = (\r\n \"PYTHONPATH=$PYTHONPATH:lms:lms/djangoapps:lms/lib:cms:cms/djangoapps:cms/lib:\"\r\n \"common:common/djangoapps:common/lib\"\r\n )\r\n\r\n sh(\r\n \"{pythonpath_prefix} diff-quality --violations=pylint --html-report \"\r\n \"{dquality_dir}/diff_quality_pylint.html {pylint_reports}\".format(\r\n pythonpath_prefix=pythonpath_prefix,\r\n dquality_dir=dquality_dir,\r\n pylint_reports=pylint_reports\r\n )\r\n )\r\n\r\n sh(\r\n \"{pythonpath_prefix} diff-quality --violations=pylint {pylint_reports}\".format(\r\n pythonpath_prefix=pythonpath_prefix,\r\n pylint_reports=pylint_reports\r\n )\r\n )", "def run(self):\n check_paths = PACKAGES + [\n 'setup.py',\n 'tests',\n 'util',\n ]\n ignore = [\n 'doc/',\n ]\n\n # try to install missing dependencies and import prospector\n try:\n from prospector.run import main\n except ImportError:\n # try to install and then import\n self.distribution.fetch_build_eggs(['prospector[with_pyroma]'])\n from prospector.run import main\n\n self.install_deps_temp()\n\n # run linter\n\n # change working directory to package root\n package_root = os.path.abspath(os.path.dirname(__file__))\n os.chdir(package_root)\n\n # write command line\n files = discover_python_files(check_paths, ignore)\n sys.argv = ['prospector']\n sys.argv.extend(files)\n\n # run prospector\n errno = main()\n\n sys.exit(errno)", "def lint(ctx):\r\n print('Running linting...')\r\n ctx.run('pylint metrics')", "def lint(session):\n session.install(\"-r\", \"requirements-test.txt\")\n session.install(\"-r\", \"requirements.txt\")\n session.install(\"flake8-import-order\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests\",\n \"google\",\n \"tests\",\n )\n session.run(\"mypy\", \"google\", \"tests\")\n session.run(\"python\", \"setup.py\", \"sdist\")\n session.run(\"twine\", \"check\", \"dist/*\")", "def pylint(context):\n exec_cmd = 'find . -name \"*.py\" | xargs pylint'\n run_cmd(context, exec_cmd)", "def test_pep8_conformance_unitests(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n\n self.run_check(path)", "def test_pylint_score_main_script(self):\n my_dir = pathlib.Path(__file__).resolve().parent\n root_dir = my_dir.parent.parent.parent\n pylintrc = root_dir / \".pylintrc\"\n script = root_dir / \"backend\" / \"quality_report.py\"\n self.assert_pylint_score(\"{0} --rcfile {1}\".format(script, pylintrc), 10.0)", "def _run(self, config):\n \n files = self.transaction.get_files(\n config.check_files, config.ignore_files\n )\n # Exit when no files has to be checked.\n if not files:\n self.logger.debug(\"PyLint check skipped. 
No files for check.\")\n return self.success()\n \n # Defining pylint home directory.\n os.environ['PYLINTHOME'] = config.pylint_home\n self.logger.debug(\"PyLint Home is used at '%s'.\", config.pylint_home)\n \n # Determine which pylintrc file is used for the validation.\n if config.pylintrc:\n self.logger.debug(\"Pylintrc is used at '%s'.\", config.pylintrc)\n os.environ['PYLINTRC'] = config.pylintrc\n else:\n self.logger.debug(\"Default PyLintRC is used.\")\n \n # Only added or updated files will be checked.\n files = [\n self.transaction.get_file(name) \n for name, attr in files.iteritems() \n if attr in [\"A\", \"U\", \"UU\"]\n ]\n \n if not files:\n self.logger.debug(\"No files to validate. PyLint check skipped.\")\n return self.success()\n \n output = StringIO.StringIO()\n reporter = TextReporter(output)\n \n # Mock to prevent the sys.exit called by pylint.lint.Run.__init__\n lint.sys.exit = lambda _: 0\n \n self.logger.debug(\"PyLint is running...\")\n lint.Run([\"--reports=n\"] + files, reporter=reporter)\n \n output = output.getvalue()\n self.logger.debug(\"PyLint output:\\n %s\", output)\n if output:\n return self.error(output)\n else:\n return self.success()", "def run_pylint(options):\r\n errors = getattr(options, 'errors', False)\r\n systems = getattr(options, 'system', 'lms,cms,common').split(',')\r\n\r\n for system in systems:\r\n # Directory to put the pylint report in.\r\n # This makes the folder if it doesn't already exist.\r\n report_dir = get_or_make_dir(os.path.join(Env.REPORT_DIR, system))\r\n\r\n flags = '-E' if errors else ''\r\n\r\n apps = [system]\r\n\r\n for directory in ['djangoapps', 'lib']:\r\n dirs = os.listdir(os.path.join(system, directory))\r\n apps.extend([d for d in dirs if os.path.isdir(os.path.join(system, directory, d))])\r\n\r\n apps_list = ' '.join(apps)\r\n\r\n pythonpath_prefix = (\r\n \"PYTHONPATH={system}:{system}/djangoapps:{system}/\"\r\n \"lib:common/djangoapps:common/lib\".format(\r\n system=system\r\n )\r\n )\r\n\r\n sh(\r\n \"{pythonpath_prefix} pylint {flags} -f parseable {apps} | \"\r\n \"tee {report_dir}/pylint.report\".format(\r\n pythonpath_prefix=pythonpath_prefix,\r\n flags=flags,\r\n apps=apps_list,\r\n report_dir=report_dir\r\n )\r\n )", "def run_pylint(self, fname):\n if '.py' not in fname:\n return False\n if not op.isfile(fname):\n self.logging.info('------------------------------------------------------------------')\n self.logging.info(\"FILE {} DOES NOT EXIST.\".format(fname))\n self.logging.info('------------------------------------------------------------------')\n self.logging.info('\\n')\n return False\n self.logging.info(\"{}\\n\".format(fname))\n self.fname = fname\n if self.rcfile and op.isfile(self.rcfile):\n command_arg = [fname, '--rcfile', self.rcfile, '--score', 'no']\n else:\n command_arg = [fname, '--score', 'no']\n try:\n with redirect_stdout(PrintLogger(name=\"pylint\", log_level=\"INFO\")):\n if int(pylint.__version__[0]) < 2:\n self.results = Run(command_arg, exit=False) # pylint: disable=unexpected-keyword-arg\n else:\n # Use the default one\n self.results = Run(command_arg, do_exit=False) # pylint: disable=unexpected-keyword-arg\n return True\n except Exception as error: # pylint: disable=broad-except\n # We want to crash if ANYTHING goes wrong\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning(\"PYLINT CRASHED WHILE HANDLING {}\".format(fname))\n self.logging.warning(\"{}: {}\".format(type(error), error.args))\n 
self.logging.warning('------------------------------------------------------------------')\n self.logging.info('\\n')\n self.failed_files.append(fname)\n return False", "def test_lint_myself(capsys) -> None:\n source_dir = Path(\"python_dev_tools\")\n if not source_dir.exists():\n # run from inside tests directory\n source_dir = Path(\"../python_dev_tools\")\n linter = flake8.get_style_guide()\n\n linter.check_files([str(path) for path in source_dir.rglob(\"*.py\")])\n\n captured = capsys.readouterr().out.replace(\"../\", \"\").replace(\"\\\\\", \"/\")\n warnings = \"\"\"\\\n python_dev_tools/whatalinter.py:15:42: NQA102 \"# noqa: WPS433\" has no matching violations\n python_dev_tools/whatalinter.py:17:35: NQA102 \"# noqa: WPS433, WPS440\" has no matching violations\n python_dev_tools/whatalinter.py:20:38: NQA102 \"# noqa: WPS440\" has no matching violations\n \"\"\"\n assert captured == dedent(warnings)", "def lint(cline):\n print(\"Linting with pylint.\")\n cline.run(r\"git ls-files '*.py' | xargs python3 -m pylint -j 0\")\n print(\"Type checking with mypy.\")\n cline.run(r\"git ls-files '*.py' | xargs python3 -m mypy\")", "def main(ctx, qa_dir, no_editor, report_dir, vcs, debug, main_branch):\n __main_imp__(ctx, qa_dir, no_editor, report_dir, vcs, debug, main_branch)" ]
[ "0.65316886", "0.64339733", "0.62938845", "0.6269195", "0.6211118", "0.61833465", "0.61646247", "0.61259955", "0.6116404", "0.6100978", "0.6065386", "0.60457647", "0.60411966", "0.6017067", "0.5966548", "0.5959962", "0.5957931", "0.5918821", "0.5904913", "0.5893862", "0.58845365", "0.5879583", "0.5877811", "0.58313674", "0.58257663", "0.58053905", "0.5782933", "0.57777244", "0.57437044", "0.5715116" ]
0.7032081
0
installs and configures a fresh DIRAC UI (VO specific)
def install_ui(): # pick which VO I want to test, default gridpp print "Which VO do you want to test (default: gridpp) ?" user_VO = raw_input("Your choices are: gridpp, lz, lsst, solidexperiment.org, skatelescope.eu: ") \ or "gridpp" if user_VO not in ["gridpp", "lz", "lsst", "solidexperiment.org", "skatelescope.eu"]: print "Testing for %s VO is not supported." % user_VO sys.exit(0) # I'll need the proxy password later proxypasswd = getpass.getpass("Please enter your proxy password: ") if proxypasswd == "": print "Password seems to be empty, that won't work." sys.exit(0) else: print "Read password of length %d" % (len(proxypasswd)) # make a new directory using date and time # I refuse to use seconds here .... dirac_test_dir = datetime.datetime.now().strftime("%Y_%b_%d_%H%M") dirac_test_dir = dirac_test_dir + '_' + str(user_VO) el = platform.linux_distribution()[1].split('.')[0] if (int(el) not in [6, 7]): print "This does not look lile EL6 or EL7, here be dragons (HBD ;-)" dirac_test_dir = dirac_test_dir+'_HBD' else: dirac_test_dir = dirac_test_dir+'_EL'+ str(el) # this should only happen if program was quit in anger # the main purpose of this function is to annoy Simon :-) if os.path.exists(dirac_test_dir): print 'Trying to make dir %s, but it exists already.' % dirac_test_dir print 'Did you quit in anger previously ?' print 'Please be patient ....' time.sleep(30) print 'Your call is important to us. Please hold ....' time.sleep(31) dirac_test_dir = datetime.datetime.now().strftime("%Y_%b_%d_%H%M") print '\nCreating test dir: %s' % dirac_test_dir os.mkdir(dirac_test_dir) os.chdir(dirac_test_dir) # retrieve and install executable wget_cmd = ["wget", "-np", "-O", "dirac-install", "https://raw.githubusercontent.com/DIRACGrid/DIRAC/integration/Core/scripts/dirac-install.py"] simple_run(wget_cmd) os.chmod("dirac-install", 0744) pwd = os.getcwd() install_command_string = pwd + "/dirac-install" # needs full path # install UI # inst_cmd = [install_command_string, "-r", UI_VERSION, # "-i", UI_PYTHON_VERSION, "-g", LCG_BINDINGS] if UI_VERSION[0:2] == "v6": inst_cmd = "%s -r %s -i %s -g %s | tee install.log" %(install_command_string, UI_VERSION, UI_PYTHON_VERSION, LCG_BINDINGS) else: inst_cmd = "%s -r %s | tee install.log" %(install_command_string, UI_VERSION) simple_run(inst_cmd, shell=True) # to capture output # log ui and related versions in a convenient place uiverfile = open('ui_versions.txt', 'w') uiverfile.write('UI_VERSION: '+UI_VERSION+'\n') uiverfile.write('UI_PYTHON_VERSION: '+UI_PYTHON_VERSION+'\n') if UI_VERSION[0:2] == "v6": uiverfile.write('LCG_BINDINGS: '+LCG_BINDINGS+'\n') ext_version = extract_externals_version("install.log") uiverfile = open('ui_versions.txt', 'a') uiverfile.write(ext_version) else: diracos_version = extract_diracos_version("install.log") uiverfile = open('ui_versions.txt', 'a') uiverfile.write('DIRACOS: '+diracos_version+'\n') uiverfile.close() # from Simon # We have to "source" the bashrc now. # This is a bit of a hassle to do as we're in python not bash. # There are some pickle tricks to do this, # but python changes on source bashrc. source_cmd = [ "/bin/bash", "-c", "source bashrc && env -0" ] proc = Popen(source_cmd, stdout=PIPE) vars_out, _ = proc.communicate() if proc.returncode: print "ERROR: Failed to source bashrc. Check output above." 
sys.exit(0) # Get the vars from the output for var in vars_out.split("\0"): var_name, _, var_value = var.partition("=") os.environ[var_name] = var_value # Make a generic proxy to be able to download the config files proxy_child = pexpect.spawn('dirac-proxy-init -r -x') proxy_child.expect('password:') proxy_child.sendline(proxypasswd) print(proxy_child.before) # configure UI # sorry pylint, no shortcuts configure_ui_cmd = ["dirac-configure", "-F", "-S", "GridPP", "-C", "dips://dirac01.grid.hep.ph.ic.ac.uk:9135/Configuration/Server", "-I"] simple_run(configure_ui_cmd) # now all should be well, so make a %s VO proxy make_proxy_string = 'dirac-proxy-init -r -g %s_user -M' % user_VO # print make_proxy_string proxy_child = pexpect.spawn(make_proxy_string) # proxy_child = pexpect.spawn('dirac-proxy-init -g gridpp_user -M') proxy_child.expect('password:') proxy_child.sendline(proxypasswd) # try to give a hint of what is going on print proxy_child.read() # check if it's a voms-proxy and if it's not, try again. Once. proxycheck = complex_run(["dirac-proxy-info"]) match = re.search(r'username\s+:\s+(.+)', proxycheck) if not match: print 'Cannot determine dirac user name. Something has gone terribly wrong.' sys.exit(0) if proxycheck.find("VOMS fqan") < 0: print 'This proxy does not seem to contain a VOMS fqan, try again. Once' time.sleep(3) proxy_child = pexpect.spawn(make_proxy_string) proxy_child.expect('password:') proxy_child.sendline(proxypasswd) proxycheck2 = complex_run(["dirac-proxy-info"]) if proxycheck2.find("VOMS fqan") < 0: print 'This proxy still does not seem to contain a VOMS fqan. Giving up.' sys.exit(0) # send a status message - I should probably check for errors along the way print "UI installed and configured." print "Current proxy is: " simple_run(["dirac-proxy-info"]) # needed elsewhere return user_VO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def prepare_UI(self):", "def initGui(self):\n from p4_view import Gui\n self.updateStatus(\"Launching GUI...\")\n self.gui = Gui(self, self.lmap)\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.gui.setPossGoals(self.cfg[\"POSS_GOALS\"])\n #GHD\n self.gui.setMapName(self.cfg[\"MAP_FILE\"])\n self.updateStatus(\"OK\")\n self.gui.mainloop()", "def configure(options):\n configuring = Configuring()\n configuring.run()\n config = configuring.config\n\n # Install the right dependencies\n print_to_terminal(\"Installing extra dependencies (webkit, gth and simplejson)\")\n to_install_packages = ['python-webkit', 'python-gtk2', 'python-simplejson']\n subprocess.call(['sudo', 'apt-get', 'install']+to_install_packages)\n\n # Create fancy desktop entry\n for file in ['viki.desktop', 'viki_env.sh', 'viki_launch.sh']:\n process_template(file, config)\n os.chmod(file, os.stat(file).st_mode | stat.S_IEXEC)\n app_dir = 
os.path.expanduser('~/.local/share/applications')\n command = \"desktop-file-install --dir={} {}/viki.desktop\".format(app_dir, os.getcwd())\n subprocess.call(command, shell=True)\n return None", "def main():\n\tif mc.window( 'SetupOccUI', q = 1, ex = 1 ):\n\t\tmc.deleteUI( 'SetupOccUI' )\n\tPyForm=SetupOccUI()\n\tPyForm.show()", "def cli(add, rm, show, complete, dotfiles, configs, packages, fonts, old_path, new_path, remote, reinstall_packages,\n reinstall_configs, delete_config, destroy_backup, v):\n\tbackup_config_path = get_config_path()\n\n\t# No interface going to be displayed\n\tif any([v, delete_config, destroy_backup, show, rm]) or None not in add:\n\t\tif v:\n\t\t\tprint_version_info()\n\t\telif delete_config:\n\t\t\tos.remove(backup_config_path)\n\t\t\tprint_bright_red(\"Removed config file...\")\n\t\telif destroy_backup:\n\t\t\tbackup_home_path = get_config()[\"backup_path\"]\n\t\t\tdestroy_backup_dir(backup_home_path)\n\t\telif None not in add:\n\t\t\tadd_path_to_config(add[0], add[1])\n\t\telif rm:\n\t\t\trm_path_from_config(rm)\n\t\telif show:\n\t\t\tshow_config()\n\t\tsys.exit()\n\n\t# Start CLI\n\tsplash_screen()\n\tcreate_config_file_if_needed()\n\tbackup_config = get_config()\n\n\t# User entered a new path, so update the config\n\tif new_path:\n\t\tabs_path = os.path.abspath(new_path)\n\t\tprint(Fore.BLUE + Style.NORMAL + \"\\nUpdating shallow-backup path to -> \" + Style.BRIGHT + \"{}\".format(\n\t\t\tabs_path) + Style.RESET_ALL)\n\t\tbackup_config[\"backup_path\"] = abs_path\n\t\twrite_config(backup_config)\n\n\t# User didn't enter any CLI args so prompt for path update before showing menu\n\telif not (old_path or complete or dotfiles or packages or fonts):\n\t\tprompt_for_path_update(backup_config)\n\n\t# Create backup directory and do git setup\n\tbackup_home_path = get_config()[\"backup_path\"]\n\tmkdir_warn_overwrite(backup_home_path)\n\trepo, new_git_repo_created = safe_git_init(backup_home_path)\n\n\t# Create default gitignore if we just ran git init\n\tif new_git_repo_created:\n\t\tsafe_create_gitignore(backup_home_path)\n\t\t# Prompt user for remote URL\n\t\tif not remote:\n\t\t\tprompt_for_git_url(repo)\n\n\t# Set remote URL from CLI arg\n\tif remote:\n\t\tgit_set_remote(repo, remote)\n\n\tdotfiles_path = os.path.join(backup_home_path, \"dotfiles\")\n\tconfigs_path = os.path.join(backup_home_path, \"configs\")\n\tpackages_path = os.path.join(backup_home_path, \"packages\")\n\tfonts_path = os.path.join(backup_home_path, \"fonts\")\n\n\t# Command line options\n\tif any([complete, dotfiles, configs, packages, fonts, reinstall_packages, reinstall_configs]):\n\t\tif reinstall_packages:\n\t\t\treinstall_packages_from_lists(packages_path)\n\t\telif reinstall_configs:\n\t\t\treinstall_config_files(configs_path)\n\t\telif complete:\n\t\t\tbackup_all(dotfiles_path, packages_path, fonts_path, configs_path)\n\t\t\tgit_add_all_commit_push(repo, \"everything\")\n\t\telif dotfiles:\n\t\t\tbackup_dotfiles(dotfiles_path)\n\t\t\tgit_add_all_commit_push(repo, \"dotfiles\")\n\t\telif configs:\n\t\t\tbackup_configs(configs_path)\n\t\t\tgit_add_all_commit_push(repo, \"configs\")\n\t\telif packages:\n\t\t\tbackup_packages(packages_path)\n\t\t\tgit_add_all_commit_push(repo, \"packages\")\n\t\telif fonts:\n\t\t\tbackup_fonts(fonts_path)\n\t\t\tgit_add_all_commit_push(repo, \"fonts\")\n\t# No CL options, prompt for selection\n\telse:\n\t\tselection = actions_menu_prompt().lower().strip()\n\t\tif selection == \"back up everything\":\n\t\t\tbackup_all(dotfiles_path, packages_path, 
fonts_path, configs_path)\n\t\t\tgit_add_all_commit_push(repo, \"everything\")\n\t\telif selection == \"back up dotfiles\":\n\t\t\tbackup_dotfiles(dotfiles_path)\n\t\t\tgit_add_all_commit_push(repo, \"dotfiles\")\n\t\telif selection == \"back up configs\":\n\t\t\tbackup_configs(configs_path)\n\t\t\tgit_add_all_commit_push(repo, \"configs\")\n\t\telif selection == \"back up packages\":\n\t\t\tbackup_packages(packages_path)\n\t\t\tgit_add_all_commit_push(repo, \"packages\")\n\t\telif selection == \"back up fonts\":\n\t\t\tbackup_fonts(fonts_path)\n\t\t\tgit_add_all_commit_push(repo, \"fonts\")\n\t\telif selection == \"reinstall packages\":\n\t\t\treinstall_packages_from_lists(packages_path)\n\t\telif selection == \"reinstall configs\":\n\t\t\treinstall_config_files(configs_path)\n\t\telif selection == \"show config\":\n\t\t\tshow_config()\n\t\telif selection == \"destroy backup\":\n\t\t\tif prompt_yes_no(\"Erase backup directory: {}?\".format(backup_home_path), Fore.RED):\n\t\t\t\tdestroy_backup_dir(backup_home_path)\n\t\t\telse:\n\t\t\t\tprint_bright_red(\"Exiting to prevent accidental deletion of backup directory.\")\n\n\tsys.exit()", "def launchUI():\n app = QtWidgets.QApplication(sys.argv)\n ui = ClientFileManager()\n ui.resize(1200, 650)\n ui.show()\n sys.exit(app.exec_())", "def setup(self):\n self.profile = config.get(\"profile\")\n ui.command(\":cache_show.x__text\", config.get(\"pacman_cache\"))\n\n ulm = (config.get(\"uselocalmirror\") != \"\")\n ui.command(\":mirrorlist.opton\", config.get(\"usemirrorlist\") != \"\")\n ui.command(\":mirrorlist.enable\", not ulm)\n ui.command(\":use_local_mirror.opton\", ulm)\n ui.command(\":local_mirror.x__text\", config.get(\"localmirror\"))\n return True", "def _install(self):\n\n pass", "def usb_setup():\n print(\"Warning: using deprecated usb_setup routine!\")\n largest = largest_partition()\n medium = medium_partition()\n smallest = smallest_partition()\n\n print(\"Starting USB installation\")\n print(\"Using {} as archive storage\".format(largest))\n print(\"Using {} as volatile storage\".format(medium))\n print(\"Using {} as important storage\".format(smallest))\n\n lncm_usb = \"/usr/local/sbin/lncm-usb\"\n\n cli_invocation = [\n lncm_usb,\n largest,\n medium,\n smallest,\n get_uuid(largest),\n get_uuid(medium),\n get_uuid(smallest),\n str(largest_part_size()),\n ]\n\n call(cli_invocation)", "def setupMonti():\n #Update /etc/hosts with mongo-server and management-engine nodes\n sudo(\"apt-get install zookeeper\")\n sudo(\"apt-get install zookeeperd\")\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run (\"cd /etc/init.d && sudo update-rc.d chariot-nmw defaults 99\")\n sudo(\"reboot\")", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def install():\n deploy()\n configure()", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def add_mode(conffile, debug = False):\n install_control.initlogger(debug)\n lau = launcher.load(conffile)\n lau['danacenter'].install_center = False\n lau['danacenter'].install_manage = False\n\n #ctl = install_control.InstallControl(lau)\n #for x in ctl.planlist:\n # print x.name, x.stats()\n # for seq in x.seqs:\n # for step in 
seq.steps.values():\n # if hasattr(step.function, 'cmd'):\n # print step.function.cmd\n\n #return\n http_server.setctl(ctl)\n ctl.start()\n http_server.start(port)\n ctl.wait()\n\n http_server.wait()\n ctl.stop()\n http_server.stop()", "def setup(self):\n self.ui.setup_window()", "def base_install():\n # scwrl\n scwrl = {}\n print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\\n'\n 'All required input can use tab completion for paths.\\n'\n '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))\n scwrl_path = get_user_path('Please provide a path to your SCWRL executable', required=False)\n scwrl['path'] = str(scwrl_path)\n pack_mode = get_user_option(\n 'Please choose your packing mode (flexible is significantly slower but is more accurate).',\n ['flexible', 'rigid'])\n if pack_mode == 'rigid':\n scwrl['rigid_rotamer_model'] = True\n else:\n scwrl['rigid_rotamer_model'] = False\n settings['scwrl'] = scwrl\n\n # dssp\n print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))\n dssp = {}\n dssp_path = get_user_path('Please provide a path to your DSSP executable.', required=False)\n dssp['path'] = str(dssp_path)\n settings['dssp'] = dssp\n\n # buff\n print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))\n buff = {}\n ffs = []\n ff_dir = isambard_path / 'buff' / 'force_fields'\n for ff_file in os.listdir(str(ff_dir)):\n ff = pathlib.Path(ff_file)\n ffs.append(ff.stem)\n force_field_choice = get_user_option(\n 'Please choose the default BUFF force field, this can be modified during runtime.',\n ffs)\n buff['default_force_field'] = force_field_choice\n settings['buff'] = buff\n return", "def main():\r\n app = appdirs.AppDirs('Python Installer', 'Unicorn')\r\n try:\r\n os.makedirs(app.user_log_dir)\r\n except:\r\n pass\r\n\r\n pyversion = platform.python_version()\r\n pyarch = platform.architecture()[0]\r\n\r\n # log installed python version\r\n with open(os.path.join(app.user_log_dir, 'install.log'), 'a', encoding='utf-8') as fp:\r\n fp.write('Python {} ({}) installed.'.format(pyversion, pyarch))\r\n\r\n # log installed modules\r\n modules = freeze.freeze()\r\n module_str = ''\r\n for module in modules:\r\n module_str += '{}\\n'.format(module)\r\n \r\n with open(os.path.join(app.user_log_dir, 'modules-py{}-{}.log'.format(pyversion, pyarch)), 'w', encoding='utf-8') as fp:\r\n fp.write(module_str)\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n\r\n hello = QtGui.QLabel(\"Python {} ({}) installed\".format(pyversion, pyarch))\r\n hello.show()\r\n hello.resize(250, 80)\r\n sys.exit(app.exec_())", "def initUI(self):\n\n lbl_names = ['Название проекта', 'Версия', 'Директория', 'Описание', 'Автор', 'Почта', 'Дополнительные зависимости', 'Название ноды']\n param_list = ['motor_driver', '0.0.0', '/home/mitya/catkin_ws/src/', 'The motor_driver package', 'D. 
Potapov',\n '[email protected]', 'nav_msgs, geometry_msgs, tf, ', 'motor_driver_node']\n labels = []\n for name in lbl_names:\n labels.append(QLabel(name))\n for i, ph in zip(range(len(labels)), param_list):\n ed_line = QLineEdit()\n if i == 1:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([0-9\\.])*[0-9]$\")))\n elif i == 5:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([a-z0-9_-]+\\.)*[a-z0-9_-]+@[a-z0-9_-]+(\\.[a-z0-9_-]+)*\\.[a-z]{2,6}$\")))\n ed_line.setPlaceholderText(ph)\n if i != 0:\n ed_line.textEdited.connect(self.change_data)\n else:\n ed_line.textEdited.connect(self.change_pkg_name)\n self.full_ed_lines.append(ed_line)\n grid = QGridLayout()\n grid.setSpacing(5)\n for i in range(1, len(labels) + 1):\n for j in range(0, 2):\n if j == 0:\n grid.addWidget(labels[i - 1], i, j)\n else:\n grid.addWidget(self.full_ed_lines[i - 1], i, j)\n ch_dirButton = QPushButton(self)\n ch_dirButton.setIcon(QIcon('./icons/open_folder.png'))\n ch_dirButton.clicked.connect(self.ch_dirDialog)\n grid.addWidget(ch_dirButton, 3, 3)\n genButton = QPushButton(\"Сгенерировать\")\n genButton.clicked.connect(self.generate)\n grid.addWidget(genButton, len(labels) + 2, 1)\n self.setLayout(grid)\n self.setMinimumSize(700, 400)\n self.show()", "def main():\n\n # Method to call when we think something might be hung.\n #\n settings.timeoutCallback = timeout\n\n # Various signal handlers we want to listen for.\n #\n signal.signal(signal.SIGHUP, shutdownOnSignal)\n signal.signal(signal.SIGINT, shutdownOnSignal)\n signal.signal(signal.SIGTERM, shutdownOnSignal)\n signal.signal(signal.SIGQUIT, shutdownOnSignal)\n signal.signal(signal.SIGSEGV, abortOnSignal)\n\n # See if the desktop is running. If it is, the import of gtk will\n # succeed. If it isn't, the import will fail.\n #\n desktopRunning = False\n try:\n if gtk.gdk.display_get_default():\n desktopRunning = True\n except:\n pass\n\n # Parse the command line options.\n #\n # Run the preferences setup if the user has specified\n # \"--setup\" or \"--text-setup\" on the command line. If the\n # desktop is not running, we will fallback to the console-based\n # method as appropriate.\n #\n bypassSetup = False\n setupRequested = False\n showGUI = False\n\n # We hack a little here because the shell script to start orca can\n # conflate all of command line arguments into one string, which is\n # not what we want. We detect this by seeing if the length of the\n # argument list is 1.\n #\n arglist = sys.argv[1:]\n if len(arglist) == 1:\n arglist = arglist[0].split()\n\n try:\n # ? 
is for help\n # e is for enabling a feature\n # d is for disabling a feature\n # h is for help\n # u is for alternate user preferences location\n # s is for setup\n # n is for no setup\n # t is for text setup\n # v is for version\n #\n opts, args = getopt.getopt(\n arglist,\n \"?stnvld:e:u:\",\n [\"help\",\n \"user-prefs-dir=\",\n \"enable=\",\n \"disable=\",\n \"setup\",\n \"gui-setup\",\n \"text-setup\",\n \"no-setup\",\n \"list-apps\",\n \"version\"])\n for opt, val in opts:\n if opt in (\"-u\", \"--user-prefs-dir\"):\n userPrefsDir = val.strip()\n try:\n os.chdir(userPrefsDir)\n settings.userPrefsDir = userPrefsDir\n except:\n debug.printException(debug.LEVEL_FINEST)\n\n if opt in (\"-e\", \"--enable\"):\n feature = val.strip()\n\n if feature == \"speech\":\n _commandLineSettings[\"enableSpeech\"] = True\n elif feature == \"braille\":\n _commandLineSettings[\"enableBraille\"] = True\n elif feature == \"braille-monitor\":\n _commandLineSettings[\"enableBrailleMonitor\"] = True\n elif feature == \"magnifier\":\n _commandLineSettings[\"enableMagnifier\"] = True\n elif feature == \"main-window\":\n _commandLineSettings[\"showMainWindow\"] = True\n else:\n usage()\n die(2)\n\n if opt in (\"-d\", \"--disable\"):\n feature = val.strip()\n if feature == \"speech\":\n _commandLineSettings[\"enableSpeech\"] = False\n elif feature == \"braille\":\n _commandLineSettings[\"enableBraille\"] = False\n elif feature == \"braille-monitor\":\n _commandLineSettings[\"enableBrailleMonitor\"] = False\n elif feature == \"magnifier\":\n _commandLineSettings[\"enableMagnifier\"] = False\n elif feature == \"main-window\":\n _commandLineSettings[\"showMainWindow\"] = False\n else:\n usage()\n die(2)\n\n if opt in (\"-s\", \"--gui-setup\", \"--setup\"):\n setupRequested = True\n showGUI = desktopRunning\n if opt in (\"-t\", \"--text-setup\"):\n setupRequested = True\n showGUI = False\n if opt in (\"-n\", \"--no-setup\"):\n bypassSetup = True\n if opt in (\"-?\", \"--help\"):\n usage()\n die(0)\n if opt in (\"-v\", \"--version\"):\n print \"Orca %s\" % platform.version\n die(0)\n if opt in (\"-l\", \"--list-apps\"):\n apps = filter(lambda x: x is not None,\n pyatspi.Registry.getDesktop(0))\n for app in apps:\n print app.name\n die(0)\n\n except:\n debug.printException(debug.LEVEL_OFF)\n usage()\n die(2)\n\n # Do not run Orca if accessibility has not been enabled.\n # We do allow, however, one to force Orca to run via the\n # \"-n\" switch. The main reason is so that things such\n # as accessible login can work -- in those cases, the gconf\n # setting is typically not set since the gdm user does not\n # have a home.\n #\n a11yEnabled = settings.isAccessibilityEnabled()\n if (not bypassSetup) and (not a11yEnabled):\n _showPreferencesConsole()\n die()\n\n if setupRequested and (not bypassSetup) and (not showGUI):\n _showPreferencesConsole()\n\n if not desktopRunning:\n print \"Cannot start Orca because it cannot connect\"\n print \"to the Desktop. Please make sure the DISPLAY\"\n print \"environment variable has been set.\"\n return 1\n\n userprefs = settings.userPrefsDir\n sys.path.insert(0, userprefs)\n sys.path.insert(0, '') # current directory\n\n init(pyatspi.Registry)\n\n try:\n message = _(\"Welcome to Orca.\")\n speech.speak(message)\n braille.displayMessage(message)\n except:\n debug.printException(debug.LEVEL_SEVERE)\n\n # Check to see if the user wants the configuration GUI. It's\n # done here so that the user's existing preferences can be used\n # to set the initial GUI state. 
We'll also force the set to\n # be run if the preferences file doesn't exist, unless the\n # user has bypassed any setup via the --no-setup switch.\n #\n if setupRequested and (not bypassSetup) and showGUI:\n showPreferencesGUI()\n elif (not _userSettings) and (not bypassSetup):\n if desktopRunning:\n showPreferencesGUI()\n else:\n _showPreferencesConsole()\n\n start(pyatspi.Registry) # waits until we stop the registry\n return 0", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def run_installer():\n global DEBUG_ON\n global NM_AVAILABLE\n username = ''\n password = ''\n pfx_file = ''\n parser = argparse.ArgumentParser(description='eduroam linux installer.')\n parser.add_argument('--debug', '-d', action='store_true', dest='debug',\n default=False, help='set debug flag')\n args = parser.parse_args()\n if args.debug:\n DEBUG_ON = True\n print(\"Runnng debug mode\")\n\n debug(\"Calling InstallerData\")\n\n installer_data = InstallerData(username=username, password=password, pfx_file=pfx_file)\n\n # test dbus connection\n if NM_AVAILABLE:\n config_tool = CatNMConfigTool()\n if config_tool.connect_to_nm() is None:\n NM_AVAILABLE = False\n\n installer_data.get_user_cred()\n\n # get user credentials from file\n\n # installer_data.get_user_cred_from_file()\n installer_data.save_ca()\n if NM_AVAILABLE:\n config_tool.add_connections(installer_data)\n else:\n wpa_config = WpaConf()\n wpa_config.create_wpa_conf(Config.ssids, installer_data)\n debug(\"Installation finished.\")", "def initGui(self):\n\n icon_path = ':/plugins/new_raptor/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Add New Raptor nest'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True", "def setupDeviceGui(self):\n\n dropDowns = list(self.drivers[driver]['uiDriver'] for driver in self.drivers)\n for dropDown in dropDowns:\n dropDown.clear()\n dropDown.setView(PyQt5.QtWidgets.QListView())\n dropDown.addItem('No device selected')\n\n # adding special items\n self.drivers['dome']['uiDriver'].addItem('INDI')\n self.drivers['imaging']['uiDriver'].addItem('INDI')\n self.drivers['sensorWeather']['uiDriver'].addItem('INDI')\n self.drivers['directWeather']['uiDriver'].addItem('Built-In')\n self.drivers['onlineWeather']['uiDriver'].addItem('Built-In')\n self.drivers['cover']['uiDriver'].addItem('INDI')\n self.drivers['skymeter']['uiDriver'].addItem('INDI')\n self.drivers['telescope']['uiDriver'].addItem('INDI')\n self.drivers['power']['uiDriver'].addItem('INDI')\n self.drivers['relay']['uiDriver'].addItem('Built-In')\n for app in self.app.astrometry.solverAvailable:\n self.drivers['astrometry']['uiDriver'].addItem(app)\n self.drivers['remote']['uiDriver'].addItem('Built-In')\n self.drivers['measure']['uiDriver'].addItem('Built-In')\n\n return True", "def initGui(self):\n\n icon_path = ':/plugins/trialPurpose/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'trial'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def setUp(self):\n self.portal = self.layer['portal']\n self.installer = get_installer(self.portal)", "def setup_theme():\n os.system('sudo apt install arc-theme')\n\n output = \"{padding}{mark} Installing theme...\"\n print(output.format(padding=LEFT_PADDING, mark=BALLOT_MARK))", "def setup(self, rc):\n pass", "def updateSettingsUI(self):\n\n pass" ]
[ "0.603138", "0.58798337", "0.5784808", "0.5766571", "0.57437", "0.5734021", "0.5714146", "0.56864846", "0.5641124", "0.5625011", "0.56127167", "0.56086886", "0.5601726", "0.55902636", "0.55759335", "0.55585897", "0.55564934", "0.55510265", "0.5542947", "0.5528047", "0.5500627", "0.5500627", "0.5482993", "0.5470047", "0.5465812", "0.54577786", "0.5454809", "0.5450601", "0.54226065", "0.5415545" ]
0.7038603
0
Called before anything else, i.e. just after installer controller creation. Return value is ignored.
def pre_installation(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepareController(self):\n pass", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def test_initialization(self, create_controller: Controller) -> None:\n pass", "def do_before(self):\r\n pass", "async def pre_action_init(self) -> None:", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def startUp(self):\n pass", "def pre_start(self) -> None:\n pass", "def post_setup(self, context):\n pass", "def before(self) -> None:\n pass", "def Setup(self):\n return True", "def pre_deploy(self) -> Any:\n raise NotImplementedError", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\")\n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=self.templateName)", "def __init__(self):\n self.setup_called = False", "def on_before_execution(self):\n pass", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()", "def pre_build(self):\n pass", "def _afterInit(self):\n pass", "def at_pre_cmd(self):\n pass", "def setUp(self):\n self.portal = self.layer['portal']\n self.installer = get_installer(self.portal)", "def preRun_(self):\n super().preRun_()\n self.client = None", "def PreExecute(self):\n return True", "def _setup(self) -> None:\n\t\treturn", "def pre_install(self, installable_pkgs):\n pass", "def on_startup(self) -> None:\n ...", "def setup(self):\n\n if self.has_setup():\n self.logger.info(\"%s has a pre-flight setup routine. Running now.\" % self.plugin_dict_name)\n self._module.setup(self, self.versions)" ]
[ "0.69888854", "0.6657627", "0.6657627", "0.65865713", "0.64082533", "0.6388744", "0.6384215", "0.6347374", "0.63209623", "0.6310592", "0.6240077", "0.62371147", "0.62121433", "0.6202042", "0.619813", "0.61480397", "0.61330086", "0.6025088", "0.6025088", "0.60231227", "0.6007485", "0.60029405", "0.5998575", "0.5993744", "0.5984419", "0.59679854", "0.5945434", "0.5935053", "0.59340274", "0.5895869" ]
0.71761113
0
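The entry above documents a pre_installation hook that runs right after the installer controller is created and whose return value is ignored. As a minimal illustrative sketch, not part of the dataset, a plugin might override it like this; the InstallerPlugin base class name is an assumption, since the quoted stubs only show the hook bodies:

import logging

class InstallerPlugin:
    # Assumed base class: mirrors the no-op hook stubs quoted in this dataset.
    def pre_installation(self):
        pass

class StartupLogPlugin(InstallerPlugin):
    # Example override: the controller ignores the return value, so the hook
    # is useful only for side effects such as logging or state setup.
    def pre_installation(self):
        logging.info("installer controller created, run starting")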
Called before any files have been downloaded.
def pre_download(self, remote_files): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_and_prepare(self):\n self._download_and_prepare()", "def download_files(self):", "def pre_download(self):\n while not os.path.exists(self.file_path):\n time.sleep(1)\n\n if self.downloader.file_size != 0:\n # Waits %1 of the total download\n percent = self.downloader.file_size * 0.01\n\n while self.downloader.downloaded_size < percent:\n time.sleep(1)\n else:\n # Waits 2MB, just an arbitrary amount\n while self.downloader.downloaded_size < 2 * 1024 * 1024:\n time.sleep(0.5)", "def post_download(self, remote_files):\n pass", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def init_downloader(self) -> None:\n raise NotImplementedError", "def monitor_downloads(self):\n return self.request_manager.get_downloads().addCallback(self.on_downloads)", "def download_finish(self, cloud_file):", "def download(self):\n pass", "def download(self):\n pass", "def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)", "def onContentDownloadStart(self, fetcher, contentLength): #$NON-NLS-1$\r", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_and_preprocess(self):\n print('Preparing steering angle database.')\n print('Downloading...')\n self.download()\n print('Preprocessing...')\n self.preprocess()", "def start_download(self) -> NoReturn:\n if self.threaded:\n self.threaded_download()\n else:\n self.regular_download()", "def test_download(self):\n pass", "def run_downloader(self):\n \"\"\"calls to the file downloader\"\"\"\n try:\n html = self.get_page(self.url)\n soup = self.get_soup(html)\n if soup is not None: # If we have soup -\n self.get_links(soup)\n self.get_files()\n else:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected',\n e)\n return False\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected', e)\n\n return False\n return True", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not 
os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()", "def maybe_download_and_extract(self, force=False):\r\n if force:\r\n if os.path.exists(self.get_working_dir()):\r\n logger.info(\"Removing downloaded data...\")\r\n shutil.rmtree(self.get_working_dir(), ignore_errors=True)\r\n while os.path.exists(self.get_working_dir()):\r\n pass", "def download_done(self):\n if self.dl_goal_nbytes != 0:\n self.dl_output_done()\n\n if self.dl_cur_npkgs != self.dl_goal_npkgs:\n logger.error(\"\\nExpected %s pkgs, received %s pkgs \"\n \"instead.\" % (self.dl_goal_npkgs,\n self.dl_cur_npkgs))\n if self.dl_cur_nfiles != self.dl_goal_nfiles:\n logger.error(\"\\nExpected %s files, received %s files \"\n \"instead.\" % (self.dl_goal_nfiles,\n self.dl_cur_nfiles))\n if self.dl_cur_nbytes != self.dl_goal_nbytes:\n logger.error(\"\\nExpected %s bytes, received %s bytes \"\n \"instead.\" % (self.dl_goal_nbytes,\n self.dl_cur_nbytes))\n\n assert self.dl_cur_npkgs == self.dl_goal_npkgs, \\\n \"Expected %s packages but got %s\" % \\\n (self.dl_goal_npkgs, self.dl_cur_npkgs)\n assert self.dl_cur_nfiles == self.dl_goal_nfiles, \\\n \"Expected %s files but got %s\" % \\\n (self.dl_goal_nfiles, self.dl_cur_nfiles)\n assert self.dl_cur_nbytes == self.dl_goal_nbytes, \\\n \"Expected %s bytes but got %s\" % \\\n (self.dl_goal_nbytes, self.dl_cur_nbytes)", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def run(self):\n download(self.attempt)", "def reset(self):\n self.reset_cache_dir()\n self.reset_download_dir()", "def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... 
')", "def startDownloadQueue(self):\n\n self.runEventCallbacks('downloadQueueStarted') \n while len(self.downloadQueue):\n if self.downloadQueue[0]['dst'] != None:\n self.getFile(self.downloadQueue[0]['src'], \n self.downloadQueue[0]['dst'])\n self.runEventCallbacks('downloadQueueFinished') \n self.clearDownloadQueue()", "def acknowledge_downloaded_files():\n requests_to_delete = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='finished'\")\n if len(requests_to_delete) > 0:\n\n queries = []\n for request_to_delete in requests_to_delete:\n\n DownloaderSPAN512.delete_stagged_file(request_to_delete)\n\n dlm_cout.outs(\"Report download (%s) succeeded.\" % request_to_delete['guid'])\n queries.append(\"UPDATE requests \" \\\n \"SET status='cleaned_up', \" \\\n \"details='download complete', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), request_to_delete['id']))\n\n jobtracker.query(queries)\n else: pass", "def on_start(self):\n # self.login()\n\n # self.createfiles()", "def download_file(self, parsed_event, input_dir_path):", "def need_download(self, episode):\n self.downloads_worker.need_download.emit(episode, self.tick)" ]
[ "0.72078025", "0.6953876", "0.6938813", "0.6892764", "0.6278536", "0.60983944", "0.60776293", "0.59928954", "0.59894645", "0.59894645", "0.59813267", "0.5980656", "0.5966963", "0.59472805", "0.5930741", "0.59225214", "0.58397645", "0.58169204", "0.57641727", "0.57575905", "0.57533944", "0.5743556", "0.57328826", "0.57242733", "0.57137054", "0.5708049", "0.5665129", "0.56620556", "0.5653823", "0.56538016" ]
0.80088097
0
Called after all files have been downloaded.
def post_download(self, remote_files): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_finish(self, cloud_file):", "def pre_download(self, remote_files):\n pass", "def download_files(self):", "def _finalize(self):\n if self.url and self.url.startswith('file://'):\n self.parse_external_files(self.url[7:])\n Media._finalize(self)", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)", "def after_all(self) -> None:", "def finalize(self):\n # 027 Not needed in the simple FilesAdaptor. \n pass", "def acknowledge_downloaded_files():\n requests_to_delete = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='finished'\")\n if len(requests_to_delete) > 0:\n\n queries = []\n for request_to_delete in requests_to_delete:\n\n DownloaderSPAN512.delete_stagged_file(request_to_delete)\n\n dlm_cout.outs(\"Report download (%s) succeeded.\" % request_to_delete['guid'])\n queries.append(\"UPDATE requests \" \\\n \"SET status='cleaned_up', \" \\\n \"details='download complete', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), request_to_delete['id']))\n\n jobtracker.query(queries)\n else: pass", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def download_done(self):\n if self.dl_goal_nbytes != 0:\n self.dl_output_done()\n\n if self.dl_cur_npkgs != self.dl_goal_npkgs:\n logger.error(\"\\nExpected %s pkgs, received %s pkgs \"\n \"instead.\" % (self.dl_goal_npkgs,\n self.dl_cur_npkgs))\n if self.dl_cur_nfiles != self.dl_goal_nfiles:\n logger.error(\"\\nExpected %s files, received %s files \"\n \"instead.\" % (self.dl_goal_nfiles,\n self.dl_cur_nfiles))\n if self.dl_cur_nbytes != self.dl_goal_nbytes:\n logger.error(\"\\nExpected %s bytes, received %s bytes \"\n \"instead.\" % (self.dl_goal_nbytes,\n self.dl_cur_nbytes))\n\n assert self.dl_cur_npkgs == self.dl_goal_npkgs, \\\n \"Expected %s packages but got %s\" % \\\n (self.dl_goal_npkgs, self.dl_cur_npkgs)\n assert self.dl_cur_nfiles == self.dl_goal_nfiles, \\\n \"Expected %s files but got %s\" % \\\n (self.dl_goal_nfiles, self.dl_cur_nfiles)\n assert self.dl_cur_nbytes == self.dl_goal_nbytes, \\\n \"Expected %s bytes but got %s\" % \\\n (self.dl_goal_nbytes, self.dl_cur_nbytes)", "def purge_downloaded_files():\n for fpath in DOWNLOADED_FILEPATHS:\n if os.path.exists(fpath):\n os.remove(fpath)", "def download_and_prepare(self):\n self._download_and_prepare()", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def perform_final_actions(self):\n for finalizer_function in self._finalizer_functions:\n finalizer_function()", "def onContentDownloadComplete(self, fetcher, connectionResp): #$NON-NLS-1$\r", "def post_dump(self) -> \"None\":\n # Close all the file immediately\n for file in self.files:\n self.close_file(file)\n\n # Queue exiting the application\n asyncio.get_event_loop().call_soon(self.pre_exit)", "def cleanup(self):\n\t\tself.loader.cleanup()\n\t\tself.Loaded = False", "def tearDown(self):\n\n for 
fname in self.fnames:\n FileSystem.unlink(fname)", "def cleanup(self):\n super(Test200SmartSanityDownload004, self).cleanup()", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def do_after_dump(self, dump_items):\n # note that it's possible for links in \"latest\" to point to\n # files from different runs, in which case the checksum files\n # will have accurate checksums for the run for which it was\n # produced, but not the other files. FIXME\n for htype in Checksummer.HASHTYPES:\n dfname = DumpFilename(\n self.wiki, None, self.checksummer.get_checksum_filename_basename(htype))\n self.symlinks.save_symlink(dfname)\n self.symlinks.cleanup_symlinks()\n for item in dump_items:\n self.runinfo.save_dump_runinfo(RunInfo.report_dump_runinfo(dump_items))\n if item.to_run():\n dump_names = item.list_dumpnames()\n if type(dump_names).__name__ != 'list':\n dump_names = [dump_names]\n if item._parts_enabled:\n # if there is a specific part we are doing, we want to only clear out\n # old files for that part, because new files for the other\n # parts may not have been generated yet.\n partnum = item._partnum_todo\n else:\n partnum = None\n\n checkpoint = None\n if item._checkpoints_enabled:\n if item.checkpoint_file is not None:\n # if there's a specific checkpoint file we are\n # rerunning, we would only clear out old copies\n # of that very file. meh. how likely is it that we\n # have one? these files are time based and the start/end pageids\n # are going to fluctuate. 
whatever\n checkpoint = item.checkpoint_file.checkpoint\n\n for dump in dump_names:\n self.symlinks.remove_symlinks_from_old_runs(\n self.wiki.date, dump, partnum, checkpoint, onlyparts=item.onlyparts)\n\n self.feeds.cleanup_feeds()", "def export_file_complete_sig_handler(self):\n # Increment the index\n self.export_file_index += 1\n\n # Move the state\n self.scanFilesProgressBar.setValue(self.scanFilesProgressBar.value() + 1)\n\n # Check if we have exported all the files\n if self.export_file_index >= len(self.analzye_results):\n # Show a dialog box that everything is exported and complete\n QMessageBox.question(self.parent, \"Export Complete\", \"All files have been exported.\", QMessageBox.Ok)\n else:\n # Export the next file\n self.export_file(self.analzye_results[self.export_file_index])", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def finalize(self):\n if self._writer:\n self.flush()\n if self._archive_file:\n self._archive_file.close()", "def after(self):\n pass", "def after(self):\n pass", "def onDone(self):\n pass" ]
[ "0.6957989", "0.6865277", "0.68242747", "0.64642113", "0.64255536", "0.64033055", "0.62918645", "0.62449217", "0.62424684", "0.62129873", "0.62109494", "0.62088096", "0.6166017", "0.616356", "0.6145047", "0.6114954", "0.6033107", "0.6015093", "0.59706825", "0.5961392", "0.59431547", "0.5941272", "0.59236896", "0.5923082", "0.59099346", "0.5909879", "0.5904875", "0.58967006", "0.58967006", "0.5875506" ]
0.7216123
0
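The pre_download and post_download entries above bracket the download phase with the same list of remote files. A minimal sketch of how the two hooks could be used together, assuming a plugin is simply an object exposing these method names (the class name and the timing logic are illustrative, not taken from the dataset):

import time

class DownloadTimerPlugin:
    # Illustrative plugin: measures how long the download phase takes.
    def pre_download(self, remote_files):
        self._started = time.monotonic()
        self._count = len(remote_files)

    def post_download(self, remote_files):
        elapsed = time.monotonic() - self._started
        print(f"downloaded {self._count} files in {elapsed:.1f}s")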
Called before the installation of any pkg.
def pre_install(self, installable_pkgs): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_install_pkg(self, installable_pkg):\n pass", "def pre_installation(self):\n pass", "def post_install(self, installable_pkgs):\n pass", "def post_install_pkg(self, installable_pkg):\n pass", "def _install(self):\n\n pass", "def do_post_install(self, context):\n pass", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def before_packages(manager):\n if manager not in b.packages:\n return\n if 'apt' == manager:\n s.add('export APT_LISTBUGS_FRONTEND=\"none\"')\n s.add('export APT_LISTCHANGES_FRONTEND=\"none\"')\n s.add('export DEBIAN_FRONTEND=\"noninteractive\"')\n s.add('apt-get -q update')\n elif 'yum' == manager:\n s.add('yum makecache')", "def post_installation(self, exc_value):\n pass", "def pre_install(self, dest_dir):\n pass", "def pipInstall(self):\n\n print \"Does Nothing\"", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def test_setup_package(self):\n pluggable_package.setup(self._test_package)\n self._test_setup(self._test_package)", "def setup(self):\n\n if self.has_setup():\n self.logger.info(\"%s has a pre-flight setup routine. Running now.\" % self.plugin_dict_name)\n self._module.setup(self, self.versions)", "def _provision_package(self):", "def init():\n print(\"Installed everything under {0} \"\n \"virtual environment\".format(package_name()))", "def prereposetup_hook(conduit):\n return init_hook(conduit)", "def preprocess_raw_pkgs(self, raw_installable_pkgs):\n for installable_pkg in raw_installable_pkgs:\n installable_pkg.pkg_info['explicit_install'] = True\n return raw_installable_pkgs", "def test_install(self):\n pass", "def _setup(self) -> None:\n\t\treturn", "def install_dependencies(self):\n return False", "def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)", "def install(self):\n raise NotImplementedError", "def setUp(self):\n trytond.tests.test_tryton.install_module('nereid_webshop')" ]
[ "0.87274486", "0.8490351", "0.78138494", "0.76809055", "0.7401921", "0.7185641", "0.7014699", "0.7007921", "0.68065137", "0.6684582", "0.6648925", "0.6547911", "0.6547911", "0.6547911", "0.6547911", "0.6547911", "0.6534111", "0.65294236", "0.6491212", "0.6484541", "0.64816105", "0.64802855", "0.6427031", "0.6424274", "0.6408111", "0.6362568", "0.6359924", "0.63491464", "0.63451165", "0.63174367" ]
0.86834514
1
Called before the installation of the given installable pkg.
def pre_install_pkg(self, installable_pkg): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_install(self, installable_pkgs):\n pass", "def post_install_pkg(self, installable_pkg):\n pass", "def post_install(self, installable_pkgs):\n pass", "def pre_installation(self):\n pass", "def setPkgRequired(self, *args):\n return _libsbml.SBMLDocument_setPkgRequired(self, *args)", "def do_post_install(self, context):\n pass", "def _install(self):\n\n pass", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")", "def preprocess_raw_pkgs(self, raw_installable_pkgs):\n for installable_pkg in raw_installable_pkgs:\n installable_pkg.pkg_info['explicit_install'] = True\n return raw_installable_pkgs", "def test_setup_package(self):\n pluggable_package.setup(self._test_package)\n self._test_setup(self._test_package)", "def setPackageRequired(self, *args):\n return _libsbml.SBMLDocument_setPackageRequired(self, *args)", "def pre_install(self, dest_dir):\n pass", "def install_pkg(pkg):\n# Checkout packages if we have them in ROOT\n src_path = check_module_path(pkg)\n# Analyzing packages: generation of package/module CMakeFile.txt,\n analizer_package(pkg, src_path)\n# Building DB\n db_manifest = generation_db_modules()\n# Print DB\n print_db_manifest(db_manifest)\n# Generating DB instance for dag\n dag_manifest = dag_pre_generation(db_manifest)\n# WIP: Still not working\n generation_lock_file_from_dag(dag_manifest)\n# Parcing packages in db_manifest\n parced_db_manifest = parser_db_manifest(db_manifest)\n# Resolving dependecies without DAG\n resolver_dependencies(pkg, parced_db_manifest)\n# Adopting name of package according generated DB\n# We are rewriting name of package!\n pkg = naming_checker(pkg)\n# Before buiding we need to check if pkg is really in the Db\n check_pkg_db(pkg, parced_db_manifest)\n# Check if package is installed\n if check_install_pkg_db(pkg, parced_db_manifest):\n return True\n else:\n# Trigger trigger_dependencies_pkg_db\n #trigger_dependency_pkg_db(pkg, parced_db_manifest)\n# Clean build directory\n clean_build(pkg, parced_db_manifest)\n# Reruning CMake\n rerun_configuration(pkg)\n# Buiding packages\n build_package(pkg, parced_db_manifest)\n# Preparing packages\n prepare_package(pkg)\n# Installing packages\n deploy_val = deploy_pkg(pkg)\n####################\n try:\n db_manifest[pkg][\"installed\"] = True\n except:\n pass\n return deploy_val, True", "def notify_add_package(self, pkg):\n ver_key = (pkg.category, pkg.package)\n s = set(self.versions.get(ver_key, ()))\n s.add(pkg.fullver)\n if pkg.category not in self.categories:\n self.categories.force_add(pkg.category)\n self.packages.force_regen(pkg.category)\n self.versions.force_regen(ver_key, tuple(s))", "def set_package(self, pkg): \n self.pkg = pkg", "def before_packages(manager):\n if manager not in b.packages:\n return\n if 'apt' == manager:\n s.add('export APT_LISTBUGS_FRONTEND=\"none\"')\n s.add('export APT_LISTCHANGES_FRONTEND=\"none\"')\n s.add('export DEBIAN_FRONTEND=\"noninteractive\"')\n s.add('apt-get -q update')\n elif 'yum' == manager:\n s.add('yum makecache')", "def enablePackageInternal(self, *args):\n return _libsbml.Event_enablePackageInternal(self, *args)", "def 
_install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def pipInstall(self):\n\n print \"Does Nothing\"", "def enablePackageInternal(self, *args):\n return _libsbml.DefaultTerm_enablePackageInternal(self, *args)", "def addUnknownPackageRequired(self, *args):\n return _libsbml.SBMLDocument_addUnknownPackageRequired(self, *args)", "def enablePackageInternal(self, *args):\n return _libsbml.QualModelPlugin_enablePackageInternal(self, *args)", "def test_install_packages():\n\n\tassert packaging.install_packages(pkgs) == None", "def test_default_packages(host, pkg):\n assert host.package(pkg).is_installed", "def enablePackageInternal(self, *args):\n return _libsbml.SBasePlugin_enablePackageInternal(self, *args)", "def analizer_package(pkg, src_dir_root):\n print(\"[root-get] DEBUG: Preparing environment for package\")\n ecanalyze = 0\n try:\n ecanalyze = os.system(PWD_PATH + \"/analyzer/preparing-environment-for-pkg \" + pkg + \" \" + src_dir_root)\n except:\n pass\n if ecanalyze != 0:\n print(\"[root-get] Failed to configure package\")\n return False", "def _loadManifest(self, pkg):\r\n if pkg in self._packages:\r\n return\r\n\r\n sys.path = self._generatePythonPath(pkg) + sys.path", "def enablePackageInternal(self, *args):\n return _libsbml.Input_enablePackageInternal(self, *args)", "def enablePackageInternal(self, *args):\n return _libsbml.SBase_enablePackageInternal(self, *args)" ]
[ "0.803951", "0.79661417", "0.72544426", "0.7061348", "0.6615395", "0.645456", "0.63259906", "0.62912256", "0.6285924", "0.62740517", "0.625301", "0.62484384", "0.6227197", "0.6225595", "0.6215587", "0.6126311", "0.6083534", "0.60764074", "0.5963906", "0.59445024", "0.5924112", "0.5922857", "0.5900175", "0.5892806", "0.5887208", "0.58641094", "0.5857424", "0.5857051", "0.5845306", "0.5839644" ]
0.89542097
0
Called after the successful installation of the given installable pkg.
def post_install_pkg(self, installable_pkg): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_install(self, installable_pkgs):\n pass", "def pre_install_pkg(self, installable_pkg):\n pass", "def do_post_install(self, context):\n pass", "def notify_add_package(self, pkg):\n ver_key = (pkg.category, pkg.package)\n s = set(self.versions.get(ver_key, ()))\n s.add(pkg.fullver)\n if pkg.category not in self.categories:\n self.categories.force_add(pkg.category)\n self.packages.force_regen(pkg.category)\n self.versions.force_regen(ver_key, tuple(s))", "def install_pkg(pkg):\n# Checkout packages if we have them in ROOT\n src_path = check_module_path(pkg)\n# Analyzing packages: generation of package/module CMakeFile.txt,\n analizer_package(pkg, src_path)\n# Building DB\n db_manifest = generation_db_modules()\n# Print DB\n print_db_manifest(db_manifest)\n# Generating DB instance for dag\n dag_manifest = dag_pre_generation(db_manifest)\n# WIP: Still not working\n generation_lock_file_from_dag(dag_manifest)\n# Parcing packages in db_manifest\n parced_db_manifest = parser_db_manifest(db_manifest)\n# Resolving dependecies without DAG\n resolver_dependencies(pkg, parced_db_manifest)\n# Adopting name of package according generated DB\n# We are rewriting name of package!\n pkg = naming_checker(pkg)\n# Before buiding we need to check if pkg is really in the Db\n check_pkg_db(pkg, parced_db_manifest)\n# Check if package is installed\n if check_install_pkg_db(pkg, parced_db_manifest):\n return True\n else:\n# Trigger trigger_dependencies_pkg_db\n #trigger_dependency_pkg_db(pkg, parced_db_manifest)\n# Clean build directory\n clean_build(pkg, parced_db_manifest)\n# Reruning CMake\n rerun_configuration(pkg)\n# Buiding packages\n build_package(pkg, parced_db_manifest)\n# Preparing packages\n prepare_package(pkg)\n# Installing packages\n deploy_val = deploy_pkg(pkg)\n####################\n try:\n db_manifest[pkg][\"installed\"] = True\n except:\n pass\n return deploy_val, True", "def post_install(self, dest_dir):\n raise NotImplementedError(\"post_install is not implemented\")", "def set_package(self, pkg): \n self.pkg = pkg", "def post_installation(self, exc_value):\n pass", "def pre_install(self, installable_pkgs):\n pass", "def test_setup_package(self):\n pluggable_package.setup(self._test_package)\n self._test_setup(self._test_package)", "def _add_package(self, pkg):\n for dep in pkg.dependency:\n for key in self.registry.keys():\n if key[0] == dep:\n break\n else:\n raise RuntimeError(\n 'Package %s has unresolved dependency issues: %s' %\n (pkg.name, dep))\n self.package_dependency.add_edge(pkg.name, dep)\n self.package_dependency.add_vertex(pkg.name)\n for key, task in pkg.tasks.iteritems():\n self.registry[key, pkg.version] = pkg\n self.registry[key, None] = pkg\n self.registry[pkg.name, pkg.version] = pkg\n\n # mark this package as the latest one\n self.registry[pkg.name, None] = pkg", "def install_dep_pkg(pkg, db_manifest):\n# Checkout packages if we have them in ROOT\n src_path = check_module_path(pkg)\n# Analyzing packages: generation of package/module CMakeFile.txt,\n analizer_package(pkg, src_path)\n# Building DB\n db_manifest = generation_db_modules()\n# Print DB\n print_db_manifest(db_manifest)\n# Generating DB instance for dag\n dag_manifest = dag_pre_generation(db_manifest)\n# WIP: Still not working\n generation_lock_file_from_dag(dag_manifest)\n# Parcing packages in db_manifest\n #parced_db_manifest = parser_db_manifest(db_manifest)\n# Resolving dependecies without DAG\n resolver_dependencies(pkg, db_manifest)\n# Before buiding we need to check if pkg is really in the Db\n 
check_pkg_db(pkg, db_manifest)\n# Check if package is installed\n if check_install_pkg_db(pkg, db_manifest):\n return True\n# Trigger trigger_dependencies_pkg_db\n #trigger_dependency_pkg_db(pkg, db_manifest)\n# Buiding packages\n else:\n build_package(pkg, db_manifest)\n# Preparing packages\n prepare_package(pkg)\n# Installing packages\n deploy_pkg(pkg)\n####################\n db_manifest[pkg][\"installed\"] = True\n return True", "def notify_remove_package(self, pkg):\n ver_key = (pkg.category, pkg.package)\n l = [x for x in self.versions[ver_key] if x != pkg.fullver]\n if not l:\n # dead package\n wipe = list(self.packages[pkg.category]) == [pkg.package]\n self.packages.force_regen(pkg.category)\n if wipe:\n self.categories.force_regen(pkg.category)\n self.versions.force_regen(ver_key, tuple(l))", "def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")", "def test_after_install(self):\n self.run_test_suites(self.after_install_test_suite_list)", "def prepareUninstall():\n pass", "def _provision_package(self):", "def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None", "def find_pkg(self, pkg):\n pass", "def _run(self, pkg):\n if os.path.exists(pkg):\n err = '%s already exists in current working directory.' % pkg\n raise SystemExit(err)\n configp = util.config()\n try:\n user = configp.get('rhcephpkg', 'user')\n gitbaseurl = configp.get('rhcephpkg', 'gitbaseurl')\n except configparser.Error as err:\n raise SystemExit('Problem parsing .rhcephpkg.conf: %s',\n err.message)\n # If we were given an RPM pkg name, switch to the Debian one:\n if pkg.startswith('python-'):\n pkg = pkg[7:]\n # TODO: SafeConfigParser might make the \"user\" interpolation here\n # unnecessary? Need to test, particularly what it does to %(module).\n pkg_url = gitbaseurl % {'user': user, 'module': pkg}\n cmd = ['git', 'clone', pkg_url]\n subprocess.check_call(cmd)\n\n os.chdir(pkg)\n\n patches_url = find_patches_url(configp, user, pkg)\n if patches_url:\n cmd = ['git', 'remote', 'add', '-f', 'patches', patches_url]\n subprocess.check_call(cmd)\n\n util.setup_pristine_tar_branch()", "def PluginUninstall(self, packageName):\n pass", "def update_pkg_metadata(self, pkg, version=None, **kwargs):\n pass", "def handle(self, *args, **kwargs) -> None:\n logging.info(\"Populating the package table...\")\n Command.populate_package(package_count=50)\n logging.info(\"Done!\")", "def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)", "def pipInstall(self):\n\n print \"Does Nothing\"", "def get_pkg_meta(self, pkg):\n pass", "def on_package_parse(self, ctx):\n return None", "def do_package_action(self, pkginfo, action):\n actions = PACKAGE_ACTIONS\n\n if action in actions:\n url = AUR_URL + '/packages/{}/{}'.format(pkginfo['Name'], action)\n with self.opener.open(url) as f:\n pass\n else:\n raise AurploaderError(\"unrecognized action ({})\".format(action)\n )", "def post_install(self, dest_dir):\n for obj in self.objects_used:\n 
obj.post_install(dest_dir)", "def _install(self):\n\n pass" ]
[ "0.7332126", "0.69622064", "0.6393076", "0.60962796", "0.5933556", "0.58876604", "0.58322257", "0.58148694", "0.5752", "0.554155", "0.5408007", "0.5398204", "0.53647107", "0.5351951", "0.5319673", "0.52867943", "0.5283457", "0.52370936", "0.5234688", "0.5212532", "0.52116287", "0.52030194", "0.51948553", "0.51845306", "0.5181443", "0.51607347", "0.5155645", "0.51313126", "0.51230407", "0.508601" ]
0.8569286
0
Called after the successful installation of all pkgs.
def post_install(self, installable_pkgs): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_install_pkg(self, installable_pkg):\n pass", "def do_post_install(self, context):\n pass", "def post_installation(self, exc_value):\n pass", "def pre_install(self, installable_pkgs):\n pass", "def pre_install_pkg(self, installable_pkg):\n pass", "def _install(self):\n\n pass", "def test_after_install(self):\n self.run_test_suites(self.after_install_test_suite_list)", "def pre_installation(self):\n pass", "def _provision_package(self):", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def post_install(self, dest_dir):\n raise NotImplementedError(\"post_install is not implemented\")", "def post_install(self, dest_dir):\n for obj in self.objects_used:\n obj.post_install(dest_dir)", "def pipInstall(self):\n\n print \"Does Nothing\"", "def prepareUninstall():\n pass", "def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None", "def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()", "def after_all(self) -> None:", "def handle(self, *args, **kwargs) -> None:\n logging.info(\"Populating the package table...\")\n Command.populate_package(package_count=50)\n logging.info(\"Done!\")", "def test_setup_package(self):\n pluggable_package.setup(self._test_package)\n self._test_setup(self._test_package)", "def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)", "def test_install(self):\n pass", "def postCheckDeps(self):\n if( self.mode == \"install\" ):\n\n # check for make\n if( not isinPath( \"make\" )):\n self.abort( \"make not found on your system!!\" )\n\n # check for tee\n if( not isinPath( \"tee\" )):\n self.abort( \"tee not found on your system!!\" )", "def setupFinished(self, *args, **kwargs): # real signature unknown\n pass", "def cleanupInstall(self):\n\n os.chdir( os.path.dirname(self.installPath) )\n tryunlink( self.download.tarball )", "def test_install_packages():\n\n\tassert packaging.install_packages(pkgs) == None", "def post_setup(self, context):\n pass", "def test_installed(self):\n # OSA script should have been installed in setUp function\n self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))", "def confirm_installation(cls):\n # here, you should write any code needed to confirm that all the\n # dependencies required by your module are installed.\n # this class method will be executed when HADDOCK3 starts.\n\n # if you module does not import any run-time dependency, just leave\n # this method blank\n return", "def on_install(self, event):\n unit = self.model.unit\n\n # Install your software and its dependencies\n\n unit.status = ActiveStatus()", "def post_install_step(self):\n # handle staged install via Binary parent class\n super(EB_CPLEX, self).post_install_step()\n\n # determine bin dir\n 
os.chdir(self.installdir)\n binglob = 'cplex/bin/x86-64*'\n bins = glob.glob(binglob)\n\n if len(bins) == 1:\n self.bindir = bins[0]\n elif len(bins) > 1:\n raise EasyBuildError(\"More than one possible path for bin found: %s\", bins)\n else:\n raise EasyBuildError(\"No bins found using %s in %s\", binglob, self.installdir)" ]
[ "0.83991206", "0.7399972", "0.7169786", "0.69973654", "0.6994343", "0.6681931", "0.657458", "0.6570379", "0.65510577", "0.64301735", "0.6408434", "0.63056946", "0.6250174", "0.6222582", "0.61908776", "0.61649483", "0.6062687", "0.6025747", "0.6022049", "0.599792", "0.5966197", "0.5948428", "0.59092665", "0.588241", "0.587291", "0.58656806", "0.5860637", "0.57996583", "0.5791263", "0.5784014" ]
0.8408907
0
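Together, the pre_install, pre_install_pkg, post_install_pkg, and post_install stubs above suggest a per-package installation loop wrapped by batch-level hooks. A sketch of a progress reporter built on that assumption (the class name and the str() rendering of a package are illustrative):

class ProgressPlugin:
    # Illustrative plugin: reports per-package progress during installation.
    def pre_install(self, installable_pkgs):
        self._total = len(installable_pkgs)
        self._done = 0

    def pre_install_pkg(self, installable_pkg):
        print(f"[{self._done + 1}/{self._total}] installing {installable_pkg}")

    def post_install_pkg(self, installable_pkg):
        self._done += 1

    def post_install(self, installable_pkgs):
        print(f"installed {self._done} of {self._total} packages")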
Called after everything else (will be called if pre_installation returned successfully). exc_value is None if there was no error, else the exception value.
def post_installation(self, exc_value): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exc_handler(self, exc_type, exc, *args) -> None:\n self.exception = exc\n self.exit_code = 1", "def on_failure(self, exc: BaseException) -> None:", "def postcondition(self, result, exc_info, *args, **kwargs):\n pass", "def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()", "def pre_installation(self):\n pass", "def user_exception(self, frame, exc_tuple):\r\n frame.f_locals['__exc_tuple__'] = exc_tuple\r\n\r\n if not self._wait_for_mainpyfile:\r\n self.interaction(frame, exc_tuple)", "def on_exception(self):\n pass", "def handle_execution_exception(self, ex):\n if self.config.raise_errors:\n raise(ex)\n warning(str(ex))", "def do_post_install(self, context):\n pass", "def __raise_clean_exception(exc_type, exc_value, exc_traceback):\n if exc_type.__name__ not in dir(napalm.exceptions) and \\\n exc_type.__name__ not in __builtins__.keys():\n epilog = (\"NAPALM didn't catch this exception. Please, fill a bugfix on \"\n \"https://github.com/napalm-automation/napalm/issues\\n\"\n \"Don't forget to include this traceback.\")\n print(epilog)\n raise exc_type, exc_value, exc_traceback", "def process_exception(self, request, exc):\n return None", "def post_install(self, installable_pkgs):\n pass", "def test_install_subprocess_error_should_fail(self, *args):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.INSTALL: {\n EXTCFG_OPTION.EXEC_EXT_CMD: ['command'],\n }\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerError):\n ext_manager.handle_install_extras()", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def handle_execution_exception(self, ex):\n raise(ex)", "def unexpected_error(self, exception):", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def user_exception(self, frame, exc_info):\n pass", "def _check_return(self, name, ret_code):\n if ret_code == 0:\n pass\n else:\n raise RuntimeError('An error occured setting %s: %d' % (name, ret_code))", "def __exit__(self, exc_type, exc_value, exc_tb):\n\t\treturn exc_value is None", "def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. 
Execution stops.\")", "def unexpectedException(self):", "def setup_function(self):\r\n raise AppModule.Unimplemented()", "def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:\n self.called_precmd += 1\n raise ValueError", "def rollback(self, exc):\n USER.info('%s: Rolling Back Failed Build', self.recipe.name)\n cascade = False\n if isinstance(exc, AssertionError):\n logging.error('Error during verify() of %s', self.recipe.name)\n cascade = True\n if cascade or isinstance(exc, PakitLinkError):\n if not cascade:\n logging.error('Error during linking of %s', self.recipe.name)\n walk_and_unlink(self.recipe.install_dir, self.recipe.link_dir)\n cascade = True\n if cascade or (not isinstance(exc, PakitLinkError) and\n not isinstance(exc, AssertionError)):\n if not cascade:\n logging.error('Error during build() of %s', self.recipe.name)\n try:\n Command('rm -rf ' + self.recipe.install_dir).wait()\n except PakitCmdError: # pragma: no cover\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n return None", "def handle_err(self):\n pass", "def my_exception_hook(exctype, value, traceback):\n # Print the error and traceback\n print(exctype, value, traceback)\n # Call the normal Exception hook after\n sys._excepthook(exctype, value, traceback)\n sys.exit(1)", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def set_error(self, exc_info):\n self.exc_info = exc_info\n if exc_info is None:\n self.meta_classes = {}\n self.meta_functions = {}" ]
[ "0.62841517", "0.62787837", "0.6137501", "0.5973818", "0.5940887", "0.58668447", "0.58472353", "0.5836734", "0.5826562", "0.5807436", "0.5798474", "0.57931894", "0.57608753", "0.5755618", "0.5744285", "0.57340395", "0.57157147", "0.5701963", "0.5669499", "0.566829", "0.5648024", "0.5640793", "0.56406003", "0.56373495", "0.563721", "0.5605948", "0.56054884", "0.5558921", "0.555725", "0.5534615" ]
0.8339836
0
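The post_installation entry above is the only hook shown that receives an exception value, which makes it a natural place for success or failure reporting. A minimal sketch under that assumption (class name illustrative):

class OutcomePlugin:
    # Illustrative plugin: exc_value is None on success, else the raised exception.
    def post_installation(self, exc_value):
        if exc_value is None:
            print("installation finished successfully")
        else:
            print(f"installation failed: {exc_value!r}")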
From the list of pkgs known to be upgraded, return a list of tuples (installed_pkg, [], []) such that neither list contains the pkg of the installed pkg. Also, if a new package to install is found in more than one list, it will be discarded in the later list.
def preprocess_upgrade_list(self, upgrade_list): return [(ed_pkg, able_pkg, [], []) for (ed_pkg, able_pkg) in upgrade_list]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_installed_packages() -> List['Package']:\n repo_packages_names = set(expac(\"-S\", ['n'], []))\n\n # packages the user wants to install from aur\n aur_names = packages_from_other_sources()[0]\n repo_packages_names -= aur_names\n\n installed_packages_names = set(expac(\"-Q\", ['n'], []))\n installed_repo_packages_names = installed_packages_names & repo_packages_names\n unclassified_installed_names = installed_packages_names - installed_repo_packages_names\n\n return_list = []\n\n # installed repo packages\n if installed_repo_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_repo_packages_names), PossibleTypes.REPO_PACKAGE)\n )\n\n # installed aur packages\n installed_aur_packages_names = set(\n [package.name for package in Package.get_packages_from_aur(list(unclassified_installed_names))]\n )\n\n # package names the user gave us must be in the aur\n for name in aur_names:\n if name not in installed_aur_packages_names:\n aurman_error(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n\n if installed_aur_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_aur_packages_names), PossibleTypes.AUR_PACKAGE)\n )\n\n unclassified_installed_names -= installed_aur_packages_names\n\n # installed not repo not aur packages\n if unclassified_installed_names:\n return_list.extend(\n Package.get_packages_from_expac(\n \"-Q\", list(unclassified_installed_names),\n PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n )\n )\n\n return return_list", "def compare_package_lists(manifest, installed):\n\n uninstalled = [x for x in manifest if x not in installed]\n\n # == comm -23\n also_installed = [x for x in installed if x not in manifest]\n\n # 'easiest' solution\n # print \"apt-get remove -y %s\" % (' '.join(uninstalled))\n # print \"apt-get install -y %s\" % (' '.join(also_installed))\n\n # >>> why isn't this good enough?\n # <<< why manually install dependencies that may change?\n # <<< better to select the minimal graph/set/covering\n # <<< though apt-get will just re-compute these dependencies again\n # <<< \"i swear i didn't manually install [...]\"\n\n # stack = collections.dequeue()\n def visit_graph(apt_cache, pkgname, depends, visited):\n try:\n pkg = apt_cache[pkgname]\n except KeyError as e:\n print(e) # TODO\n return\n\n for pkgset in pkg.installedDependencies:\n for pkg in pkgset:\n depends[pkg.name].append(pkgname)\n if pkgname not in visited:\n visited[pkgname] = True\n visit_graph(apt_cache, pkg.name, depends, visited)\n # stack.push( pkg['name'] )\n\n try:\n apt = import_apt()\n apt_cache = apt.Cache()\n\n depends = collections.defaultdict(list)\n visited = {}\n for pkgname in also_installed:\n visit_graph(apt_cache, pkgname, depends, visited)\n\n # TODO: more optimal covering\n minimal = [x for x in also_installed if x not in depends]\n finally:\n tmp_dir = getattr(apt, '_tmp_dirname')\n if tmp_dir and os.path.exists(tmp_dir):\n shutil.rmtree(apt._tmp_dirname)\n\n return PkgComparison(\n minimal,\n also_installed,\n uninstalled,\n manifest,\n installed)", "def get_not_installed_rpm_packages():\n def is_installed(elem):\n return elem in PMDK_TOOLS and elem in listdir('/usr/bin/') or\\\n elem == \"pmdk\" or elem + '.so' in listdir('/usr/lib64/')\n\n elements = get_libraries_names()\n not_installed_packages = []\n for elem in elements:\n if not is_installed(elem):\n 
not_installed_packages.append(elem)\n return not_installed_packages", "def getAllInstalledPackages(installedPkgPath):\n allPkgVers = []\n if os.path.exists(installedPkgPath):\n for pkg in os.listdir(installedPkgPath):\n pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))\n for pkgVersion in pkgVersions:\n pkgPath = os.path.join(installedPkgPath, pkg)\n if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):\n allPkgVers.append(os.path.join(pkgPath, pkgVersion))\n return allPkgVers", "def _filter_pkgs(self, pkgs):\n pkgs = [pkg.strip() for pkg in pkgs]\n return [\n pkg for pkg in pkgs\n if pkg.startswith(self.base_pkg) and not pkg.startswith(os.path.join(self.base_pkg, \"vendor/\"))\n ]", "def missingTranslationPkgs(self, pkg, translation_pkg):\n\n # FIXME: this function is called too often and it's too slow\n # -> see ../TODO for ideas how to fix it\n missing = []\n # check if the pkg itself is available and installed\n if not self._cache.has_key(pkg):\n return missing\n if not self._cache[pkg].isInstalled:\n return missing\n\n # match every packages that looks similar to translation_pkg\n # \n for pkg in self._cache:\n if (pkg.name == translation_pkg or\n pkg.name.startswith(translation_pkg+\"-\")):\n if not pkg.isInstalled and pkg.candidateVersion != None:\n missing.append(pkg.name)\n return missing", "def sort_packages(self) -> None:\n self.recommended_packages = []\n self.required_packages = []\n for package in self.repository_packages:\n try:\n output = self.guest.execute(Command('rpm', '-q', package), silent=True)\n assert output.stdout\n self.debug(f\"Package '{output.stdout.strip()}' already installed.\")\n except tmt.utils.RunError:\n if self.skip_missing:\n self.recommended_packages.append(package)\n else:\n self.required_packages.append(package)", "def _remove_extra_packages(frozen_pkgs, ret, **kwargs):\n pkgs = __salt__[\"pkg.list_pkgs\"](**kwargs)\n extra_pkgs = set(pkgs) - set(frozen_pkgs)\n for pkg in extra_pkgs:\n try:\n __salt__[\"pkg.remove\"](name=pkg, **kwargs)\n ret[\"pkgs\"][\"remove\"].append(pkg)\n log.info(\"Removed extra package %s\", pkg)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error removing %s package: %s\"\n log.error(msg, pkg, e)\n ret[\"comment\"].append(msg % (pkg, e))", "def removed_pkgs():\n name_versions = defaultdict(set)\n fedoras = py2_pkgs()\n last_fedoras = defaultdict(set)\n new = {pkg.name for pkg in repoquery(all=True)}\n for version in fedoras:\n for name_evr in set(fedoras[version]):\n name, _, evr = name_evr.partition(' ')\n if name not in new:\n name_versions[name].add(evr)\n last_fedoras[version].add(name)\n max_versions = {name: max(versions, key=SortableEVR)\n for name, versions in name_versions.items()}\n return last_fedoras, max_versions", "def get_installed_jre_packages():\n # Convert to a set and back to a list again to uniqueify.\n return sorted(list(set(rpm_query_whatprovides('java', 'java7', 'jdk'))))", "def _filter_installed_packages(self, packages):\n filtered_packages = []\n for package in packages:\n name = package.name\n for installed in self._top_installed_repository.find_packages(name):\n if installed.key == package.key:\n break\n else:\n filtered_packages.append(package)\n return filtered_packages", "def getMissingLangPacks(self):\n missing = []\n for langInfo in self._cache.getLanguageInformation():\n #print langInfo.languageCode\n trans_package = \"language-pack-%s\" % langInfo.languageCode\n # we have a langpack installed, see if we have all of them\n if (self._cache.has_key(trans_package) 
and \n self._cache[trans_package].isInstalled):\n #print \"IsInstalled: %s \" % trans_package\n for (pkg, translation) in self._cache.pkg_translations:\n missing += self.missingTranslationPkgs(pkg, translation+langInfo.languageCode)\n\n # now check for a missing default language support\n default_lang = self.getSystemDefaultLanguage()\n # if there is no default lang, return early\n if default_lang is None:\n return missing\n if \"_\" in default_lang:\n default_lang = default_lang.split(\"_\")[0]\n trans_package = \"language-pack-%s\" % default_lang\n if (self._cache.has_key(trans_package) and \n not self._cache[trans_package].isInstalled):\n missing += [trans_package]\n for (pkg, translation) in self._cache.pkg_translations:\n missing += self.missingTranslationPkgs(pkg, translation+default_lang)\n support_packages = LanguageSelectorPkgCache._getPkgList(self._cache, default_lang)\n for support_package in support_packages:\n if (self._cache.has_key(support_package) and \n not self._cache[support_package].isInstalled):\n missing.append(support_package)\n\n return missing", "def getMissingPackages(self, language=None, all=False, packages=None, showInstalled=False):\n if self._cache.broken_count > 0:\n raise SoftwareIndexBroken\n \n self.langpack_locales = {}\n self.pkg_translations = {}\n self.pkg_writing = {}\n filter_list = {}\n blacklist = []\n show = []\n self.missing = set()\n self.installed = set()\n self.system_pkgcode = ''\n \n for l in open(self.BLACKLIST):\n l = l.strip()\n if not l.startswith('#'):\n blacklist.append(l)\n \n for l in open(self.LANGCODE_TO_LOCALE):\n try:\n l = l.rstrip()\n if ':' in l:\n (pkgcode, locale) = l.split(':')\n else:\n pkgcode = l\n locale = l\n except ValueError:\n continue\n self.langpack_locales[locale] = pkgcode\n \n for l in open(self.PACKAGE_DEPENDS):\n if l.startswith('#'):\n continue\n try:\n l = l.rstrip()\n # sort out comments\n if l.find('#') >= 0:\n continue\n (c, lc, k, v) = l.split(':')\n except ValueError:\n continue\n if (c == 'tr' and lc == ''):\n filter_list[v] = k\n elif (c == 'wa' and lc != ''):\n if '|' in lc:\n for l in lc.split('|'):\n if not l in self.pkg_writing:\n self.pkg_writing[l] = []\n self.pkg_writing[l].append((\"%s\" % k, \"%s\" % v))\n else:\n if not lc in self.pkg_writing:\n self.pkg_writing[lc] = []\n self.pkg_writing[lc].append((\"%s\" % k, \"%s\" % v))\n\n # get list of all packages available on the system and filter them\n for item in self._cache.keys():\n if item in blacklist: \n continue\n for x in filter_list.keys():\n if item.startswith(x) and not item.endswith('-base'):\n # parse language code\n langcode = item.replace(x, '')\n #print \"%s\\t%s\" % (item, langcode)\n if langcode == 'zh':\n # special case: zh langpack split\n for langcode in ['zh-hans', 'zh-hant']:\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n elif langcode in self.langpack_locales.values():\n # langcode == pkgcode\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n else:\n # need to scan for LL-CC and LL-VARIANT codes\n for locale in self.langpack_locales.keys():\n if '_' in locale or '@' in locale:\n if '@' in locale:\n (locale, variant) = locale.split('@')\n else:\n variant = ''\n (lcode, ccode) = locale.split('_')\n if langcode in [\"%s-%s\" % (lcode, 
ccode.lower()),\n \"%s%s\" % (lcode, ccode.lower()),\n \"%s-%s\" % (lcode, variant),\n \"%s%s\" % (lcode, variant),\n \"%s-latn\" % lcode,\n \"%slatn\" % lcode,\n \"%s-%s-%s\" % (lcode, ccode.lower(), variant),\n \"%s%s%s\" % (lcode, ccode.lower(), variant)]:\n # match found, get matching pkgcode\n langcode = self.langpack_locales[locale]\n if not langcode in self.pkg_translations:\n self.pkg_translations[langcode] = []\n self.pkg_translations[langcode].append((\"%s\" % filter_list[x], \"%s\" % item))\n #print self.pkg_translations[langcode]\n break\n\n if language:\n pkgcode = ''\n if language == 'zh-hans' or language == 'zh-hant':\n self.system_pkgcode = language\n elif language in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[language]\n else:\n # pkgcode = ll\n if '_' in language:\n (self.system_pkgcode) = language.split('_')[0]\n elif '@' in language:\n (self.system_pkgcode) = language.split('@')[0]\n else:\n self.system_pkgcode = language\n\n if packages:\n self.findPackages(self.system_pkgcode, packages)\n else:\n self.findPackages(self.system_pkgcode)\n \n elif all:\n # try all available languages\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist:\n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base'):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n\n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n\n else:\n # get a list of language-packs we have already installed or are going to install\n # 1. system locale\n system_langcode = self._localeinfo.getSystemDefaultLanguage()[0]\n if system_langcode == None:\n system_langcode = 'en_US'\n if system_langcode in self.langpack_locales:\n self.system_pkgcode = self.langpack_locales[system_langcode]\n # 2. 
installed language-packs\n pkgcodes = []\n for item in self._cache.keys():\n if item in blacklist: \n continue\n if item.startswith('language-pack-') and \\\n not item.startswith('language-pack-gnome') and \\\n not item.startswith('language-pack-kde') and \\\n not item.endswith('-base') and \\\n (self._cache[item].is_installed or \\\n self._cache[item].marked_install):\n pkgcode = item.replace('language-pack-', '')\n pkgcodes.append(pkgcode)\n if self.system_pkgcode and \\\n not self.system_pkgcode in pkgcodes:\n pkgcodes.append(self.system_pkgcode)\n \n for pkgcode in pkgcodes:\n if packages:\n self.findPackages(pkgcode, packages)\n else:\n self.findPackages(pkgcode)\n \n if showInstalled:\n show = self.missing | self.installed\n else:\n show = self.missing\n\n return show", "def get_module_list_from_pkglist(self):\n module_list = []\n for package in self.package_list:\n mod_list = self.get_module_list_from_pkg_rcrsv(package, [])\n module_list.extend(mod_list)\n return module_list", "def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def getInstalledPackages(self) -> PackageContainer:\n\t\tself.getPackageManager()\n\t\tif self.package_manager == \"apt\":\n\t\t\tpackages = subprocess.check_output([\"apt\", \"list\", \"--installed\"], encoding='UTF-8', universal_newlines=True)\n\t\t\tpackages = packages.split(\"\\n\")[1:-1]\n\t\telse:\n\t\t\tlogger.error(\"Package manager not supported for extracting packages.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\n\t\t# Parse packages to self.installed_packages\n\t\tself.parsePackages(packages)\n\n\t\tlogger.info(\"Installed packages collected\")\n\t\treturn self.installed_packages", "def get_installed_jdk_packages():\n # Convert to a set and back to a list again to uniqueify.\n return sorted(list(set(rpm_query_whatprovides('java-devel', 'java7-devel', 'jdk'))))", "def get_installed_packages(cache=False,\n output_dir='.',\n output_filename='installed.pkgs.txt'):\n output = os.path.join(output_dir, output_filename)\n cmd = '''aptitude search '~i !~M' -F '%%p' | sort -u > %r''' % (\n output)\n ensure_file(cmd, output, shell=True, overwrite=not(cache))\n installed = list(read_lines(output))\n return installed", "def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p1.communicate()\n\n\t\tresult = {}\n\t\tresult['install'] = []\n\t\tresult['update'] = []\n\t\tresult['remove'] = []\n\t\tfor line in stdout.split('\\n'):\n\t\t\t# upgrade:\n\t\t\t# Inst univention-updater [3.1.1-5] (3.1.1-6.408.200810311159 192.168.0.10)\n\t\t\t# inst:\n\t\t\t# Inst mc 
(1:4.6.1-6.12.200710211124 oxae-update.open-xchange.com)\n\t\t\t#\n\t\t\t# *** FIX ***\tthe above example lines ignore the fact that there's\n\t\t\t#\t\t\t\tsome extra text (occasionally) after the last closing\n\t\t\t#\t\t\t\tparenthesis. Until now, I've seen only a pair of empty\n\t\t\t#\t\t\t\tbrackets [], but who knows...\n\t\t\tmatch = re.search('^Inst (\\S+)\\s+(.*?)\\s*\\((\\S+)\\s.*\\)',line)\n\t\t\tif match:\n\t\t\t\tpkg = match.group(1)\n\t\t\t\told = match.group(2)\n\t\t\t\tver = match.group(3)\n\t\t\t\tif old:\n\t\t\t\t\tresult['update'].append([pkg,ver])\n\t\t\t\telse:\n\t\t\t\t\tresult['install'].append([pkg,ver])\n\t\t\telif line.startswith('Remv '):\n\t\t\t\tl=line.split(' ')\n\t\t\t\tpkg = l[1]\n\t\t\t\tver = _('unknown')\n\t\t\t\tif len(l) > 2:\n\t\t\t\t\tver = l[2].replace('[','').replace(']','')\n\t\t\t\tresult['remove'].append([pkg,ver])\n\n\n\t\t# sort package names?\n\t\tresult['update'] = sorted(result['update'])\n\t\tresult['install'] = sorted(result['install'])\n\t\tresult['remove'] = sorted(result['remove'])\n\n\t\tself.finished(request.id,result)", "def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages", "def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]", "def check_state_pkg(package, list_to_check, versionlock_packages, check_type):\n state_pkg = dict()\n state_pkg['present'] = []\n state_pkg['missing'] = []\n missing_pkg_version = None\n for is_checked in list_to_check:\n if is_checked != 'Installed Packages' and is_checked != 'Available Packages':\n # Split to concatenate name & version\n pkg_split = is_checked.split()\n if missing_pkg_version:\n pkg_name = missing_pkg_version\n pkg_version = pkg_split[0]\n missing_pkg_version = None\n else:\n pkg_name = pkg_split[0].split('.')[0]\n try:\n pkg_version = pkg_split[1]\n except IndexError:\n missing_pkg_version = pkg_split[0].split('.')[0]\n if pkg_version:\n pkg_vers = pkg_name + '-' + pkg_version\n if check_type == 'installed':\n regex_search = re.compile('^(\\d+:)?%s-(\\d+:)?%s\\.\\*'%(pkg_name,pkg_version))\n else:\n regex_search = re.compile('^\\!(\\d+:)?%s-(\\d+:)?%s\\.\\*'%(pkg_name,pkg_version))\n # Init flag is_present\n is_present = False\n # search for the installed package in versionlock list\n for locked in versionlock_packages:\n if regex_search.search(locked):\n is_present = True\n if is_present:\n state_pkg['present'].append(pkg_vers)\n else:\n state_pkg['missing'].append(pkg_vers)\n return state_pkg", "def getSetupPackages(self):\n e = eups.Eups()\n setupProducts = e.getSetupProducts()\n a = \"\"\n\n # create a new list will all products and versions\n allProducts = {}\n for i in setupProducts:\n allProducts[i.name] = i.version\n\n # replace any existing products that we saw on the command line, adding\n # them if they're not already there.\n if self.opts.setup is not None:\n for i, pkg in enumerate(self.opts.setup):\n name = pkg[0]\n version = pkg[1]\n print(\"name = %s, version = %s\" % (name, version))\n allProducts[name] = version\n\n # write out all products, except those that are setup locally.\n for name in allProducts:\n version = allProducts[name]\n if self.platform == \"lsst\":\n a = a + \"setup 
-j %s %s\\\\n\\\\\\n\" % (name, version)\n else:\n if not version.startswith(\"LOCAL:\"):\n a = a + \"setup -j %s %s\\\\n\\\\\\n\" % (name, version)\n return a", "def get_package_lists(manifest_url=MANIFEST_URL, cache=False, output_dir=None):\n\n installed = get_installed_packages(cache=cache,\n output_dir=output_dir)\n manifest = get_manifest_packages(manifest_url=manifest_url,\n cache=cache,\n output_dir=output_dir)\n\n return installed, manifest", "def preprocess_raw_upgrade_list(self, raw_upgrade_list):\n # By default, upgrade all package that are not in sync, which is\n # not what you want to do for more evolved package management\n return raw_upgrade_list", "def provided_by(self, dep: str) -> List['Package']:\n\n dep_name, dep_cmp, dep_version = split_name_with_versioning(dep)\n return_list = []\n\n if dep_name in self.all_packages_dict:\n package = self.all_packages_dict[dep_name]\n if not dep_cmp:\n return_list.append(package)\n elif version_comparison(package.version, dep_cmp, dep_version):\n return_list.append(package)\n # https://github.com/polygamma/aurman/issues/246\n elif Package.ignore_versioning:\n return_list.append(package)\n\n if dep_name in self.provides_dict:\n possible_packages = self.provides_dict[dep_name]\n for package in possible_packages:\n\n if package in return_list:\n continue\n\n for provide in package.provides:\n provide_name, provide_cmp, provide_version = split_name_with_versioning(provide)\n\n if provide_name != dep_name:\n continue\n\n if not dep_cmp:\n return_list.append(package)\n elif provide_cmp == \"=\" and version_comparison(provide_version, dep_cmp, dep_version):\n return_list.append(package)\n # https://github.com/polygamma/aurman/issues/67\n elif not provide_cmp and Package.optimistic_versioning:\n return_list.append(package)\n # https://github.com/polygamma/aurman/issues/246\n elif Package.ignore_versioning:\n return_list.append(package)\n\n return return_list", "def get_required_packages(self) -> list:\n\t\tret = []\n\t\tlocal_packages = ChocoInfo.get_local_packages(\n\t\t\tPUSHED_PACKAGES_PATH)\n\n\t\tprint(\"local_packages\", local_packages)\n\n\t\treturn [c_package for c_package in self._community_packages if c_package not in local_packages]", "def calc_install_chunks(packages_to_chunk: Sequence['Package']) -> List[List['Package']]:\n current_list: List['Package'] = []\n return_list: List[List['Package']] = [current_list]\n\n for package in packages_to_chunk:\n if current_list and (current_list[0].type_of is not package.type_of\n or package.type_of is not PossibleTypes.REPO_PACKAGE\n and package.pkgbase != current_list[0].pkgbase):\n\n current_list = [package]\n return_list.append(current_list)\n else:\n current_list.append(package)\n\n return return_list", "def get_available_packages():\n all_providers_yaml = load_package_data()\n provider_package_names = [\n provider[\"package-name\"] for provider in all_providers_yaml if not provider.get(\"suspended\")\n ]\n return [\n \"apache-airflow\",\n \"docker-stack\",\n *provider_package_names,\n \"apache-airflow-providers\",\n \"helm-chart\",\n ]" ]
[ "0.68577033", "0.66643363", "0.63001037", "0.6242023", "0.6221818", "0.61724806", "0.611967", "0.6084042", "0.608403", "0.60733235", "0.60682243", "0.6050123", "0.6046999", "0.60445344", "0.60400933", "0.599542", "0.59662074", "0.5938913", "0.5904515", "0.5884107", "0.5870722", "0.581265", "0.5753482", "0.57253444", "0.57151645", "0.57031965", "0.5688603", "0.56786436", "0.5649472", "0.5646112" ]
0.7349749
0
Copies the model parameters of one estimator to another.
def copy_model_parameters(sess, estimator1, estimator2):
    e1_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator1.scope)]
    e1_params = sorted(e1_params, key=lambda v: v.name)
    e2_params = [t for t in tf.trainable_variables() if t.name.startswith(estimator2.scope)]
    e2_params = sorted(e2_params, key=lambda v: v.name)
    update_ops = []
    for e1_v, e2_v in zip(e1_params, e2_params):
        op = e2_v.assign(e1_v)
        update_ops.append(op)
    sess.run(update_ops)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)", "def sync_parameters(self, model: nn.Module) -> None:\n # before ema, copy weights from orig\n avg_param = (\n itertools.chain(self.module.parameters(), self.module.buffers()))\n src_param = (itertools.chain(model.parameters(), model.buffers()))\n for p_avg, p_src in zip(avg_param, src_param):\n p_avg.data.copy_(p_src.data)", "def sync_parameters(self, model: nn.Module) -> None:\n # before ema, copy weights from orig\n avg_param = (\n itertools.chain(self.module.parameters(), self.module.buffers()))\n src_param = (itertools.chain(model.parameters(), model.buffers()))\n for p_avg, p_src in zip(avg_param, src_param):\n p_avg.data.copy_(p_src.data)", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def reset_parameters(self) -> None:\n \n self.classifier.apply(xavier)\n if len(self.old_cols) > 0:\n self.adaptor1.apply(xavier)\n self.adaptor2.apply(xavier)", "def copy_model_parameters(sess, net1, net2):\n\n copy_scope_parameters(sess, net1.scope, net2.scope)", "def set_params(model, params): # type: ignore\n for p, p_new in zip(model.parameters(), params):\n p.data = p_new.data", "def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):\n for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):\n if name_opti != name_model:\n logger.error(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n param_model.data.copy_(param_opti.data)", "def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):\n for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):\n if name_opti != name_model:\n logger.error(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n param_model.data.copy_(param_opti.data)", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def update_params(self, other):\n if isinstance(other, Params):\n found = False\n for key, param in other._src.items():\n if key in self._src:\n self._src[key] = param\n found = True\n\n if not found:\n raise RuntimeError(\n \"Tried to set parameters which do not exist in the target model.\"\n )\n else:\n raise RuntimeError(\"Attempt to stream non-parameter list to parameter list.\")", "def reset_from(self, other_model):\n self.wv.vocab = other_model.wv.vocab\n self.wv.index2word = other_model.wv.index2word\n self.vocabulary.cum_table = other_model.vocabulary.cum_table\n self.corpus_count = other_model.corpus_count\n self.docvecs.count = other_model.docvecs.count\n self.docvecs.doctags = other_model.docvecs.doctags\n self.docvecs.offset2doctag = other_model.docvecs.offset2doctag\n self.trainables.reset_weights(self.hs, self.negative, self.wv, self.docvecs)", "def hard_update(target, source):\n for target_param, param in zip(target.parameters(), 
source.parameters()):\n target_param.data.copy_(param.data)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def swap(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n new = p.data\n p.data = state['exp_avg_param']\n state['exp_avg_param'] = new", "def set_params(self, **params):\n super(AveragingRegressor, self)._set_params('estimators', **params)\n return self", "def test_parameters_copied_to_actor_model(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n np.testing.assert_equal(\n _state_dict_to_numpy(self.actor_model.state_dict()),\n _state_dict_to_numpy(self.model.state_dict()),\n )", "def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):\n for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):\n if name_opti != name_model:\n warnings.warn(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n param_model.data.copy_(param_opti.data)", "def hard_update(source_net, target_net):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(param.data)", "def copy_model(dst_model, src_model, const=0.0):\n \n params_dst = dst_model.named_parameters()\n params_src = src_model.named_parameters()\n dict_params_dst = dict(params_dst)\n with torch.no_grad():\n for name, param in params_src:\n if name in dict_params_dst:\n # NOTE: Must add a dummy float otherwise only setting 'reference' to old param.data\n dict_params_dst[name].set_(param.data + const)", "def copy_params(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def hard_copy_weights(self, target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def update_target_estimator(self):\n self.target_estimator.load_state_dict(self.estimator.state_dict())", "def update_target_estimator(self):\n self.target_estimator.load_state_dict(self.estimator.state_dict())", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def before_run(self, runner):\n model = self._get_model(runner)\n self.src_model = getattr(model, self.src_model_name, None)\n self.dst_model = getattr(model, self.dst_model_name, None)\n if self.src_model and self.dst_model:\n self.src_params = self.src_model.state_dict(keep_vars=True)\n self.dst_params = self.dst_model.state_dict(keep_vars=True)" ]
[ "0.6655023", "0.6186064", "0.6186064", "0.61854655", "0.6052574", "0.60511726", "0.60407877", "0.5963009", "0.5963009", "0.59216297", "0.59045017", "0.58989644", "0.589293", "0.58706653", "0.5860739", "0.58496034", "0.58299047", "0.58268255", "0.5764856", "0.57535136", "0.57449305", "0.5718402", "0.5718402", "0.5695621", "0.559971", "0.55862653", "0.55862653", "0.5564211", "0.5564211", "0.5542255" ]
0.7852629
1
LSTM input: Generates a tensor that corresponds to an LSTM input sequence from a two-dimensional table (rows = samples, columns = variables)
def generate_lstm_input_sequence(
    input_tensor: Tensor,
    seq_len: int,
    window_shift_step_size: int
):
    num_iterations = (seq_len // window_shift_step_size)
    num_vars = input_tensor.shape[1]
    tensor_list = []
    for i in range(num_iterations):
        # calculate how much the window has to be shifted
        window_shift = i * window_shift_step_size
        # shift the input tensor
        shifted_tensor = input_tensor[window_shift:, :]
        # evaluate new size
        total_time_steps = shifted_tensor.shape[0]
        # evalute the new sample size
        sample_size = total_time_steps // seq_len
        # crop samples that cannot be used (as not devidable by sample size)
        upper_bound = sample_size * seq_len
        # log values
        logger.debug('creating {} samples using data idx {} to {}'.format(
            str(sample_size), str(window_shift), str(upper_bound + window_shift)
        ))
        # subset shifted tensor to match sample size
        subset_tensor = shifted_tensor[0:upper_bound, :]
        # create input_samples
        input_samples = subset_tensor.view(sample_size, seq_len, num_vars)
        # add it to the list
        tensor_list.append(input_samples)
    return(torch.cat(tensor_list, dim=0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTM(inputs, dim, seq_len, name):\r\n with tf.name_scope(name) as scope:\r\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\r\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)\r\n\r\n return hidden_states, cell_states", "def LSTM_train(X_train, Y_train, X_dev, Y_dev, R_train, R_dev, hyperparams):", "def lstm_cell(inputs, **kwargs):\n if context.executing_eagerly():\n return OpLib.execute('LSTMCell', inputs, outputs=[None, None])\n return OpLib.add('LSTMCell', num_outputs=2, **kwargs)", "def create_logits(self):\n with tf.variable_scope('LSTM'):\n first_label = self.get_input(prev=None, i=0)\n decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)\n lstm_cell = tf.contrib.rnn.LSTMCell(\n self._mparams.num_lstm_units,\n use_peepholes=False,\n cell_clip=self._mparams.lstm_state_clip_value,\n state_is_tuple=True,\n initializer=orthogonal_initializer)\n lstm_outputs, _ = self.unroll_cell(\n decoder_inputs=decoder_inputs,\n initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),\n loop_function=self.get_input,\n cell=lstm_cell)\n\n with tf.variable_scope('logits'):\n logits_list = [\n tf.expand_dims(self.char_logit(logit, i), dim=1)\n for i, logit in enumerate(lstm_outputs)\n ]\n\n return tf.concat(logits_list, 1)", "def create_LSTM_LSTM_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Get dimensions\n lstm_size = model_settings['lstm_size']\n\n batch_size = tf.shape(feats2d)[0] \n feats2d = tf.reshape(feats2d, shape=[batch_size,-1,model_settings['feature_width']]) # features are of shape [max seq length for batch, 40]\n seq_lengths = shapes[:,0] # all shapes are [seq_length, 40], we extract seq_length\n\n # First LSTM \n\n # LSTM cells\n cell_fw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True)\n\n # Bi-directional RNN (+ Dropout)\n (output_fw, output_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, feats2d, \n sequence_length=seq_lengths, \n dtype=tf.float32)\n\n # TODO: make predictions after every 64 time slices\n\n concat_rnn = tf.concat([state_fw[0], state_bw[0]], axis=1)\n\n if is_training:\n first_dropout = tf.nn.dropout(concat_rnn, dropout_prob)\n else:\n first_dropout = concat_rnn\n\n # Second LSTM \n # TODO\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=first_dropout, units=num_classes)\n \n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def lstm(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.0))\n #model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(self.nb_classes, activation='softmax'))\n return model", "def apply_lstm(x, seq_len):\n return cudnn_layers.stacked_bilstm(\n input_emb=x,\n input_len=seq_len,\n hidden_size=FLAGS.lstm_dim,\n num_layers=1,\n dropout_ratio=0.0,\n mode=tf_estimator.ModeKeys.TRAIN,\n use_cudnn=None)", "def lstm_cell(input, cx):\n return FunctionLib.apply('LSTMCell', input.device, [input, cx])", "def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = 
tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn", "def build(self, observations):\n with tf.variable_scope(self._scope, default_name=''):\n shape = observations[0].shape\n dets = tf.reshape(observations[0], [-1, shape[-2], shape[-1]])\n det_num = tf.reshape(observations[1], [-1])\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._rnn_state_size)\n batch_size = tf.shape(dets)[0]\n lstm_outputs, _ = tf.nn.dynamic_rnn(\n cell=lstm_cell,\n inputs=dets,\n sequence_length=det_num,\n initial_state=lstm_cell.zero_state(batch_size, dtype=tf.float32),\n dtype=tf.float32)\n # Gathering the last state of each sequence in the batch.\n batch_range = tf.range(batch_size)\n indices = tf.stack([batch_range, det_num - 1], axis=1)\n last_lstm_outputs = tf.gather_nd(lstm_outputs, indices)\n last_lstm_outputs = tf.reshape(last_lstm_outputs,\n [-1, shape[1], self._rnn_state_size])\n return last_lstm_outputs", "def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(LstmEstimator, self).__init__()\r\n \r\n # The LSTM takes track features as inputs, and outputs hidden states\r\n # with dimensionality hidden_dim\r\n self.lstm = nn.LSTM(input_dim, hidden_dim)\r\n \r\n self.hidden2target = nn.Linear(hidden_dim, output_dim)", "def build_lstm(self, keep_prob):\n def get_cell():\n if self.kernel == 'LSTM':\n cell = tf.contrib.rnn.BasicLSTMCell(self.num_hidden_units)\n print('LSTM is using...')\n elif self.kernel == 'GRU': # GRU RNN\n cell = tf.contrib.rnn.GRUCell(self.num_hidden_units)\n print('GRU is using...')\n else:\n raise AttributeError\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n lstm_cell = get_cell()\n init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n return lstm_cell, init_state", "def model_create_lstm(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(LSTM(nn_hparams['units'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features), return_sequences=True))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Flatten())\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def bi_lstm(X_inputs):\n # 
X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n log(\"embedding\")\n log(embedding)\n log(X_inputs)\n log(\"X_inputs\")\n inputs = tf.nn.embedding_lookup(embedding, X_inputs) \n \n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n \n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32) \n \n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n# try:\n# outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# except Exception: # Old TensorFlow version only returns outputs not states\n# outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs, \n# initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n# output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n \n # ***********************************************************\n # ** 3. 
bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state \n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n \n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1,0,2])\n output = tf.reshape(output, [-1, hidden_size*2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def lstm_cell(x, h, c, name=None, reuse=False):\n nin = x.shape[-1].value\n nout = h.shape[-1].value\n with tf.variable_scope(name, default_name=\"lstm\", values=[x, h, c], reuse=reuse):\n wx = get_variable_wrap(\"kernel/input\", [nin, nout * 4], dtype=tf.float32, \n initializer=tf.orthogonal_initializer(1.0))\n wh = get_variable_wrap(\"kernel/hidden\", [nout, nout * 4],dtype=tf.float32,\n initializer=tf.orthogonal_initializer(1.0))\n b = get_variable_wrap(\"bias\", [nout * 4], dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\n z = ed.dot(x, wx) + ed.dot(h, wh) + b\n i, f, o, u = tf.split(z, 4, axis=0)\n i = tf.sigmoid(i)\n f = tf.sigmoid(f + 1.0)\n o = tf.sigmoid(o)\n u = tf.tanh(u)\n c = f * c + i * u\n h = o * tf.tanh(c)\n return h, c", "def lstm_atten(self):\n # Model.\n model = Sequential()\n model.add(LSTM(2048, return_sequences=True,\n input_shape=self.input_shape,\n dropout=0.5))\n model.add(Flatten()) #qiao_added\n # model.add(Dense(1024, activation='relu'))\n # model.add(Dropout(0.5))\n\n attention = Dense(1, activation='tanh')(activations)\n attention = Flatten()(attention)\n attention = Activation('softmax')(attention)\n attention = RepeatVector(2048)(attention)\n attention = Permute([2, 1])(attention)\n\n sent_representation = concatenate([activations, attention], mode='mul')\n sent_representation = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(2048,))(sent_representation)\n\n probabilities = Dense(self.nb_classes, activation='softmax')(sent_representation)\n\n model = model(input=self.input_shape, output=probabilities )\n\n dense1800 = Dense(4096, activation='relu')\n\n #dense1800 = Dense(1800, activation='relu', kernel_regularizer=regularizers.l2(0.01))(inputs)\n attention_probs = Dense(4096, activation='sigmoid', name='attention_probs')(dense1800)\n attention_mul = multiply([dense1800, attention_probs], name='attention_mul')\n dense7 = Dense(self.nb_classes, kernel_regularizer=regularizers.l2(0.01), activation='softmax')(attention_mul)\n model = Model(input=[self.input_shape], output=dense7)\n return model", "def __init__(self, incoming, n_units,\n W_ci=tf.zeros, W_ig=tf.zeros, W_og=tf.zeros, W_fg=tf.zeros,\n b_ci=tf.zeros, b_ig=tf.zeros, b_og=tf.zeros, b_fg=tf.zeros,\n a_ci=tf.tanh, a_ig=tf.sigmoid, a_og=tf.sigmoid, a_fg=tf.sigmoid, 
a_out=tf.identity,\n c_init=tf.zeros, h_init=tf.zeros, learn_c_init=False, learn_h_init=False, forgetgate=True,\n output_dropout=False, store_states=False, return_states=False, precomp_fwds=False,\n tickerstep_biases=None, learn_tickerstep_biases=True, name='LSTM'):\n super(LSTMLayerGetNetInput, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n self.n_units = n_units\n self.lstm_inlets = ['ci', 'ig', 'og', 'fg']\n if return_states:\n store_states = True\n \n #\n # Initialize weights and biases\n #\n \n # Turn W inits into lists [forward_pass, backward_pass]\n W_ci, W_ig, W_og, W_fg = [v[:2] if isinstance(v, list) else [v, v] for v in [W_ci, W_ig, W_og, W_fg]]\n \n # Make W and b tf variables\n W_ci, W_ig, W_og, W_fg = [\n [tofov(v[0], shape=[self.incoming_shape[-1], n_units], var_params=dict(name=n + '_fwd')),\n tofov(v[1], shape=[n_units, n_units], var_params=dict(name=n + '_bwd'))]\n for v, n in zip([W_ci, W_ig, W_og, W_fg], ['W_ci', 'W_ig', 'W_og', 'W_fg'])]\n b_ci, b_ig, b_og, b_fg = [tofov(v, shape=[n_units], var_params=dict(name=n)) for v, n in\n zip([b_ci, b_ig, b_og, b_fg], ['b_ci', 'b_ig', 'b_og', 'b_fg'])]\n \n # Pack weights for fwd and bwd connections\n W_fwd_conc = tf.concat(axis=1, values=[W[0] for W in [W_ci, W_ig, W_og, W_fg]])\n W_bwd_conc = tf.concat(axis=1, values=[W[1] for W in [W_ci, W_ig, W_og, W_fg]])\n \n if not forgetgate:\n print(\"Warning: Setting forgetgate to 0 has not been tested yet, please set the W and b manually \"\n \"to not-trainable tensorflow variables!\")\n \n def a_fg(x):\n return tf.ones(x.get_shape().as_list())\n \n # Initialize bias for tickersteps\n if tickerstep_biases is not None:\n self.W_tickers = OrderedDict(zip_longest(self.lstm_inlets,\n [tofov(tickerstep_biases, shape=[n_units],\n var_params=dict(name='W_tickers_' + g,\n trainable=learn_tickerstep_biases))\n for g in self.lstm_inlets]))\n else:\n self.W_tickers = None\n \n #\n # Create mask for output dropout\n # apply dropout to n_units dimension of outputs, keeping dropout mask the same for all samples,\n # sequence positions, and pixel coordinates\n #\n output_shape = self.get_output_shape()\n if output_dropout:\n out_do_mask = tf.ones(shape=[output_shape[0], output_shape[-1]],\n dtype=tf.float32)\n out_do_mask = tf.nn.dropout(out_do_mask, keep_prob=1. 
- output_dropout,\n noise_shape=[1, output_shape[-1]])\n \n def out_do(x):\n \"\"\"Function for applying dropout mask to outputs\"\"\"\n if output_dropout:\n return out_do_mask * x\n else:\n return x\n \n # Redefine a_out to include dropout (sneaky, sneaky)\n a_out_nodropout = a_out\n \n def a_out(x):\n return a_out_nodropout(out_do(x))\n \n #\n # Handle initializations for h (hidden states) and c (cell states) as Variable\n #\n h_init = out_do(tofov(h_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_h_init)))\n c_init = tofov(c_init, shape=[output_shape[0], output_shape[-1]],\n var_params=dict(name='h_init', trainable=learn_c_init))\n \n # Initialize lists to store LSTM activations and cell states later\n h = [h_init]\n c = [c_init]\n \n self.precomp_fwds = precomp_fwds\n self.store_states = store_states\n self.return_states = return_states\n \n self.W_fwd = OrderedDict(zip(self.lstm_inlets, [W[0] for W in [W_ci, W_ig, W_og, W_fg]]))\n self.W_bwd = OrderedDict(zip(self.lstm_inlets, [W[1] for W in [W_ci, W_ig, W_og, W_fg]]))\n \n self.W_fwd_conc = W_fwd_conc\n self.W_bwd_conc = W_bwd_conc\n self.a = OrderedDict(zip(self.lstm_inlets, [a_ci, a_ig, a_og, a_fg]))\n self.a['out'] = a_out\n self.b = OrderedDict(zip(self.lstm_inlets, [b_ci, b_ig, b_og, b_fg]))\n self.h = h\n self.c = c\n self.external_rec = None\n \n self.ig = []\n self.og = []\n self.ci = []\n self.fg = []\n \n self.out = tf.expand_dims(h_init, 1)\n self.name = name\n \n self.cur_net_fwd = dot_product(tf.zeros(self.incoming_shape[:1] + self.incoming_shape[2:]),\n tf.zeros(self.W_fwd_conc.shape.as_list()))", "def create_lstm_model(x, N, outputs=1):\n with C.layers.default_options(initial_state = 0.1):\n m = C.layers.Recurrence(C.layers.LSTM(N))(x)\n m = C.sequence.last(m)\n # m = C.layers.Dropout(0.2, seed=1)(m)\n m = C.layers.Dense(outputs)(m)\n return m", "def build_linear_model(inputs, columns, config):\r\n features = inputs['features']\r\n\r\n cols_to_vars = {}\r\n units = int(config['linear_model'].get('units', 1))\r\n combiner = config['linear_model'].get('combiner', 'sum')\r\n linear_logits = tf.feature_column.linear_model(\r\n features=features,\r\n feature_columns=columns,\r\n units=units,\r\n sparse_combiner=combiner,\r\n cols_to_vars=cols_to_vars)\r\n\r\n return linear_logits", "def MultiLSTM(inputs,weights):\n w_kernel = weights['w_kernel']\n w_recurrent = weights['w_recurrent']\n w_bias = weights['w_bias']\n T = inputs.shape[-2]\n H = []\n for t in range(T):\n if t > 0:\n z = tf.einsum('knx,xj->knj',inputs[:,:,t,:],w_kernel) + tf.einsum('knl,lj->knj',h,w_recurrent) + tf.expand_dims(w_bias,0)\n else:\n z = tf.einsum('knx,xj->knj',inputs[:,:,t,:],w_kernel) + tf.expand_dims(w_bias,0)\n i,f,o,u = tf.split(z,4,axis=-1)\n i = tf.sigmoid(i) #input gate\n f = tf.sigmoid(f + 1.0) #forget gate\n o = tf.sigmoid(o) #output gate\n u = tf.tanh(u) #information let in by input gate\n if t > 0:\n c = f * c + i * u\n else:\n c = i * u\n h = o * tf.tanh(c)\n H.append(h)\n H = tf.stack(H,-2)\n return(H)", "def lstm_cell():\n if 'reuse' in inspect.getargspec(\n tf.contrib.rnn.BasicLSTMCell.__init__).args:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True,\n reuse=tf.get_variable_scope().reuse)\n else:\n return tf.contrib.rnn.BasicLSTMCell(\n args.hidden_size, input_size=args.embedding_size, forget_bias=0.0, state_is_tuple=True)", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n 
lstm_size = FLAGS.lstm_cells\n\n feature_size = model_input.get_shape().as_list()[2]\n sequence_length = model_input.get_shape().as_list()[1]\n\n # start_token is important!\n start_token = tf.zeros_like(tf.expand_dims(model_input[:, 0, :], axis=1), dtype=tf.float32)\n input_sequence = tf.concat( [start_token, model_input[:, :-1, :]], axis=1)\n output_sequence = model_input[:, :, :]\n\n # fc-relu\n # input_sequence = tf.reshape(input_sequence, [-1, feature_size])\n # fc1 = tf.contrib.layers.fully_connected(input_sequence, lstm_size, activation_fn=tf.nn.relu)\n # input_sequence = tf.reshape(fc1, [-1, sequence_length, lstm_size])\n\n cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)\n outputs, state = tf.nn.dynamic_rnn(\n cell=cell, \n inputs=input_sequence, \n sequence_length=None,\n parallel_iterations=128,\n dtype=tf.float32) # output = (batch, num_frames, lstm_size)\n\n # fc-linear\n outputs = tf.reshape(outputs, [-1, lstm_size])\n fc2 = tf.contrib.layers.fully_connected(outputs, feature_size, activation_fn=None)\n outputs = tf.reshape(fc2, [-1, sequence_length, feature_size])\n\n loss = tf.nn.l2_loss(outputs - output_sequence)\n\n dummy_pooled = tf.reduce_sum(model_input,axis=[1])\n dummy_output = slim.fully_connected(\n dummy_pooled, vocab_size, activation_fn=tf.nn.sigmoid,\n weights_regularizer=slim.l2_regularizer(1e-8))\n\n return {\"predictions\": dummy_output, \"loss\": loss}", "def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):\n param_cells = []\n last_states = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert len(last_states) == num_lstm_layer\n\n # embedding layer\n data = mx.sym.Variable('data')\n wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n\n hidden_all = []\n for seqidx in range(seq_len):\n hidden = wordvec[seqidx]\n for i in range(num_lstm_layer):\n next_state = _lstm(\n num_hidden=num_hidden,\n indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx,\n layeridx=i)\n hidden = next_state.h\n last_states[i] = next_state\n hidden_all.append(hidden)\n\n hidden_concat = mx.sym.Concat(*hidden_all, dim=0)\n pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name=\"pred_fc\")\n return pred_fc", "def forward(self, x):\n x = tensor(x).unsqueeze(1)\n x = self.cnn(x)\n\n # LSTM from here\n batch_size = x.shape[0]\n x = x.view(batch_size, x.shape[1] * x.shape[2], x.shape[3])\n x = x.permute(2, 0, 1) # Converting from (B,H,W)->(W,B,H)\n\n output = self.rnn(x)\n return output", "def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n rnn_cell = tf.compat.v1.nn.rnn_cell.LSTMCell(\n num_units=self._output_size, activation=tf.nn.tanh)\n net, _ = tf.compat.v1.nn.dynamic_rnn(\n rnn_cell, input_tensor, sequence_length=lengths, dtype=tf.float32)\n\n if self._skip:\n net += _add_projection_if_needed(input_tensor, net)\n\n return input_tensors + [net]", "def initialize_graph(self, input_statistics):\r\n super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)\r\n self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)\r\n # Create templates so we don't have to worry about 
variable reuse.\r\n self._lstm_cell_run = tf.make_template(\r\n name_=\"lstm_cell\",\r\n func_=self._lstm_cell,\r\n create_scope_now_=True)\r\n # Transforms LSTM output into mean predictions.\r\n self._predict_from_lstm_output = tf.make_template(\r\n name_=\"predict_from_lstm_output\",\r\n func_=lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),\r\n create_scope_now_=True)", "def TestLSTM(test_x, test_y): \r\n loss = 0.0\r\n seq_length = test_y.shape[1]\r\n for t in range(seq_length):\r\n lstm_in = StepProcess(test_x, batch_size, source_length, lstm_step)\r\n logit = lstm_restored(lstm_in)\r\n # loss function : RSME TODO\r\n loss_0 = tf.keras.losses.MSE(test_y[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)# TODO\r\n pred_point = np.reshape(logit.numpy(), [batch_size, 1, 5])\r\n test_x = np.concatenate((test_x[:, 1:source_length, :], pred_point), axis=1) \r\n \r\n loss = tf.reduce_mean(loss)\r\n loss = loss / seq_length\r\n return loss", "def bi_lstm(X_inputs):\n # X_inputs.shape = [batchsize, timestep_size] -> inputs.shape = [batchsize, timestep_size, embedding_size]\n inputs = tf.nn.embedding_lookup(embedding, X_inputs)\n\n # ** 1.构建前向后向多层 LSTM\n cell_fw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n cell_bw = rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple=True)\n\n # ** 2.初始状态\n initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)\n initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)\n\n # 下面两部分是等价的\n # **************************************************************\n # ** 把 inputs 处理成 rnn.static_bidirectional_rnn 的要求形式\n # ** 文档说明\n # inputs: A length T list of inputs, each a tensor of shape\n # [batch_size, input_size], or a nested tuple of such elements.\n # *************************************************************\n # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n # inputs.shape = [batchsize, timestep_size, embedding_size] -> timestep_size tensor, each_tensor.shape = [batchsize, embedding_size]\n # inputs = tf.unstack(inputs, timestep_size, 1)\n # ** 3.bi-lstm 计算(tf封装) 一般采用下面 static_bidirectional_rnn 函数调用。\n # 但是为了理解计算的细节,所以把后面的这段代码进行展开自己实现了一遍。\n # try:\n # outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # except Exception: # Old TensorFlow version only returns outputs not states\n # outputs = rnn.static_bidirectional_rnn(cell_fw, cell_bw, inputs,\n # initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bw, dtype=tf.float32)\n # output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size * 2])\n # ***********************************************************\n\n # ***********************************************************\n # ** 3. 
bi-lstm 计算(展开)\n with tf.variable_scope('bidirectional_rnn'):\n # *** 下面,两个网络是分别计算 output 和 state\n # Forward direction\n outputs_fw = list()\n state_fw = initial_state_fw\n with tf.variable_scope('fw'):\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_fw, state_fw) = cell_fw(inputs[:, timestep, :], state_fw)\n outputs_fw.append(output_fw)\n\n # backward direction\n outputs_bw = list()\n state_bw = initial_state_bw\n with tf.variable_scope('bw') as bw_scope:\n inputs = tf.reverse(inputs, [1])\n for timestep in range(timestep_size):\n if timestep > 0:\n tf.get_variable_scope().reuse_variables()\n (output_bw, state_bw) = cell_bw(inputs[:, timestep, :], state_bw)\n outputs_bw.append(output_bw)\n # *** 然后把 output_bw 在 timestep 维度进行翻转\n # outputs_bw.shape = [timestep_size, batch_size, hidden_size]\n outputs_bw = tf.reverse(outputs_bw, [0])\n # 把两个oupputs 拼成 [timestep_size, batch_size, hidden_size*2]\n output = tf.concat([outputs_fw, outputs_bw], 2)\n output = tf.transpose(output, perm=[1, 0, 2])\n output = tf.reshape(output, [-1, hidden_size * 2])\n # ***********************************************************\n return output # [-1, hidden_size*2]", "def decoder(self, tensor, reuse=False):\n\n outputs, predictions = [], []\n\n with tf.variable_scope(\"decoder\", reuse=reuse) as scope:\n\n\n # add gausian noise\n decoder_input = gaussian_noise_layer(tensor, 0.2)\n encoder_dim = tensor.get_shape().as_list()[-1]\n W = tf.get_variable(\"decoder_last_weight\", [self.num_units + encoder_dim, self.voca_size])\n b = tf.get_variable(\"decoder_last_bias\", [self.voca_size])\n # time-major: [batch_size, max_len, num_units] --> [max_len, batch_size, num_units]\n # decoder_input = tf.transpose(decoder_input, [1,0,2])\n cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=False)\n # initial_state = state = decoder_input\n initial_state = tf.zeros([self.batch_size, self.num_units])\n initial_state = tf.concat([initial_state, decoder_input], 1)\n\n\n for i in range(self.max_len):\n if i == 0:\n # start of sequence\n input_ = tf.nn.embedding_lookup(self.embedding, tf.ones([self.batch_size], dtype=tf.int32))\n state = initial_state\n\n else:\n scope.reuse_variables()\n input_ = tf.nn.embedding_lookup(self.embedding, prediction)\n\n output, state = cell(input_, state)\n output = tf.concat([output, tensor], -1)\n output = tf.nn.xw_plus_b(output, W, b)\n\n prediction = tf.argmax(output, axis=1)\n\n outputs.append(output)\n predictions.append(prediction)\n\n predictions = tf.transpose(tf.stack(predictions), [1,0])\n outputs = tf.stack(outputs)\n\n return predictions, outputs", "def build_t_output(self, lstm_output):\n with tf.variable_scope('terminal_softmax'):\n t_weight = tf.Variable(tf.random_uniform(\n [self.num_hidden_units, self.num_ttoken], minval=-0.05, maxval=0.05))\n t_bias = tf.Variable(tf.zeros(self.num_ttoken))\n tt_logits = tf.matmul(lstm_output, t_weight) + t_bias\n return tt_logits" ]
[ "0.671775", "0.6443419", "0.63130444", "0.6284584", "0.62595385", "0.6248711", "0.61987054", "0.61636215", "0.6117673", "0.6102052", "0.6073893", "0.6053332", "0.60326666", "0.5990912", "0.5978899", "0.59719133", "0.5913929", "0.58949786", "0.58881134", "0.5882032", "0.58778363", "0.5870933", "0.58537096", "0.5850899", "0.5840717", "0.5832937", "0.58303165", "0.5825694", "0.58223057", "0.58218855" ]
0.66498864
1
Gets the maximum sequence bounds of non-idle time. Machines show default values at the beginning and end of the operations; this function returns the ids of the longest sequence that is not operating with the default values. Note that you cannot just remove all default values, essentially because order matters and there might also be intermediate interruptions. Just removing these outliers in between the operations would lead to a wrong time series (time gaps).
def get_id_bounds(
    values: Tensor,
    default_value: float
):
    # get all values that are not default ones
    default_value_idx = (values == default_value).nonzero()[:, 0]
    # get the longest sequence without interruption
    # to do this, get the difference of the above ids
    diff = default_value_idx[1:] - default_value_idx[:-1]
    # find the maximum difference (maximum ids between default values)
    split = (diff == diff.max()).nonzero()[0, 0]
    # return start, end ids
    start = default_value_idx[split] + \
        1 if split != 0 and diff.max() != 1 else 0
    end = default_value_idx[split + 1] \
        if split != 0 and diff.max() != 1 else default_value_idx[0]
    return start, end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_loose_bounds(maxlum=None):\n return[(None,None), (10**-6, None), (2., 350),\n (None, -10**-6), (None, None)]", "def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value", "def find_max_gap(self, free_space_ranges):\n # mask the bubble\n masked = np.ma.masked_where(free_space_ranges==0, free_space_ranges)\n # get a slice for each contigous sequence of non-bubble data\n slices = np.ma.notmasked_contiguous(masked)\n max_len = slices[0].stop - slices[0].start\n chosen_slice = slices[0]\n # I think we will only ever have a maximum of 2 slices but will handle an\n # indefinitely sized list for portablility\n for sl in slices[1:]:\n sl_len = sl.stop - sl.start\n if sl_len > max_len:\n max_len = sl_len\n chosen_slice = sl\n return chosen_slice.start, chosen_slice.stop", "def find_max_gap(self, free_space_ranges):\n start = end = 200\n curr_start = 200\n #print(free_space_ranges)\n for i in range(201, 880):\n if free_space_ranges[i] != 0:\n if free_space_ranges[i-1] == 0:\n curr_start = i\n else:\n if (i-curr_start) > end-start:\n start = curr_start\n end = i\n return start, end", "def find_longest_ranges(range, howmany):\n range.sort(key=lambda x: x[1]) # sort by length\n if howmany > 1:\n range = range[-howmany:] # get last few\n range.sort(key=lambda x: x[0]) # sorted by starttime\n return range\n else:\n return range[-1]", "def longest_seq_of_1s(n, index_to_ignore):\n max_ = 0\n counter = 0\n for i in range(SEQ_LENGTH):\n if i == index_to_ignore or get_bit(n, i):\n counter += 1\n max_ = max(counter, max_)\n else:\n counter = 0\n return max_", "def longest_id(ids, seqs):\r\n lengths = map(len, [seqs.get(id_, '') for id_ in ids])\r\n return ids[argmax(lengths)]", "def find_mode_range(self):\n\n if (len(self.n) < 1): return -1,-1,-1,-1\n nmin = np.nanmin(self.modes['n'])\n nmax = np.nanmax(self.modes['n'])\n lmin = np.nanmin(self.modes['l'])\n lmax = np.nanmax(self.modes['l'])\n return nmin, nmax, lmin, lmax", "def max_positions(self):\n return None", "def max_decoder_positions(self):\n return min(model.decoder.max_positions() for model in self.models.values())", "def maxs(self):\n return self.intervals[:, 1]", "def sequence_last(self):\n return max(list(self.nodes_attribute(name=\"_k\")))", "def find_longest_plateau(seq):\n\n start_longest_so_far = 0\n length_longest_so_far = 0\n i = 0\n\n # INVARIANT\n # The longest plateau in seq[0:i] starts at position\n # start_longest_so_far and has a length of\n # length_longest_so_far\n # VARIANT: len(seq) - i\n #\n while len(seq) - i > length_longest_so_far:\n\n length_current_plateau = length_plateau_at(seq, i)\n\n if length_current_plateau > length_longest_so_far:\n start_longest_so_far = i\n length_longest_so_far = length_current_plateau\n\n i += length_current_plateau\n\n return start_longest_so_far", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def find_extrema(s):\n max_env = np.logical_and(\n np.r_[True, s[1:] > s[:-1]],\n np.r_[s[:-1] > s[1:], True])\n min_env = np.logical_and(\n np.r_[True, s[1:] < s[:-1]],\n np.r_[s[:-1] < s[1:], True])\n max_env[0] = max_env[-1] = False\n\n #exclude endpoints\n mini = [m for m in min_env.nonzero()[0] if m != 0 and m != 
len(s)-1]\n maxi = [m for m in max_env.nonzero()[0] if m != 0 and m != len(s)-1]\n\n return mini,maxi", "def longest_seq(n):\n max_seq = 0\n for i in range(SEQ_LENGTH):\n max_seq = max(max_seq, longest_seq_of_1s(n, i))\n\n return max_seq", "def get_max_dwell_mvals(model, state_data):\n dwell_results = []\n for ind in range(len(state_data)):\n ind_dwell = (model.times >= state_data['tstart'][ind]) & (model.times <= state_data['tstop'][ind])\n if np.any(ind_dwell):\n dwell_results.append(np.max(model.mvals[ind_dwell]))\n else:\n dwell_results.append(-1.0e6)\n return tuple(dwell_results)", "def get_longest_state(data):\n return max(data, key=len)", "def min_max_range(s):\n # note np.argmax, np.argmin returns the position of first occurence of global max, min\n sign = np.sign(np.argmax(s) - np.argmin(s))\n if sign == 0:\n return 0.0\n else:\n return sign*(np.max(s) - np.min(s))", "def worstVectorIndex(self):\n return max(range(len(self.costs)), key=self.costs.__getitem__)", "def index_of_max_change(vals):\n i_vals = zip(range(len(vals)), vals)\n vals = [v for i, v in i_vals]\n vals_diff = [abs(v1 - v0) for v0, v1 in zip(vals[:-1], vals[1:])]\n return i_vals[vals_diff.index(max(vals_diff))][0]", "def maxSRAM(cmds):\n\n def addr(cmd):\n return MemorySequence.getAddress(cmd) if \\\n MemorySequence.getOpcode(cmd) in [0x8, 0xA] else 0\n\n return max(addr(cmd) for cmd in cmds)", "def calcMaxIDX(fls, noct):\n freq_l = fls[-1] / (2.0 ** (1 / (2.0 * noct)))\n max_idx = np.array(abs(fls - freq_l)).argmin()\n return max_idx", "def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())", "def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())", "def masked_maximum(data, mask):\r\n\r\n flag = (torch.sum(mask, dim = 1, keepdim = True) == 1).float()\r\n only_nonzero = torch.max(mask, dim = 1, keepdim = True)[1]\r\n\r\n minimum = torch.min(data, dim = 1, keepdim = True)[0]\r\n masked_maximum, masked_maximum_id = torch.max((data - minimum) * mask, dim = 1, keepdim = True)\r\n masked_maximum += minimum\r\n\r\n masked_maximum_id = (1 - flag) * masked_maximum_id.float() + flag * only_nonzero.float()\r\n return masked_maximum, masked_maximum_id.long()", "def calcDefaultCount(self, value):\n idx = dnUtil.valuesList.index(value)\n higher = (len(dnUtil.valuesList) - idx - 1) * 4\n lower = idx * 4\n tie = 4\n return (higher, lower, tie)", "def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)", "def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)", "def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)" ]
[ "0.5749199", "0.5637835", "0.5553669", "0.5540054", "0.5517348", "0.54871017", "0.547557", "0.5464125", "0.54230356", "0.53948283", "0.53876203", "0.53298086", "0.5300341", "0.52418303", "0.5241372", "0.52193195", "0.5216842", "0.52015424", "0.520039", "0.51883274", "0.51681966", "0.5153603", "0.51374143", "0.5116756", "0.5116756", "0.5114228", "0.51040936", "0.5097055", "0.5097055", "0.5097055" ]
0.7332722
0
Creates a list of strings indicating available devices to test on. Checks for CUDA devices, primarily. Assumes CPU is always available.
def get_test_devices(): # Assumption: CPU is always available devices = ['cpu'] if torch.cuda.is_available(): devices.append('cuda') return devices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_test_devices():\n devices = [\"cpu\"]\n if torch.cuda.is_available():\n devices.append(\"cuda\")\n return devices", "def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices", "def try_all_gpus(): #@save\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n global _LOCAL_DEVICES\n if _LOCAL_DEVICES is None:\n if _is_tf_1():\n devices = get_session().list_devices()\n _LOCAL_DEVICES = [x.name for x in devices]\n else:\n _LOCAL_DEVICES = tf.config.experimental_list_devices()\n return [x for x in _LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def get_devices(needs: int = None):\n\n num_gpus = torch.cuda.device_count()\n\n if num_gpus == 0:\n devices = [torch.device(\"cpu\")]\n if needs is None:\n return devices\n return devices * needs\n\n devices = [torch.device(f\"cuda:{index:d}\") for index in range(num_gpus)]\n if needs is None:\n return devices\n return [device for _, device in zip(range(needs), itertools.cycle(devices))]", "def get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == \"GPU\"]", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} 
are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')", "def get_available_gpus() -> List[int]:\n orig_visible_devices = os.environ[f\"{CUDA_ENVVAR}\"]\n available_gpus = [int(g.strip()) for g in orig_visible_devices.split(\",\") if g and not g.isspace()]\n return available_gpus", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def FindAllAvailableDevices(options):\n use_ssh = options.cros_remote and cros_interface.HasSSH()\n if not use_ssh and not IsRunningOnCrOS():\n logging.debug('No --remote specified, and not running on ChromeOs.')\n return []\n\n return [CrOSDevice(options.cros_remote, options.cros_remote_ssh_port,\n options.cros_ssh_identity, not use_ssh)]", "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n 
root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()", "def get_computation_devices(\n preferred_gpu_list: Optional[List[int]],\n multi_gpu_flag: bool,\n) -> List[Device]:\n\n # use CPU when GPUs are not preferred or not available\n if (preferred_gpu_list is None) \\\n or (len(preferred_gpu_list) == 0) \\\n or (not torch.cuda.is_available()):\n return [Device('cpu'), ]\n\n # else GPUs are preferred and available\n # get all available GPU indexes\n _available_gpu_list: List[int]\n if getAvailable:\n # by default, use GPU utility package with load and memory usage\n # specification so that the 'available' GPUs are actually ready\n # for deep learning runs (https://github.com/anderskm/gputil)\n _available_gpu_list = getAvailable(\n limit=_MAX_NUM_GPUS,\n maxLoad=_MAX_GPU_LOAD,\n maxMemory=_MAX_GPU_MEM_USED,\n )\n else:\n # assume all GPUs are good to use without GPUtil package\n _available_gpu_list = list(range(torch.cuda.device_count()))\n _warning_msg = \\\n f'GPUtil (https://github.com/anderskm/gputil) not installed.' \\\n f'Assuming all GPUs ({_available_gpu_list}) are available ' \\\n f'and ready for training ... '\n _LOGGER.warning(_warning_msg)\n\n # get the overlap between the preferred and the available GPUs\n _gpus = \\\n [_g for _g in _available_gpu_list if _g in preferred_gpu_list]\n\n # use CPU if there is no preferred GPUs that are available\n if len(_gpus) == 0:\n return [Device('cpu'), ]\n\n # otherwise return one or all GPUs depending on the multi-GPU flag\n return [Device(f'cuda:{_g}') for _g in _gpus] \\\n if multi_gpu_flag else [Device(f'cuda:{_gpus[0]}'), ]", "def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()", "def _prepare_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\n \"Warning: There\\'s no GPU available on this machine, training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(\n \"Warning: The number of GPU\\'s configured to use is {}, but only {} are available on this machine.\".format(\n n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def get_cl_devices():\n\n _devices = {'CPU':[], 'GPU':[]}\n\n platforms = cl.get_platforms()\n for platform in platforms:\n devices = platform.get_devices()\n for device in devices:\n if device.type == cl.device_type.CPU:\n _devices['CPU'].append(device)\n elif device.type == cl.device_type.GPU:\n _devices['GPU'].append(device)\n \n \n return _devices", "def _prepare_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n 
self.logger.warning(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]", "def get_available_devices(self):\n available_devices = []\n try:\n out = self.get_output(\"devices\")\n except Exception as e:\n logger.error(e)\n else:\n for line in out:\n device = self.parse_device_info(line)\n if device:\n available_devices.append(device)\n return available_devices" ]
[ "0.79648924", "0.779924", "0.74314016", "0.742399", "0.72089636", "0.7203299", "0.7203299", "0.71994233", "0.71461236", "0.71363676", "0.7131888", "0.7081655", "0.69478077", "0.6909545", "0.6909545", "0.68895745", "0.6862883", "0.6847569", "0.6778976", "0.67201084", "0.6712924", "0.6632938", "0.6624643", "0.6619268", "0.66151696", "0.6586516", "0.6558381", "0.6549411", "0.6542082", "0.6529115" ]
0.8175212
0
Read the pickled spacy objects
def read_spacy_pickle(self, file_path): vocab = self.nlp.vocab try: file = open(file_path, "rb") # putting the spacy doc in a single-item list to avoid pandas splitting it up spacy_objects = [[Doc(vocab).from_bytes(x)] for x in pickle.load(file)] file.close() spacy_objects_dataset = pd.DataFrame(spacy_objects, columns=["spacy"]) return spacy_objects_dataset except FileNotFoundError: print('spaCy pickle file for {} does not exist. No spaCy objects will be included.'.format( self._dataset_name)) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_dictionary():\n # model = 'en_core_web_sm'\n # model = 'en_core_web_md'\n # model = 'en_core_web_lg'\n model = 'en' # Using 'en' instead of 'en_core_web_md', as the latter has many words without vector data. Check!\n print(\"Starting to read the model:\", model)\n # nlp = spacy.cli.download(model) # Run this for the first time on a new server.\n nlp = spacy.load(model) # Use this for subsequent runs\n return nlp", "def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))", "def load_pickles(classes_mapping_path, classes_count_path, entities_path):\n classes_count = cPickle.load(open(classes_count_path, 'rb'))\n hierarchy_mapping = cPickle.load(open(classes_mapping_path, 'rb'))\n entities = cPickle.load(open(entities_path, 'rb'))\n return classes_count, hierarchy_mapping, entities", "def readobject(filename):\n # import cPickle as pickle\n with open(filename, 'rb') as input_file:\n return pickle.load(input_file)", "def load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)", "def deserialize(path):\n with open(path, 'rb') as f:\n temp = pickle.load(f)\n for q in temp.questions:\n q.on_deserialize()\n return temp", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def loadPickle(filepath):\n\tf = open(filepath, 'rb')\n\tobj = pickle.load(f)\n\tf.close()\n\treturn obj", "def load_object(path):\r\n with open(path,\"rb\") as f:\r\n object = pickle.load(f) \r\n return object", "def _tokens_from_disk(self, filepath=None):\n assert filepath and isinstance(filepath, str)\n with open(filepath, 'rb') as f:\n self.tokens = pickle.load(f)\n return self.tokens", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def load_reader(path):\n if path[-4:] != '.pkl':\n path+='.pkl'\n with open(path,\"r+b\") as f:\n log(\"Loading reader from {}\".format(path))\n r = pickle.load(f)\n return r", "def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)", "def loadStuff(path=None):\n\n if path == None:\n print(\"No path specified\")\n return\n\n try:\n pkl_file = open(path, 'rb')\n obj = cPickle.load(pkl_file)\n pkl_file.close()\n print('Data correctly loaded and returned')\n return obj\n\n except IOError as e:\n #print \"I/O error({0}):{1}\".format(e.errno, e.strerror)\n print('I/O error')\n except:\n print(\"Unexpected error\" % sys.exc_info()[0])\n raise", "def readAssembledObjects(self):\n # get the classifier to use, if any, from the Assembler\n ## this is used to cluster the ROM segments\n self._divisionClassifier = 
self._assembledObjects.get('Classifier', [[None]*4])[0][3]\n self._metricClassifiers = self._assembledObjects.get('Metric', None)", "def read_model(self):\n \n # words dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d", "def load_serialized(self):\n if not os.path.isfile(cfg.serialized):\n serialized = self.write_serialized()\n else:\n print(\"Serialized object exists. Reading from disk...\")\n with open(cfg.serialized, 'rb') as file:\n serialized = pickle.load(file)\n\n return serialized[0], serialized[1], serialized[2] # users, movies, ratings", "def load(self):", "def read_from_file(name):\n print 'reading structures from pickle'\n print '------------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'rb')\n new_obj = pickle.load(file)\n file.close()\n\n return new_obj", "def loads(data):\n return cPickle.loads(data)", "def getData():\n with open('obj/documents.pkl', 'rb') as file:\n data = pickle.load(file)\n return data", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def load_data(self):", "def _read_data(self):", "def load_objects(self):\n \n # Load classifier\n with open('../twitterClass/classifier/classifier.p','r') as f:\n self.classifier = cPickle.load(f)\n \n #Load blocked keywords\n regex_str2 = []\n with open('../twitterClass/twitterMiningClass/private/blocked_keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n print key\n regex_str2.append(key[1])\n # create regex compiler for blocked keyword search\n regex_str2 = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str2)\n self.blocked_keywords_re = re.compile(r'('+'|'.join(regex_str2)+')',re.IGNORECASE)\n \n # Load keywords\n with open('../twitterClass/twitterMiningClass/private/keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n self.keywords[key[0]] = key[1]\n # create regex compiler for keyword search\n regex_str = []\n for keys,pattern in self.keywords.iteritems():\n regex_str.append(pattern)\n regex_str = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str)\n self.keywords_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)", "def load(self):\n \n with open(os.path.join(self.output_dir, 'terms.dict'), 'rb') as f:\n self.term_id_map = pkl.load(f)\n with open(os.path.join(self.output_dir, 'docs.dict'), 'rb') as f:\n self.doc_id_map = 
pkl.load(f)", "def loads(self):\n return self._loads", "def read_pickle_object_in_file(self):\n outobj = None\n if os.path.exists(self.pickle_file):\n with gzip.open(self.pickle_file, 'rb') as pkl_file:\n outobj = pickle.load(pkl_file)\n return outobj", "def load_similarities(self):\n if not os.path.isfile(cfg.similarities):\n return None\n else:\n print(\"Serialized object exists. Reading from disk...\")\n with open(cfg.similarities, 'rb') as file:\n data = pickle.load(file)\n\n return data" ]
[ "0.60844487", "0.60844284", "0.59511757", "0.59002733", "0.587763", "0.5824237", "0.5801722", "0.5801091", "0.57834816", "0.57699907", "0.5766996", "0.57638127", "0.57596767", "0.57547075", "0.5739896", "0.5739467", "0.57228065", "0.57223886", "0.57158464", "0.5714373", "0.5708382", "0.5706876", "0.56971115", "0.5693761", "0.5692432", "0.5688565", "0.56828135", "0.5677501", "0.56680816", "0.56597954" ]
0.67977774
0
Creates an Experiment with totaly artificial data. Experiment has one setup with two modalities, EMG and kin. EMG has four channels, KIN has three channels. Two sessions are "recorded" for two different subjects. All EMG recordings have sampling rate of 20Hz, all KIN recordings sampling rate of 5Hz.
def setup(cls): cls.logger = logging.getLogger('ModelTestLogger') cls.logger.setLevel(logging.DEBUG) s1 = model.Subject('subject1') s2 = model.Subject('subject2') cls.experiment = model.Experiment() cls.experiment.put_subject(s1) cls.experiment.put_subject(s2) setup1 = model.Setup(cls.experiment) modality1 = model.Modality(setup1, 20, 'emg') modality2 = model.Modality(setup1, 5, 'kin') model.Channel(modality1, 'brachoradialis') model.Channel(modality1, 'musculus sterno clavicularis') model.Channel(modality1, 'musculus rhombideus') model.Channel(modality1, 'musculus lattisimus') model.Channel(modality2, 'Pos-X') model.Channel(modality2, 'Pos-Y') model.Channel(modality2, 'Pos-Z') session1 = model.Session(cls.experiment, setup1, s1, 'session1') arr = np.column_stack(( np.tile( np.concatenate(( np.arange(0., 1., 0.1), np.arange(1., 0., -0.1) )), 10 ), np.tile( np.concatenate(( np.arange(10), np.arange(10, 0, -1) )), 10 ), np.tile( np.concatenate(( np.arange(0.0, 0.1, 0.01), np.arange(0.1, 0.0, -0.01) )), 10 ), np.tile( np.concatenate(( np.arange(0.5, 1.5, 0.1), np.arange(1.5, 0.5, -0.1) )), 10 ), )) recording1 = model.Recording(session1, modality1, data=arr, identifier='emg_recording1') arr2 = np.column_stack(( np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1), np.prod(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1), np.square(np.sum(np.mean(arr.reshape(-1, 4, 4), axis=1), axis=1)) )) recording2 = model.Recording(session1, modality2, data=arr2, identifier='kin_recording1') for i in range(5): model.Trial(recording1, i * 2, 2) model.Trial(recording2, i * 2, 2) session2 = model.Session(cls.experiment, setup1, s2, 'session2') arr = np.add(arr, np.random.randn(*arr.shape)) recording1 = model.Recording(session2, modality1, data=arr, identifier='emg_recording2') arr2 = np.column_stack(( np.sin(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))), np.cos(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))), np.tan(np.mean(np.sum(arr.reshape(-1, 4, 4), axis=1))) )) recording2 = model.Recording(session2, modality2, data=arr2, identifier='kin_recording2') for i in range(5): model.Trial(recording1, i * 2, 2) model.Trial(recording2, i * 2, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_exp2(args):\n\n # load data.\n SC = np.load(args.SC)\n sc_lbls = np.load(args.sc_lbls)\n c_lbls = np.load(args.c_lbls)\n b_lbls = np.load(args.b_lbls)\n\n # compute dimensions.\n n = args.n\n m = SC.shape[0]\n k = c_lbls.shape[0]\n l = SC.shape[1]\n t = args.t\n q = args.q\n e = args.e\n\n # build the S simulation object.\n if os.path.isfile(args.sim_obj) == False:\n sim = SimSingleCell(SC, sc_lbls)\n sim.save(args.sim_obj)\n else:\n sim = SimSingleCell(SC, sc_lbls, load=args.sim_obj)\n\n # loop over the number of experiments.\n Xs = list()\n Cs = list()\n Ss = list()\n Zs = list()\n ys = list()\n\n # create master S.\n S = _avg_S(sc_lbls, SC)\n\n # build list of house keepers.\n tmp = ['ACTB', 'GAPDH']\n hkeepers = list()\n for h in tmp:\n hkeepers += list(np.where(b_lbls == h)[0])\n hkeepers = sorted(hkeepers)\n\n # normalize S/\n Snorm = S.copy()\n for a in range(k):\n Snorm[:,a] = S[:,a] / gmean(S[hkeepers,a])\n\n # simulate single-cells.\n #TMP, we = sim.sample_1(t * 1000)\n\n # loop over each experiment.\n for gg in range(q):\n\n # simulate the single-cells.\n Z, y = sim.sample_1(t)\n\n # create S-timate\n H = _avg_S(y, Z)\n\n # simulate the concentrations.\n C = np.zeros((k, n), dtype=np.float)\n for j in range(n):\n if args.c_type == 1:\n\n # uniform.\n C[:,j] = 1.0 / float(k)\n\n elif args.c_type == 2:\n\n # arithmetic.\n x = [float(x) for x in range(1,k+1)]\n random.shuffle(x)\n x = np.array(x)\n x = x / np.sum(x)\n C[:,j] = x\n\n elif args.c_type == 3:\n\n # geometric.\n x = list(np.vander([k], k)[0])\n random.shuffle(x)\n x = [float(z) for z in x]\n x = np.array(x)\n x = x / np.sum(x)\n C[:,j] = x\n\n else:\n logging.error(\"unknown C type\")\n sys.exit(1)\n\n # compute mixtures directly from our model\n X2 = np.dot(H, C)\n\n '''\n # sample to compute mixtures.\n cheat_cnt = 0\n X = np.zeros((m, n), dtype=np.float)\n for j in range(n):\n\n # loop over each class.\n for l in range(k):\n\n # decide how many samples.\n scnt = int(50 * C[l,j])\n\n print scnt\n\n # choose appropriate subset.\n sub = Z[:, y == l]\n\n # choose randomly among these subsets.\n idxs = np.random.choice(sub.shape[1], size=scnt, replace=True)\n cheat_cnt += len(idxs)\n\n # sum these up gene by gene.\n for i in range(m):\n X[i, j] = np.sum(sub[i,idxs])\n\n\n\n print \"wago\"\n sys.exit()\n '''\n # add noise.\n N = _log_noise(X2.shape[0], X2.shape[1], e)\n X2 = X2 * N\n '''\n # normalize by geometric mean of housekeepers.\n for j in range(n):\n X2[:,j] = X2[:,j] / gmean(X2[hkeepers,j])\n\n for j in range(n):\n X[:,j] = X[:,j] / gmean(X[hkeepers,j])\n\n for b in range(l):\n Z[:,b] = Z[:,b] / gmean(Z[hkeepers,b])\n '''\n\n\n # save to list.\n Xs.append(X2)\n Cs.append(C)\n #Ss.append(Snorm)\n Ss.append(S)\n Zs.append(Z)\n ys.append(y)\n\n # save experiment.\n save_pickle(args.ref_file, {'Xs':Xs, 'Ss':Ss, 'Cs':Cs, 'Zs':Zs, 'ys':ys})\n save_pickle(args.test_file, {'Xs':Xs, 'Zs':Zs, 'ys':ys})\n\n # save simulated single-cells for plotting.\n Z, y = sim.sample_1(50)\n np.save('%s/Z.npy' % args.sim_dir, Z)\n np.save('%s/z_lbls.npy' % args.sim_dir, y)", "def newExperiment(self):\n experiment = Experiment()\n newtitle = 'Untitled ' + self.getNextUntitled()\n experimentFrame = SequenceFrame(self, experiment, True, newtitle)\n experiment.setInteractionParameters(parentFrame=experimentFrame,\n graphManagerClass=StandardGraphManager)\n self.frames.append(experimentFrame)\n self.names.append(newtitle)\n log.info('Created experiment ' + newtitle)\n experimentFrame.Show()\n testFrame = 
tf.TestingFrame(experimentFrame, experiment)\n testFrame.Show()\n self.Show(False)", "def exp_main(self, use_remote_data=True, test=False):\n test = str_to_bool(test)\n use_remote_data = str_to_bool(use_remote_data)\n root_uri = get_root_uri(use_remote_data)\n root_uri = os.path.join(root_uri, rv_output_dir)\n spacenet_config = VegasBuildings(use_remote_data)\n experiments = []\n runs = [0]\n\n noise_modes = [\n NoiseMode(NoiseMode.SHIFT, 0),\n NoiseMode(NoiseMode.SHIFT, 10),\n NoiseMode(NoiseMode.SHIFT, 20),\n NoiseMode(NoiseMode.SHIFT, 30),\n NoiseMode(NoiseMode.SHIFT, 40),\n NoiseMode(NoiseMode.SHIFT, 50),\n NoiseMode(NoiseMode.DROP, 0.0),\n NoiseMode(NoiseMode.DROP, 0.1),\n NoiseMode(NoiseMode.DROP, 0.2),\n NoiseMode(NoiseMode.DROP, 0.3),\n NoiseMode(NoiseMode.DROP, 0.4),\n NoiseMode(NoiseMode.DROP, 0.5)\n ]\n\n for nm in noise_modes:\n for run in runs:\n exp_id = get_exp_id(nm, run)\n task = build_task(spacenet_config.get_class_map())\n backend = build_fastai_backend(task, test)\n analyzer = rv.AnalyzerConfig.builder(rv.STATS_ANALYZER) \\\n .build()\n dataset = build_dataset(task, spacenet_config, test, nm)\n\n experiment = rv.ExperimentConfig.builder() \\\n .with_id(exp_id) \\\n .with_analyze_key('shift-0-0') \\\n .with_task(task) \\\n .with_backend(backend) \\\n .with_analyzer(analyzer) \\\n .with_dataset(dataset) \\\n .with_root_uri(root_uri) \\\n .build()\n experiments.append(experiment)\n\n return experiments", "def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()", "def setUp(self):\n TestExperiment.setUp(self)\n i = self.i\n self.ex = Experiment(\n sampler=self.sampler,\n range=[i, range(100, 2001, 100)],\n nreps=10,\n calls=[\n Signature(\n \"dgemm\",\n Trans(\"transA\"), Trans(\"transB\"),\n Dim(\"m\"), Dim(\"n\"), Dim(\"k\"),\n dScalar(),\n dData(\"A\", \"ldA * (k if transA == 'N' else m)\"),\n Ld(\"ldA\", \"m if transA == 'N' else k\"),\n dData(\"B\", \"ldB * (n if transB == 'N' else k)\"),\n Ld(\"ldB\", \"k if transB == 'N' else n\"),\n dScalar(\"beta\"),\n sData(\"C\", \"ldC * n\"), Ld(\"ldC\", 
\"m\"),\n flops=\"2 * m * n * k\"\n )(\"N\", \"N\", i, i, i, 1, \"A\", i, \"B\", i, 1, \"C\", i)\n ]\n )", "def make_experiment(self,signal=None):\n # Maximum likelihood estimators for 's' parameters\n # under the observed data, ignoring correlations\n # between nuisance observations. Useful for seeding\n # certain fits.\n self.s_MLE = np.array(self.SR_n) - np.array(self.SR_b)\n\n # Nominal signal parameters, for testing. User should provide this from their model \n self.test_signal = {'s_{0}'.format(i): self.s_MLE[i] for i in range(self.N_SR)}\n\n if self.cov is not None:\n e = self.make_experiment_cov()\n else:\n e = self.make_experiment_nocov(signal)\n return e", "def simulate_test_data(self):\n # simulation config\n if self.instrument == 'IMA':\n sim_config = SimConfig.makeSim(name=\"IMA Simulation\", rel_obsdate=0.0, scene=\"scene.ini\", POP='IMA',\n ConfigPath='IMA_FULL', Dither=False,StartInd=1, NDither=4,\n DitherPat=\"ima_recommended_dither.dat\", filter=\"F1130W\",\n readDetect= 'FULL', ima_mode= 'FAST', ima_exposures=1,\n ima_integrations=1, ima_frames=50, disperser= 'SHORT', detector= 'SW',\n mrs_mode= 'SLOW', mrs_exposures=5,mrs_integrations=4,mrs_frames=10)\n\n # scene config\n background = Background(level='low', gradient=5., pa=15.0, centreFOV=(0., 0.))\n\n SED1 = BBSed(Temp=300., wref=10., flux=1.e8)\n Gal1 = Galaxy(Cen=(0., 0.), n=1., re=200, q=0.99, pa=0.1)\n Gal1.set_SED(SED1)\n targets = [Gal1]\n\n scene_config = SceneConfig.makeScene(loglevel=0, background=background, targets=targets)\n\n elif self.instrument == 'MRS':\n sim_config = SimConfig.makeSim(name=\"MRS Simulation\", rel_obsdate=0.0, scene=\"scene.ini\",\n POP='MRS', ConfigPath='MRS_1SHORT', Dither=False, StartInd=1,\n NDither=4, DitherPat=\"mrs_recommended_dither.dat\", filter=\"F1130W\",\n readDetect='FULL', ima_mode='FAST', ima_exposures=1,\n ima_integrations=1, ima_frames=20, disperser='SHORT', detector='SW',\n mrs_mode='FAST', mrs_exposures=1, mrs_integrations=1, mrs_frames=50)\n\n # scene config\n background = Background(level='low',gradient=5.,pa=15.0,centreFOV=(0., 0.))\n\n SED1 = BBSed(Temp=300., wref=10., flux=5.e6)\n Gal1 = Galaxy(Cen=(0.,0.), n=1., re=2, q=0.99, pa=0.1)\n Gal1.set_SED(SED1)\n targets = [Gal1]\n\n scene_config = SceneConfig.makeScene(loglevel=0, background=background, targets=targets)\n\n # simulator config\n if self.noise:\n simulator_config = SimulatorConfig.makeSimulator(max_fsm=0.050, max_dither=20.0, mrs_ref_channel=1,\n mrs_ref_band=\"SHORT\", tau_telescope=0.88,tau_eol=0.8,\n telescope_area=25.032, telescope_pupil_diam=6.6052,\n take_webbPsf=False, include_refpix=True,\n include_poisson=True, include_readnoise=True,\n include_badpix=True, include_dark=True,\n include_flat=False, include_gain=True,\n include_nonlinearity=self.linearity, include_drifts=True,\n include_latency=False, cosmic_ray_mode='NONE')\n else:\n simulator_config = SimulatorConfig.makeSimulator(max_fsm=0.050, max_dither=20.0, mrs_ref_channel=1,\n mrs_ref_band=\"SHORT\", tau_telescope=0.88, tau_eol=0.8,\n telescope_area=25.032, telescope_pupil_diam=6.6052,\n take_webbPsf=False, include_refpix=True,\n include_poisson=False, include_readnoise=False,\n include_badpix=True, include_dark=True,\n include_flat=False, include_gain=True,\n include_nonlinearity=self.linearity, include_drifts=True,\n include_latency=False, cosmic_ray_mode='NONE')\n\n\n # run the simulation\n simulation = MiriSimulation(sim_config=sim_config, scene_config=scene_config,\n simulator_config=simulator_config, loglevel='DEBUG')\n 
simulation.run()\n\n # we only need the sim file so move it to output_dir and remove everthing else\n det_image_file = glob.glob(os.path.join(simulation.path_out, 'det_images', '*.fits'))[0]\n self.ramp_file = os.path.join(self.output_dir, os.path.basename(det_image_file))\n shutil.move(det_image_file, self.ramp_file)\n shutil.rmtree(simulation.path_out)", "def record(self):\n\n # TODO: Make the Metadata transmission automatic\n n_channels = 32\n sampling_rate = 500\n channel_types = 'eeg'\n\n # Info class required by mne\n info = mne.create_info(ch_names=n_channels, sfreq=sampling_rate, ch_types=channel_types)\n\n # TODO: Dynamically reduce array size\n\n while self.flag_event.is_set():\n sample, timestamp = self.inlet.pull_sample()\n self.timeObj.append(timestamp)\n self.sampleObj.append(sample)\n self.data = np.array(self.sampleObj).reshape((n_channels, -1)) * 1e-6\n if (self.data.shape[1]+1) % sampling_rate == 0:\n custom_raw = mne.io.RawArray(self.data, info)\n custom_raw.save(\"./Data/sample_raw.fif\", overwrite=True)\n\n # TODO: Finish real time data plotting\n # print(self.data.shape)\n # if (self.data.shape[1]+1) % sampling_rate == 0:\n # # custom_raw = mne.io.RawArray(self.data, info)\n # # custom_raw.plot()\n # # plt.plot(self.timeObj, data.T * 1e-6)\n # # plt.pause(0.05)\n # # plt.show()\n # ani = animation.FuncAnimation(fig, self.animate, interval=10)\n # plt.pause(0.05)\n # plt.show()", "def experiment_init(self):\n raise NotImplementedError(\"this needs to be implemented!\")", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'cluttered_nist_ix1',\n # 'curv_baseline',\n ]\n }\n 
exp['data_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['val_augmentations'] = [\n [\n 'grayscale',\n 'center_crop',\n # 'left_right',\n # 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'zero_one'\n ]]\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 4\n exp['model_name'] = 'unet'\n exp['exp_name'] = exp['model_name'] + '_' + exp['dataset'][0]\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 200\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def emb_experiment():\n print(\"EMBEDDINGS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'emb_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'use_word_emb'\n changing_param_value = [0, 1]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def create_experiment_if_needed(tr):\n exp = tr.getExperiment(EXPERIMENT_ID)\n if None == exp:\n create_project_if_needed(tr)\n exp = tr.createNewExperiment(EXPERIMENT_ID, 'DEFAULT_EXPERIMENT')\n \n return exp", "def eeg_simulate(duration=1, length=None, sampling_rate=1000, noise=0.1, random_state=None):\n # Try loading mne\n try:\n import mne\n import mne.datasets\n import mne.simulation\n\n except ImportError as e:\n raise ImportError(\n \"The 'mne' module is required for this function to run. 
\",\n \"Please install it first (`pip install mne`).\",\n ) from e\n\n # Seed the random generator for reproducible results\n rng = check_random_state(random_state)\n\n # Generate number of samples automatically if length is unspecified\n if length is None:\n length = duration * sampling_rate\n if duration is None:\n duration = length / sampling_rate\n\n # Get paths to data\n path = mne.datasets.sample.data_path() / \"MEG\" / \"sample\"\n raw_file = path / \"sample_audvis_raw.fif\"\n fwd_file = path / \"sample_audvis-meg-eeg-oct-6-fwd.fif\"\n\n # Load real data as the template\n raw = mne.io.read_raw_fif(raw_file, preload=True, verbose=False)\n raw = raw.set_eeg_reference(projection=True, verbose=False)\n\n n_dipoles = 4 # number of dipoles to create\n\n def data_fun(times, n_dipoles=4):\n \"\"\"Generate time-staggered sinusoids at harmonics of 10Hz\"\"\"\n n = 0 # harmonic number\n n_samp = len(times)\n window = np.zeros(n_samp)\n start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1)]\n window[start:stop] = 1.0\n n += 1\n data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times)\n data *= window\n return data\n\n times = raw.times[: int(raw.info[\"sfreq\"] * 2)]\n fwd = mne.read_forward_solution(fwd_file, verbose=False)\n stc = mne.simulation.simulate_sparse_stc(\n fwd[\"src\"],\n n_dipoles=n_dipoles,\n times=times,\n data_fun=data_fun,\n random_state=rng,\n )\n\n # Repeat the source activation multiple times.\n raw_sim = mne.simulation.simulate_raw(raw.info, [stc] * int(np.ceil(duration / 2)), forward=fwd, verbose=False)\n cov = mne.make_ad_hoc_cov(raw_sim.info, std=noise / 1000000)\n raw_sim = mne.simulation.add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], verbose=False, random_state=rng)\n\n # Resample\n raw_sim = raw_sim.resample(sampling_rate, verbose=False)\n\n # Add artifacts\n # mne.simulation.add_ecg(raw_sim, verbose=False)\n # mne.simulation.add_eog(raw_sim, verbose=False)\n\n eeg = raw_sim.pick_types(eeg=True, verbose=False).get_data()\n return eeg[0, 0 : int(duration * sampling_rate)]", "def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%run%',\n DATASET = 'DUMMY',\n NSUBS = 2,\n NSESSIONS = 2,\n NTASKS = 2,\n NACQS = 2,\n NRUNS = 2,\n NCHANNELS = 2,\n SFREQ = 200,\n STOP = 10,\n NUMEVENTS = 10,\n PREFIXES = {'subject':'SU','session':'SE','task':'TA','acquisition':'AC','run':'RU'},\n ROOT=None):\n\n if ROOT is None:\n this_dir = os.path.dirname(__file__)\n data_dir = os.path.abspath(os.path.join(this_dir,'..','_data'))\n else:\n data_dir = ROOT\n os.makedirs(data_dir,exist_ok=True)\n\n\n\n sub_zeros = get_num_digits(NSUBS)\n subs = [ PREFIXES['subject']+ str(x).zfill(sub_zeros) for x in range(NSUBS)]\n\n task_zeros = get_num_digits(NTASKS)\n tasks = [ PREFIXES['task']+str(x).zfill(task_zeros) for x in range(NTASKS)]\n\n run_zeros = get_num_digits(NRUNS)\n runs = [str(x).zfill(run_zeros) for x in range(NRUNS)]\n\n ses_zeros = get_num_digits(NSESSIONS)\n sessions = [ PREFIXES['session']+str(x).zfill(ses_zeros) for x in range(NSESSIONS)]\n\n acq_zeros = get_num_digits(NACQS)\n acquisitions = [ PREFIXES['acquisition']+str(x).zfill(acq_zeros) for x in range(NACQS)]\n\n # Create some dummy metadata\n n_channels = NCHANNELS\n sampling_freq = SFREQ # in Hertz\n info = mne.create_info(n_channels, sfreq=sampling_freq)\n\n times = np.linspace(0, STOP, STOP*sampling_freq, endpoint=False)\n data = np.zeros((NCHANNELS,times.shape[0]))\n\n raw = mne.io.RawArray(data, info)\n raw.set_channel_types({x:'eeg' for x in raw.ch_names})\n 
new_events = mne.make_fixed_length_events(raw, duration=STOP//NUMEVENTS)\n\n for task in tasks:\n for session in sessions:\n for run in runs:\n for sub in subs:\n for acq in acquisitions:\n dummy = PATTERN.replace('%dataset%',DATASET)\n dummy = dummy.replace('%task%',task)\n dummy = dummy.replace('%session%',session)\n dummy = dummy.replace('%subject%',sub)\n dummy = dummy.replace('%run%',run)\n dummy = dummy.replace('%acquisition%',acq)\n path = [data_dir] +dummy.split('/')\n fpath = os.path.join(*path)\n _write_raw_brainvision(raw,fpath,new_events)", "def add_noise(emg):\n MAX_AMPLITUDE = 32767\n\n # Sampling\n # 1 second of data requires 600 frames. And 600 fps is 600 Hz, sampling rate of EMG.\n Ts = 1/EMG_F_SAMPLE\n\n # Time vector\n t = np.arange(0, len(emg)/EMG_F_SAMPLE, Ts) # each unit of t is a second\n\n # Noise\n randAmplitudeScale = np.random.random()*0.1\n randOffset = np.random.random() * 2*np.pi\n \n fNoise = 50; # Frequency [Hz]\n aNoise = randAmplitudeScale*MAX_AMPLITUDE # Amplitude\n noise = aNoise * np.sin(2 * np.pi * t * fNoise + randOffset)\n\n # Add noise to signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] += noise\n return emg", "def setUp(self):\n self._m = 100\n self._n = 30\n self._k = 5\n self._increment = 20\n self._A = get_data(ExperimentType.ExampleNo2)(self._m, np.arange(2 * self._k).astype(float))\n self._approximation = random_id(self._A, self._k, self._increment)\n self._B = self._approximation.B\n self._P = np.array(self._approximation.P)\n self._A = self._A.as_numpy_arr()\n self._n = self._A.shape[1]\n self._approximation = self._approximation.as_numpy_arr()", "def gen_simple_test():\n count = 1\n mdict = {\n 'operating_frequency': 3e8,\n 'sample_rate': 8e3,\n 'signal': [1] * 5,\n 'origin_pos': [1000, 0, 0],\n 'dest_pos': [300, 200, 50],\n 'origin_vel': [0] * 3,\n 'dest_vel': [0] * 3,\n }\n io.savemat('{}{}_input'.format(tests_path, count), mdict)", "def test_create_experiment_hit_no_config(self):\n with OrionState(experiments=[config]) as cfg:\n experiment = create_experiment(config[\"name\"], storage=cfg.storage_config)\n\n assert experiment.name == config[\"name\"]\n assert experiment.version == 1\n assert experiment.space.configuration == config[\"space\"]\n assert experiment.algorithm\n assert experiment.algorithm.configuration == config[\"algorithm\"]\n assert experiment.max_trials == config[\"max_trials\"]\n assert experiment.max_broken == config[\"max_broken\"]\n assert experiment.working_dir == config[\"working_dir\"]", "def main():\n ex = Experiment(SEED)\n ex.main()", "def collect_data(self, cue_size=10, sigma=0.1, test_factors=[2], \n cue_offset=9*pi/8, **kwargs):\n self.results['cue_size'] = cue_size = (pi/180) * cue_size\n self.results['cue_offset'] = cue_offset\n self.results['test_factors'] = test_factors = [0, 1] + test_factors\n self.results['sigma'] = sigma\n \n # Set up model parameters\n pdict = dict( N_outputs=500, \n N_theta=1000,\n C_W=0.05,\n N_cues_local=1, \n N_cues_distal=1, \n local_cue_std=cue_size,\n cue_offset=cue_offset,\n init_random=False,\n gamma_distal=0, \n num_trials=2*len(test_factors),\n monitoring=True )\n pdict.update(kwargs)\n \n # Create the simulation object and save the cue peak (gamma)\n self.out('Running training simulation...')\n model = VMONoiseModel(**pdict)\n if 'T' in kwargs:\n model.T = kwargs['T']\n cue_gamma = model.gamma_local\n \n # Simulate the phase noise test trials without, then with, cue\n for gamma in 0.0, cue_gamma:\n model.gamma_local = 
gamma\n for factor in test_factors:\n model.sigma = sigma * factor\n model.advance()\n \n # Compute responses and save session data\n self.out('Computing and saving session data files...')\n sessions = VMOSession.get_session_list(model)\n VMOSession.save_session_list(sessions, os.path.join(self.datadir, 'sessions'))\n \n # Save raw simulation data file and clean up\n model.post_mortem().tofile(os.path.join(self.datadir, 'data'))\n \n # Compute population and population lap matrices and save to data directory\n self.out('Computing and saving population responses...')\n clusts = np.arange(pdict['N_outputs'])\n R = [SD.get_population_matrix(clusters=clusts, inplace=True) for SD in sessions]\n R_laps = [SD.get_population_lap_matrix(clusters=clusts, inplace=True) for SD in sessions]\n np.save(os.path.join(self.datadir, 'R_session'), np.asarray(R))\n np.save(os.path.join(self.datadir, 'R_laps'), np.asarray(R_laps))\n \n # All done!\n self.out('Good bye!')", "def gen_ep_data(self,min_trial_len=2,max_trial_len=3,ntrials=2):\n # self.randomize_emat()\n tseq,xseq,yseq = self.gen_seqs_multitrial(min_trial_len,max_trial_len,ntrials)\n xseq_embed = self.embed_xseq(xseq)\n # np to torch\n tseq = tr.unsqueeze(tr.LongTensor(tseq),1)\n xseq_embed = tr.unsqueeze(tr.Tensor(xseq_embed),1)\n yseq = tr.unsqueeze(tr.LongTensor(yseq),1)\n return tseq,xseq_embed,yseq", "def experiment_init(self):\n pass", "def make_test_data(self):\r\n\r\n \r\n\r\n print (\"Creating Test Sample:\")\r\n\r\n print (' Period, rate, reps, phases: ', self.period, self.framerate, self.nrepetitions, self.nPhases)\r\n\r\n nframes = int(self.period * self.framerate * self.nrepetitions)\r\n\r\n print (' nframes: ', nframes)\r\n\r\n if self.bkgdNoise > 0.:\r\n\r\n d = np.random.normal(size=(nframes,self.imageSize[0],self.imageSize[1]),\r\n\r\n loc=self.bkgdIntensity, scale=self.bkgdNoise).astype('float32')\r\n\r\n else:\r\n\r\n d = self.bkgdIntensity*np.ones((nframes,self.imageSize[0],self.imageSize[1])).astype('float32')\r\n\r\n \r\n\r\n ds = d.shape\r\n\r\n print (' data shape: ', ds)\r\n\r\n dx = int(ds[2]/4)\r\n\r\n xc = int(ds[2]/2)\r\n\r\n xo = [xc-dx, xc+dx]\r\n\r\n ywidth = int(ds[2]/(self.nPhases+2))\r\n\r\n framedelay = 4\r\n\r\n\r\n\r\n if not self.mode:\r\n\r\n self.phasex = []\r\n\r\n self.phasey = []\r\n\r\n for i in range(0,self.nPhases):\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # each phase is assigned to a region\r\n\r\n self.resp = np.zeros((nframes,))\r\n\r\n self.resp = np.cos(\r\n\r\n np.linspace(0, 2.0*np.pi*nframes/(self.period*self.framerate), nframes-framedelay)+i*np.pi/8 - np.pi/2.0)\r\n\r\n self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n d[:, xo[0]:xo[1], dy:dy+ywidth ] += self.resp[:, np.newaxis, np.newaxis]\r\n\r\n self.phasey.append( (2+(dy+int(ds[2]/self.nPhases))/2))\r\n\r\n self.phasex.append((6+int(ds[1]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)\r\n\r\n else:\r\n\r\n self.nPhases = 4\r\n\r\n self.spotsize = 16\r\n\r\n nrpts = 20\r\n\r\n nsites = 4\r\n\r\n one_rep = int(self.period*self.framerate)\r\n\r\n isi = int(self.period*self.framerate/self.nPhases)\r\n\r\n print('period, isi: ', self.period, isi)\r\n\r\n r = np.arange(0, nrpts, 1.)\r\n\r\n alpha = 4.\r\n\r\n A = r/alpha *np.exp(-(r-alpha)/alpha) # scaled alpha function\r\n\r\n self.spot= self.gauss_spot(self.spotsize, 3.) 
# the 2d spot\r\n\r\n sigsize = np.random.normal(size=self.nPhases, loc=self.signal_size, scale=self.signal_size*2)\r\n\r\n sigsize = [np.abs(s) for s in sigsize] # restrict to positive amplitudes\r\n\r\n print ('sigsize: ', sigsize)\r\n\r\n for j in range(self.nrepetitions):\r\n\r\n for i in range(self.nPhases):\r\n\r\n self.resp = np.zeros((nrpts, self.spot.shape[0], self.spot.shape[1]))\r\n\r\n for k in range(nrpts):\r\n\r\n self.resp[k,:,:] += sigsize[i]*A[k] * self.spot # make response an alpha time course of gaussian spot\r\n\r\n start = j*one_rep + i*isi + framedelay\r\n\r\n stop = start + nrpts\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # location for phase\r\n\r\n #dy = dy + 2*z\r\n\r\n# print ('start, stop: ', start, stop)\r\n\r\n for z in range(nsites):\r\n\r\n #self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n xp = xo[0] + i*10 - 10*z\r\n\r\n yp = dy - i*10 + 10*z\r\n\r\n d[start:stop, xp:xp+self.spotsize, yp:yp+self.spotsize ] += self.resp\r\n\r\n self.imageData = d # reduce to a 16-bit map to match camera data type\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.times = np.arange(0, nframes/self.framerate, 1.0/self.framerate)\r\n\r\n print( \" Test Image Created\")\r\n\r\n # imv = pg.ImageView()\r\n\r\n # imv.show()\r\n\r\n # imv.setImage(self.imageData)\r\n\r\n\r\n\r\n if self.layout is not None:\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 0, 1)\r\n\r\n self.adjust_image_data()\r\n\r\n self.avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n print (' Test file, original Image Info: ')\r\n\r\n self.print_image_info()\r\n\r\n self.rebin_image()\r\n\r\n #self.clean_windowerrors()\r\n\r\n # pg.image(self.imageData)\r\n\r\n # pg.show()\r\n\r\n # mpl.figure(1)\r\n\r\n # mpl.show()\r\n\r\n if not self.mode: # FFT analysis\r\n\r\n self.analysis_fourier_map(target=1, mode=0)\r\n\r\n self.plot_maps(mode=2, gfilter=self.gfilter)\r\n\r\n else:\r\n\r\n self.analysis_dFF_map()\r\n\r\n mpl.show()", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def start_experiment():\r\n check_parameters()\r\n try:\r\n EXP.start()\r\n except InputError as inst:\r\n tkMessageBox.showinfo(inst.expr, inst.msg)", "def __init__(self, time_grid=None, space_grid=None,\n sensors=None,\n loc_onramp=None, loc_offramp=None,\n vm_cells=None, beta_cells=None, rhoc_cells=None, wc_cells=None,\n num_ensembles=0,\n std_model_noise=None, queue_threshold=17.88,\n init_rho=0, init_qin=0.5, init_qout=0.0):\n\n self.__debug = False\n self.__debug_entrance_sensor = 'IDEALLoc100m'\n self.__debug_exit_sensor = 'IDEALLoc8300m'\n\n # initialize the 
superclass Estimator\n Estimator.__init__(self, time_grid, space_grid,\n loc_onramp, loc_offramp,\n sensors,\n queue_threshold)\n\n # build the index for the system state\n self.x_index, dim_state = self.__build_state_index()\n\n # initialize the super class\n EnKF.__init__(self, dim_state, num_ensembles)\n\n # y_index, and dim_obs, which will be dynamically updated upon arrival of each new data\n self.y_index = None\n self.dim_obs = None\n\n # keep track of the flow between cells for each ensemble which will be used to construct the observation\n self.__f_flow = {}\n self.__f_flow['time'] = np.array(self.time_grid[1:])\n self.__f_flow['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_flow['data'][i] = []\n\n # keep track of the speed between cells for each ensemble which will be used to construct the observation\n self.__f_speed = {}\n self.__f_speed['time'] = np.array(self.time_grid[1:])\n self.__f_speed['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_speed['data'][i] = []\n\n # save all the estimated states here\n self.est_state_all = np.matrix(np.zeros((self.dim_state, self.num_steps), float))\n\n # =================================================\n # Add additive noise to state.\n self.Q = OrderedDict()\n # initialize with all cell var\n self.Q = np.diag(np.ones(dim_state) * (std_model_noise['cell'] ** 2))\n\n # print('onramps:{0}; offramps:{1}'.format(self.cell_onramp, self.cell_offramp))\n # add onramp larger noise\n if self.cell_onramp is not None:\n for on_cell in self.cell_onramp:\n if 0 <= on_cell <= self.num_cells:\n idx = self.x_index['density'][on_cell]\n self.Q[idx, idx] = std_model_noise['oncell'] ** 2\n # add offramp larger noise\n if self.cell_offramp is not None:\n for off_cell in self.cell_offramp:\n if 0 <= off_cell <= self.num_cells:\n idx = self.x_index['density'][off_cell]\n self.Q[idx, idx] = std_model_noise['offcell'] ** 2\n # add qin variance\n idx = self.x_index['qin']\n self.Q[idx, idx] = std_model_noise['qin'] ** 2\n # add qout variance\n idx = self.x_index['qout']\n self.Q[idx, idx] = std_model_noise['qout'] ** 2\n\n # self.Q = std_model_noise\n # if np.size( self.Q['vm'] ) == 1:\n # # if it was a single value, then it was specified as std, not var (which = std^2)\n # self.Q['vm'] = np.diag( np.ones( self.num_cells )*(self.Q['vm']**2) )\n # if np.size( self.Q['beta'] ) == 1:\n # self.Q['beta'] = np.diag( np.ones( self.num_cells )*(self.Q['beta']**2) )\n # if np.size( self.Q['rhoc'] ) == 1:\n # self.Q['rhoc'] = np.diag( np.ones( self.num_cells )*(self.Q['rhoc']**2) )\n # if np.size( self.Q['wc'] ) == 1:\n # self.Q['wc'] = np.diag( np.ones( self.num_cells )*(self.Q['wc']**2) )\n #\n # if self.loc_onramp is not None and np.size(self.Q['onramp']) == 1:\n # self.Q['onramp'] = np.diag( np.ones(len(loc_onramp))*(self.Q['onramp']**2) )\n # if self.loc_offramp is not None and np.size(self.Q['offramp']) == 1:\n # self.Q['offramp'] = np.diag( np.ones(len(loc_offramp))*(self.Q['offramp']**2) )\n\n\n # =================================================\n # save the fundamental diagram for each cell\n # vm parameter\n if isinstance(vm_cells, numbers.Number):\n self.vm_cells = np.ones((self.num_cells, 1)) * float(vm_cells)\n else:\n self.vm_cells = np.array(vm_cells).astype(float)\n self.vm_cells = self.vm_cells.reshape((self.num_cells, 1))\n\n # beta parameter\n if isinstance(beta_cells, numbers.Number):\n self.beta_cells = np.ones((self.num_cells, 1)) * float(beta_cells)\n else:\n self.beta_cells = 
np.array(beta_cells).astype(float)\n self.beta_cells = self.beta_cells.reshape((self.num_cells, 1))\n\n # rhoc parameter\n if isinstance(rhoc_cells, numbers.Number):\n self.rhoc_cells = np.ones((self.num_cells, 1)) * float(rhoc_cells)\n else:\n self.rhoc_cells = np.array(rhoc_cells).astype(float)\n self.rhoc_cells = self.rhoc_cells.reshape((self.num_cells, 1))\n\n # wc parameter\n if isinstance(wc_cells, numbers.Number):\n self.wc_cells = np.ones((self.num_cells, 1)) * float(wc_cells)\n else:\n self.wc_cells = np.array(wc_cells).astype(float)\n self.wc_cells = self.wc_cells.reshape((self.num_cells, 1))\n\n # other use ful parameters\n self.qmax_cells = self.vm_cells * self.rhoc_cells - \\\n self.vm_cells * (self.rhoc_cells ** 2) / self.beta_cells\n\n self.rhomax_cells = - self.qmax_cells / self.wc_cells + self.rhoc_cells\n\n # =======================================================================\n self.init_rho = init_rho\n self.init_qin = init_qin\n self.init_qout = init_qout\n\n # =======================================================================\n # FOR DEBUGGING\n # recored the forecast and analysis value for qin and qout\n if self.__debug:\n self.qin_f = []\n self.qin_a = []\n self.qin_obs = []\n self.qout_f = []\n self.qout_a = []\n self.qout_obs = []", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def gen_ep_data(self,num_trials=1,trial_len=20,pm_probe_positions=None):\n # insert extra positive trials than expected by chance\n pos_og_bias=np.random.randint(1,100,1)\n # initialize returnables\n ep_len = num_trials*(trial_len+self.nmaps)\n inst_seq = -np.ones([ep_len])\n stim_seq = -np.ones([ep_len,self.sdim])\n action_seq = -np.ones([ep_len])\n\n # loop over trails\n for trial in range(num_trials):\n ## randomize emats\n self.shuffle_pms()\n # generate trial idx_seq\n inst_stim_seq_int,inst_action_seq_int = self.gen_trial_inst_phase()\n resp_stim_seq_int,resp_action_seq_int = self.gen_trial_resp_phase(\n trial_len,pos_og_bias,pm_probe_positions)\n # embed stim idx_seq\n inst_stim_seq = self.emat[inst_stim_seq_int]\n 
resp_stim_seq = self.emat[resp_stim_seq_int]\n # collect\n t0 = trial*(trial_len+self.nmaps)\n t1 = t0+trial_len+self.nmaps\n inst_seq[t0:t1] = np.concatenate([inst_stim_seq_int,np.zeros(trial_len)],axis=0)\n stim_seq[t0:t1] = np.concatenate([inst_stim_seq,resp_stim_seq],axis=0)\n action_seq[t0:t1] = np.concatenate([inst_action_seq_int,resp_action_seq_int],axis=0)\n inst_seq = tr.LongTensor(inst_seq).unsqueeze(1) # batch dim\n stim_seq = tr.Tensor(stim_seq).unsqueeze(1) \n action_seq = tr.LongTensor(action_seq).unsqueeze(1) \n return inst_seq,stim_seq,action_seq" ]
[ "0.62235945", "0.6049082", "0.6045395", "0.5995295", "0.5993515", "0.5865029", "0.5863797", "0.579151", "0.5732537", "0.57056975", "0.56597024", "0.56380254", "0.56178087", "0.56018513", "0.55858195", "0.5584628", "0.5583475", "0.55769795", "0.5555405", "0.55462915", "0.55410975", "0.554037", "0.5529047", "0.55285174", "0.5523626", "0.5494804", "0.54930544", "0.54929054", "0.5489978", "0.54839057" ]
0.7082174
0
provides a number to sort related in ascending length of description
def sorter(r): ans = len(r.description) if additional: for rr in r.monkey_additional: ans += len(rr) return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort():\n return -1", "def Order(self) -> int:", "def _natural_sort_worksheet(x):\n l = re.findall(r\"\\d+$\", x.title)\n if l:\n return int(l[0])\n\n return -1", "def _wiki_sort_key(doc):\n url = doc['url']\n return 1 if url.startswith('https://en.wikipedia') else -1", "def bigSorting(unsorted):\n lookup = defaultdict(lambda: [])\n print(lookup)\n for num_string in unsorted:\n lookup[len(num_string)].append(num_string)\n\n results = []\n lengths = list(lookup.keys())\n lengths.sort()\n for length in lengths:\n x = lookup[length]\n x.sort()\n results = results + x\n print(results)\n return results", "def sort_key(self):\n ...", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def sortkey(item):\n chrom, pos, ref, alt = item[0]\n if chrom.startswith('chr'):\n chrom = chrom[3:]\n if chrom.isdigit():\n chrom = int(chrom)\n return (chrom, pos, len(ref), len(alt))", "def _status_sort(self):\n s = 0\n if self.status == 'failed':\n s += 10\n if self.image_status == 'diff':\n s += 3\n elif self.image_status == 'missing':\n s += 4\n if self.hash_status == 'diff':\n s += 1\n elif self.hash_status == 'missing':\n s += 5\n return f\"{s:02.0f}\"", "def order(self):\n return self.n", "def sortby(self):\n ...", "def get_sort_field(self, kind, order, is_number):\n pass", "def sortValue(self, data):\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''", "def compare(self) -> int:", "def sortValue(self, data):\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0", "def sort_format(src):\n src_list= src.split('-')\n res_list= []\n for elm in src_list:\n try:\n res_list.append('%05d' % int(elm))\n except:\n res_list.append(elm)\n res= '-'.join(res_list)\n return res", "def get_longest_digit_count_desc(lst):\n rezultat = []\n for i in lst:\n if is_desc(i):\n rezultat.append(i)\n return rezultat", "def sort_and_digitize(line):\n return int(''.join(sorted(str(line))))", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def local_h( author, data ) :\n papers = papers_and_cites( author, data )\n papers.sort( lambda a,b : cmp(int(b[1]), int(a[1])) )\n i = 0\n while i < (len(papers)-1) and i < int(papers[i][1] ) :\n i = i + 1\n\n return (i+1, int(papers[i][1]) )", "def get_sort_query(self, kind, order, is_number):\n pass", "def get_word_len(self, length):\n\n sortedList = self.sortedList\n\n step = rd.randint(0, length) * round(time.time())\n print(step)\n\n n = rd.randint(0, len(sortedList) - 1)\n print(n)\n while len(sortedList[n]) != length:\n if len(sortedList[n]) > length:\n n += step\n else:\n n -= step\n if n < 0 or n > len(sortedList):\n n = rd.randint(0, len(sortedList) - 1)\n\n return sortedList[n]", "def sort_1(l):\n pass", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )", "def test_calc_sort_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name", "def 
_get_field_sort_key(self, field):\n if not field.is_relation:\n return -1\n return 0 if field.many_to_many else 1", "def countingSort(my_list, target):\r\n count = [0 for i in range(26)]\r\n position = [0 for i in range(26)]\r\n output = [0 for i in range(len(my_list))]\r\n if len(my_list) > 1:\r\n for i in range(len(my_list)):\r\n count[ord(my_list[i][0][target]) - 97] += 1\r\n position[0] = 0\r\n for i in range(1, len(position)):\r\n position[i] = position[i - 1] + count[i - 1]\r\n for i in range(len(my_list)):\r\n key = my_list[i][0]\r\n pos=0\r\n index = ord(my_list[i][0][target]) - 97\r\n if count[index] != 0:\r\n pos = position[index]\r\n position[index] += 1\r\n output[pos] = my_list[i]\r\n\r\n return output", "def cmp_numcite( a, b ) :\n return cmp( int(b['Z9']), int(a['Z9']) )", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items" ]
[ "0.6401724", "0.62728316", "0.60654205", "0.60012025", "0.59754735", "0.59544206", "0.59370434", "0.59157306", "0.58810264", "0.58680815", "0.58301955", "0.5825593", "0.57766974", "0.5709983", "0.5653453", "0.560097", "0.5593121", "0.555219", "0.5539747", "0.5538889", "0.5534576", "0.55307794", "0.5512389", "0.5501747", "0.5498305", "0.5471895", "0.5465556", "0.54616284", "0.5441643", "0.54373246" ]
0.6969995
0
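A minimal usage sketch for the `sorter` key function in the row above, assuming the objects being sorted expose `description` and `monkey_additional` fields and that `additional` is a flag supplied by the enclosing scope; the `SimpleNamespace` records are hypothetical stand-ins for illustration only.

```python
from types import SimpleNamespace

additional = True  # assumed: in the original this flag comes from enclosing scope

def sorter(r):
    # Total description length, optionally counting the extra requirement notes.
    ans = len(r.description)
    if additional:
        for rr in r.monkey_additional:
            ans += len(rr)
    return ans

# Hypothetical requirement-like records, just for illustration.
reqs = [
    SimpleNamespace(description="short", monkey_additional=[]),
    SimpleNamespace(description="a much longer requirement description",
                    monkey_additional=["extra note"]),
    SimpleNamespace(description="medium length", monkey_additional=[]),
]

# Sorted in ascending order of total description length, as the query describes.
for r in sorted(reqs, key=sorter):
    print(r.description)
```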
Initialise with a MIP document
def __init__(self, document): self._settemplates(self.onecol, self.twocol) assert document.type_key == 'cim.2.designing.Project' self.doc = document # We will populate the "mip" variable with the mip era self.mips = 'CMIP6' self.related = [] for r in self.doc.required_experiments: self.related.append(esd.retrieve(r.id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, doc):\n\n self.doc = doc\n if self.doc.doi:\n self._populate()\n self.populated = True\n else:\n self.populated = False", "def __init__(self, document):\n self._settemplates(self.onecol, self.twocol)\n assert document.type_key == 'cim.2.designing.NumericalExperiment'\n self.doc = document\n\n # In most cases there is only one related mip.\n # Handle the edge case here rather than in template.\n mips = ''\n for m in document.related_mips:\n mips += m.name+', '\n self.mips = mips[:-2]\n\n self.related = []\n for r in self.doc.requirements:\n req = esd.retrieve(r.id)\n # Are there additional requirements?\n # If so, let's monkey patch their long names onto the linkage...\n # The other choice would be to have a requirements class WITH\n # it's own template.\n req.monkey_additional = []\n if req.additional_requirements:\n for rr in req.additional_requirements:\n rreq = esd.retrieve(rr.id)\n req.monkey_additional.append(rreq.long_name)\n self.related.append(req)", "def create(init_document: 'Document') -> 'DocumentArray':", "def __init__(self, doc_set):\n self.__doc_set = doc_set\n self.__preprocessor = InputPreprocessor(doc_set)", "def __init__(self):\n super(BlipDocument, self).__init__()\n self.annotations = Annotations()\n self.rotation = 0", "def __init__(self, blip_data, context):\n super(OpBasedDocument, self).__init__(blip_data)\n self.__context = context", "def __init__(self, doc, target=None, stylesheet=\"\"):\n self.document = doc\n self.stylesheet = stylesheet\n self.target = target if target is not None else StringIO()", "def __init__(self, *args):\n this = _libsbml.new_SBMLDocument(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _libsbml.new_CompSBMLDocumentPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def Init(self, *args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Init(self, *args)", "def __init__(self, txt='', unicodeEncoding='utf-8'):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup, self).__init__(__txt=None,\n __rawtxt=txt,\n __scope=None,\n __SCOPEUPDATED=False)\n self.__document = nx.DiGraph()\n self.__document.add_node(\"top\", category=\"document\")\n self.__VERBOSE = False\n self.__tagID = 0\n self.__unicodeEncoding = unicodeEncoding", "def __init__(self, template=site_template):\n self.template = template\n self.template_file = None\n if not os.path.exists(self.template):\n raise OpagMissingPrecondition, \"%s does not exist\" % self.template", "def __init__(self, xdoc):\n \n self.xdoc = parse(xdoc)\n self.mapa = mapa.Mapa(self.xdoc)\n self.scrollx = 0\n self.scrolly = 0\n self.eventos = self.load_events()", "def initialise(self):", "def __init__(self,inp='INP.mcnp'):\n # Material dictionary for the moderator, light guide, and detector\n self.material = {'Moderator':None,'Detector':None,'LightGuide':None}\n self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector\n self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA\n self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HPDE\n \n # Cell and Surface Inital Numbering\n self.CellStartNum = 600\n self.SurfaceStartNum = 600\n self.ZeroSurfaceNum = 500\n self.UniverseNum = 200\n self.surfGeo = None\n self.inp = inp\n self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'\n self.setMaterial(0.1,'PS')", "def __init__(self, docx, strict=False):\n 
self.strict = strict\n document_part = Package.open(docx).main_document_part\n if document_part.content_type != CONTENT_TYPE.WML_DOCUMENT_MAIN:\n tmpl = \"file '%s' is not a Word file, content type is '%s'\"\n raise ValueError(tmpl % (docx, document_part.content_type))\n super().__init__(document_part._element, document_part)", "def __init__(self, *args):\n this = _libsbml.new_SBMLDocumentPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def init(self) -> None:", "def __init__(self, infile, runparser=True):\n super(FluoViewMosaic, self).__init__(infile)\n self.tree = self.validate_xml()\n self.mosaictrees = self.find_mosaictrees()\n if runparser:\n self.add_mosaics()", "def __init__(self, ):\n super(MayaRefobjInterface, self).__init__()", "def init(self):\n logger.info(mm_cnofs.ackn_str)\n self.acknowledgements = mm_cnofs.ackn_str\n self.references = '\\n'.join((mm_cnofs.refs['mission'],\n mm_cnofs.refs['vefi']))\n\n return", "def __init__(self, doc, artDirection, path=None, mdText=None, startPage=1,\n name=None, **kwargs):\n self.doc = doc # Reuired Document instance. Assummed to be initialized with size and styles.\n self.artDirection = artDirection\n self.path = path # Optional path to markdown file, if it has content.\n self.mdText = mdText # Optional markdown string.\n self.name = name or 'Untitled'\n self.startPage = startPage", "def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')", "def __init__(self, model_pool):\n super(Template, self).__init__(model_pool)\n self.nlp_model = None", "def init_doc(self):\n raise NotImplementedError()", "def __init__(self, sourcefile, title=\"\", genre=\"classic\"):\n\n if self.__class__.__name__ != \"SentencePPT\":\n self._assert_class_variables()\n self._template = self.__class__._templates[genre]\n self._prs = Presentation(self._template)\n self._title = title\n self._sourcefile = sourcefile\n self._assert_content()\n else:\n raise TypeError(self.__class__.__doc__)", "def __init__(self, morphit):\r\n self.MorphItFileName = morphit\r\n\r\n self.verbTags = ['VER', 'AUX', 'MOD', 'CAU', 'ASP']\r\n self.words = self.LoadWords()\r\n if not self.words:\r\n self.ReadMorphit()\r\n self.SaveWords()", "def __init__(self, resource_path=None):\n\n (\n self.tokenized_document,\n self.stack,\n self.source_provider,\n self.__parse_properties,\n ) = (None, None, None, None)\n\n if not resource_path:\n resource_path = os.path.join(os.path.split(__file__)[0], \"resources\")\n InlineHelper.initialize(resource_path)", "def __init__(self, reporoot, relnotesdir=None):\n self.reporoot = reporoot\n if relnotesdir is None:\n relnotesdir = defaults.RELEASE_NOTES_SUBDIR\n self.relnotesdir = relnotesdir\n # Initialize attributes from the defaults.\n self.override(**{o.name: o.default for o in _OPTIONS})\n\n self._contents = {}\n self._load_file()", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"" ]
[ "0.6678455", "0.6528227", "0.61595803", "0.6156851", "0.6055568", "0.5943074", "0.5899902", "0.58804005", "0.5799892", "0.5787662", "0.5785426", "0.57476616", "0.5745662", "0.5730278", "0.5711061", "0.57024944", "0.57002866", "0.56933963", "0.56916255", "0.5650309", "0.5642371", "0.56372166", "0.563107", "0.5619945", "0.5611441", "0.56067646", "0.56033146", "0.559048", "0.55903476", "0.5570163" ]
0.7006556
0
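A sketch of the wrapping pattern shown in the `__init__` of the row above, with a hypothetical `retrieve` function and `SimpleNamespace` documents standing in for the external document store (`esd`) and the CIM project object; the template setup is omitted and all names here are assumptions.

```python
from types import SimpleNamespace

# Hypothetical stand-in for the external document store queried per linked id.
_STORE = {
    "exp-1": SimpleNamespace(id="exp-1", long_name="Historical simulation"),
    "exp-2": SimpleNamespace(id="exp-2", long_name="Pre-industrial control"),
}

def retrieve(doc_id):
    return _STORE[doc_id]

class ProjectView:
    def __init__(self, document):
        # Only project documents are accepted, mirroring the assert in the row above.
        assert document.type_key == "cim.2.designing.Project"
        self.doc = document
        self.mips = "CMIP6"
        # Resolve each linked experiment into its full document.
        self.related = [retrieve(r.id) for r in self.doc.required_experiments]

project = SimpleNamespace(
    type_key="cim.2.designing.Project",
    required_experiments=[SimpleNamespace(id="exp-1"), SimpleNamespace(id="exp-2")],
)
view = ProjectView(project)
print([e.long_name for e in view.related])
```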
make sure the html output works
def testHTML(self): html = self.E.html()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_html_output(self):\n pass", "def test_error_html_using_patch(self):\n pass", "def test_prep_fields_called_html_output(self):\n pass", "def get_html(self):\r\n pass", "def rawHTMLrendered(self):", "def __html__(self):\n return self.html", "def test_error_html_using_put(self):\n pass", "def test_error_html_using_get(self):\n pass", "def test_error_html_using_post(self):\n pass", "def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(\n b'<h1>42 Coffee Cups Test Assignment</h1>',\n response.content)", "def test_error_html_using_head(self):\n pass", "def get_html(self):\r\n return u'This is supposed to be test html.'", "def assert_studio_view_valid_html(block, html):\r\n pass", "def test_html_structure(self):\n self.assertContains(self.response, '<form', 1)\n self.assertContains(self.response, '<input', 3)\n #3 pois são 2 filefield mais o csrf\n self.assertContains(self.response, 'type=\"file\"', 1)\n self.assertContains(self.response, 'type=\"submit\"', 1)", "def test_error_html_using_options(self):\n pass", "def convert_html():\n return", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))", "def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)", "def assert_student_view_valid_html(block, html):\r\n pass", "def test_gethtml_multiple(self):\r\n mock_module = VerticalWithModulesFactory.create()\r\n out_html = mock_module.render('student_view').content\r\n self.assertTrue('Test numerical problem.' in out_html)\r\n self.assertTrue('Another test numerical problem.' in out_html)", "def test_get_root_html(self):\n pass", "def has_html_gui(self):\r\n return True", "def test_lessthan(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)", "def test_get_root_html2(self):\n pass", "def test_gettesttools_html(self):\n pass", "def plain(self):\n return not self.html", "def assert_studio_view_invalid_html(block, html):\r\n assert False, \"studio_view should produce valid html\"", "def test_html_repr():\n repr_html = grid._repr_html_()\n assert repr_html is not None", "def initFormat(self):\n self.html = True" ]
[ "0.84040964", "0.69680446", "0.6919716", "0.687463", "0.6762656", "0.67169726", "0.66983867", "0.65646785", "0.6555906", "0.6548506", "0.6533556", "0.65294725", "0.6498341", "0.6488538", "0.6470183", "0.64627945", "0.6459197", "0.64506996", "0.64353263", "0.64152896", "0.6384148", "0.6376362", "0.6356995", "0.6349154", "0.6324273", "0.63223004", "0.62979436", "0.62601465", "0.6223759", "0.62206346" ]
0.7505332
1
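A self-contained sketch of the test pattern in the row above; `FakeExperiment` and its `html()` return value are assumptions, and an explicit assertion is added so the test checks more than that the call does not raise.

```python
import unittest

class FakeExperiment:
    """Hypothetical stand-in for the `self.E` object exercised by testHTML."""
    def html(self):
        return "<html><body><h1>experiment</h1></body></html>"

class TestHTMLOutput(unittest.TestCase):
    def setUp(self):
        self.E = FakeExperiment()

    def testHTML(self):
        html = self.E.html()
        self.assertIn("<html>", html)  # sanity check on the rendered output

if __name__ == "__main__":
    unittest.main()
```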
For example purposes, we do not remove the outputs, which is why this is NOtearDown. If you really want to use this for unit tests, rename to tearDown.
def NOtearDown(self): for f in self.testoutput: if os.path.exists(f): os.remove(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self):\n if not self.test_manager.leave_output:\n shutil.rmtree(self.directory)", "def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def tearDown(self):\n pass #because we dont have anything to tearDown.", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self):\n print('tearDown method\\n')", "def tearDown(self):\n \n return", "def tearDown(self):\n\t\tpass", "def tearDown(self):\r\n testing.tearDown()", "def tearDown(self):\n print 'unittest.tearDown()'\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.80834854", "0.7997705", "0.7978577", "0.7940476", "0.7940476", "0.7940476", "0.78952134", "0.78137136", "0.77655464", "0.77655464", "0.77423745", "0.7723258", "0.7723258", "0.7723258", "0.7717584", "0.7717088", "0.77133566", "0.7675167", "0.76713866", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465", "0.76457465" ]
0.80674917
1
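A sketch of the cleanup pattern described in the row above, with temporary files standing in for the real outputs; keeping the method named `NOtearDown` would preserve the files, while naming it `tearDown` (as here) removes them after each test.

```python
import os
import tempfile
import unittest

class OutputCleanupTest(unittest.TestCase):
    def setUp(self):
        # Hypothetical output files created by the code under test.
        self.testoutput = []
        for _ in range(2):
            fd, path = tempfile.mkstemp(suffix=".out")
            os.close(fd)
            self.testoutput.append(path)

    def tearDown(self):  # rename to NOtearDown to keep outputs for inspection
        for f in self.testoutput:
            if os.path.exists(f):
                os.remove(f)

    def test_outputs_exist(self):
        for f in self.testoutput:
            self.assertTrue(os.path.exists(f))

if __name__ == "__main__":
    unittest.main()
```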
Generate bootstrap replicate of 1D data.
def bootstrap_replicate_1d(data, func): bs_sample = np.random.choice(data, len(data)) return func(bs_sample)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)", "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n\n return func(bs_sample)", "def bootstrap(X):\n return X[np.random.choice(list(range(X.shape[0])), size=X.shape[0]), :]", "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def bootstrap(data,func,nboot):\n\n n = len(data)\n resamples = np.array([[random.choice(data) for i in range(n)]\n for j in range(nboot)])\n return np.apply_along_axis(func, 1, resamples)", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def _get_bootstrap_sample(x, y, num_reps):\r\n combined = array(list(x) + list(y))\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n for i in range(num_reps):\r\n # sampling with replacement\r\n indices = randint(0, total_obs, total_obs)\r\n sampled = combined.take(indices)\r\n # split into the two populations\r\n sampled_x = sampled[:num_x]\r\n sampled_y = sampled[num_x:]\r\n yield sampled_x, sampled_y", "def dataset_augmentation(data_start, bootstrapping = 1, epurate = 1, shuffle = True):\n data = data_start\n for ii in range(bootstrapping):\n data = data.append(data_start.apply(bootstrap_sample, axis=1), ignore_index=True)\n\n#Bugged version that weirdly works well....\n# for ii in range(bootstrapping):\n # data = data.append(bootstrap_sample(data_start), ignore_index=True)\n\n for ii in range(epurate):\n data = data.append(data_start.apply(epurate_sample, axis=1), ignore_index=True)\n\n # Shuffling (Important)\n if shuffle == True:\n data = data.sample(frac=1)\n return data", "def bootstrap_resample(labels):\n idxs = np.arange(len(labels))\n num_labels = max(labels) + 1\n bootstrap_idxs = np.zeros_like(idxs)\n ptr = 0\n for i in range(num_labels):\n strat = idxs[labels == i]\n bootstrap_idxs[ptr:ptr + len(strat)] = np.random.choice(strat, len(strat), replace=True)\n ptr += len(strat)\n return bootstrap_idxs", "def bs_replicate(data, func=np.mean):\n bs_sample = np.random.choice(data, replace=True, size=len(data))\n return func(bs_sample)", "def get_bootstraps(self):\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()", "def bootstrap_sample_from_data(data, weights=None, seed=0):\n # Set up the random number generator\n RNG = np.random.default_rng(seed)\n N = data.shape[0]\n\n # Set up weights\n if weights is not None:\n cutoffs = np.cumsum(weights)\n else:\n cutoffs = np.linspace(0, 1, N)\n\n # Draw random indices\n indices = np.searchsorted(cutoffs, RNG.uniform(size=N))\n\n # Create a bootstrapped sample\n new_data = deepcopy(data[indices,])\n return new_data", "def bootstrap_sample_generator_1D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[0]\n\n while True:\n 
_indices = np.random.randint(0, high=n_samples, size=n_samples)\n\n yield samples[_indices]", "def boot_matrix(z, B):\n\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n \r\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def boot_matrix(z, B):\n z = np.array(z).flatten()\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def genBootstrapData(fullData, dirName=\"bootstrap_data/\", ti=None, tf=None, n=1, blockLen=7):\n # If initial (final) time not given, apply block bootstrap to whole data set\n if ti == None:\n ti = 0\n if tf == None:\n tf = fullData.shape[0]\n\n # Reset seed\n np.random.seed()\n\n bsSets = []\n\n for i in range(0, n):\n bsSet = fullData.copy()\n\n # Loop over the sensors\n for sensor in range(fullData.shape[1]):\n # Loop over the blocks\n for tStart in range(ti, tf, blockLen):\n # Resample only the non-nan datapoints\n # TODO: is this a valid way of doing this???\n oldBlockNonNans = bsSet[tStart:tStart+blockLen, sensor].copy()\n oldBlockNonNans = oldBlockNonNans[np.isfinite(oldBlockNonNans)]\n\n for t in range(tStart, min(tStart + blockLen, fullData.shape[0])):\n if not np.isnan(bsSet[t, sensor]):\n bsSet[t, sensor] = np.random.choice(oldBlockNonNans, 1, replace=False)\n\n bsSets.append(bsSet)\n\n # Save the dataset\n np.savetxt(dirName + \"/blockLen=%i_%i.csv\"%(blockLen, i), bsSet, delimiter=\" \", fmt=\"%f\")\n\n return bsSets", "def compute_bootstrapped_sample(X_table, y_table):\n n = len(X_table)\n X_sample = []\n y_sample = []\n for _ in range(n):\n rand_index = random.randrange(0, n)\n X_sample.append(X_table[rand_index])\n y_sample.append(y_table[rand_index])\n return X_sample, y_sample", "def bootstrap_data_array(data, H, R, permute=None):\n\n if permute is None:\n permute = randint(data.shape[-1], size=data.shape[-1])\n assert R.shape == H.shape\n assert len(permute) == R.shape[-1]\n R = R[permute]\n data = dot(data, (H+R).T)\n return data", "def bootstrap(items, choices, repeats):\n for i in range(repeats):\n yield sample(items, choices, replace=True)", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def bootstrap_array(f_values, values, bootstraps=1000):\n x = np.array(values)\n x_bs = x[np.random.randint(x.size, size=(bootstraps, x.size))]\n return np.array(map(f_values, x_bs))", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n\r\n resample_i = N.floor(N.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def bootstrap( M ):\n tot_ct_per_ob = M.sum(axis = 0)\n zero_list = []\n for i in range(len(tot_ct_per_ob)):\n if tot_ct_per_ob[i] <= 0:\n zero_list.append(i)\n M = np.delete(M, zero_list, 1)\n new_M = np.zeros(M.shape)\n tot_ct_per_ob = M.sum(axis = 0)\n for i in range(len(M)):\n for j in range(len(M[0])):\n new_M[i][j] = M[i][j]/tot_ct_per_ob[j]\n new_M = np.transpose(new_M)\n bootstrap = []\n for i in range(len(tot_ct_per_ob)):\n rnd_vec = np.random.multinomial(tot_ct_per_ob[i], new_M[i])\n bootstrap.append(rnd_vec)\n \n bootstrap = np.transpose(np.asarray(bootstrap)) \n return bootstrap", "def make_repeatable():\n 
random.seed(1234)\n np.random.seed(1234)", "def bootstrap_resample(self, X, n=None):\n if n == None:\n n = len(X)\n\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\n X_resample = X.iloc[resample_i, :]\n return X_resample", "def draw_bootstrap_pairs(x, y, func, size=1):\n\n # Set up array of indices to sample from: inds\n inds = np.arange(len(x))\n\n # Initialize replicates: bs_replicates\n bs_replicates = np.empty(size)\n\n # Generate replicates\n for i in range(size):\n bs_inds = np.random.choice(inds, len(inds))\n bs_x, bs_y = x[bs_inds], y[bs_inds]\n bs_replicates[i] = func(bs_x, bs_y)\n\n return bs_replicates", "def eg_pre():\n\n print(\"\\teg3\")\n\n d = 1\n\n for _ in range(10):\n t1 = []\n t2 = []\n\n for _ in range(32):\n t1.append(utils.gaussian(10, 1))\n t2.append(utils.gaussian(d * 10, 1))\n\n print(\"\", \"\", d, d < 1.1, stats.bootstrap(\n t1, t2), stats.bootstrap(t1, t1), sep=\"\\t\")\n\n d = round(d + .05, 2)" ]
[ "0.7789665", "0.77793765", "0.740078", "0.7145126", "0.6915617", "0.676676", "0.65354204", "0.6499645", "0.6398692", "0.62889034", "0.62032557", "0.6193025", "0.6095193", "0.601948", "0.5998936", "0.58984625", "0.5879488", "0.58694214", "0.5786773", "0.5758723", "0.5742155", "0.57388103", "0.5737552", "0.5737403", "0.56797403", "0.5634709", "0.5575294", "0.557007", "0.55336916", "0.5503485" ]
0.78276443
0
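A usage sketch for `bootstrap_replicate_1d` from the row above, assuming NumPy is available; the synthetic data, number of replicates, and confidence level are arbitrary choices for illustration.

```python
import numpy as np

def bootstrap_replicate_1d(data, func):
    # Resample with replacement (np.random.choice defaults to replace=True),
    # then reduce the resample with the supplied summary function.
    bs_sample = np.random.choice(data, len(data))
    return func(bs_sample)

data = np.random.normal(loc=10.0, scale=2.0, size=200)

# Many replicates of the mean approximate its sampling distribution.
replicates = np.array([bootstrap_replicate_1d(data, np.mean) for _ in range(1000)])

# A 95% bootstrap confidence interval for the mean.
low, high = np.percentile(replicates, [2.5, 97.5])
print(f"mean ~ {replicates.mean():.2f}, 95% CI ~ [{low:.2f}, {high:.2f}]")
```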
Actualiza los canvas, los pinta en esta ventana, y lleva a cabo el flip para mostrar los cambios
def display(self): for c in self.canvas.values(): c.update() self.superficie.blit(c.superficie, c.origen) pygame.display.flip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop(self, frame):\n self.root = frame\n self.drawUI()\n cv2.imshow('Fotopasca', self.root)", "def renderizar(self):\n\t\t# Limpiar la pantalla\n\t\tglClear(GL_COLOR_BUFFER_BIT)\n\t\t# Renderizar la escena\n\t\tself.escena.renderizar()\n\t\t# Renderizar los buffers a la pantalla\n\t\tpygame.display.flip()", "def cambiar_escena(self, escena):\n\t\t# Reemplazo directo\n\t\tself.escena = escena\n\t\t# Reiniciar la ventana con el tamaño de la nueva escena\n\t\tprint(\"Iniciando nuevo contexto OpenGL...\")\n\t\tv_ancho, v_alto = escena.tam\n\t\topciones = OPENGL | DOUBLEBUF\n\t\tif escena.pant_compl:\n\t\t\topciones |= FULLSCREEN\n\t\tpygame.display.set_mode((v_ancho, v_alto), opciones)\n\t\t# Título por defecto de la ventana\n\t\tpygame.display.set_caption(escena.nombre)\n\t\t# Reiniciar OpenGL\n\t\tself.gl_ini(v_ancho, v_alto)\n\t\t# Darle los datos del núcleo a la ventana\n\t\tself.escena.nucleo = self\n\t\tself.escena.eventos = self.mapa_eve\n\t\tglClearColor(*escena.color)\n\t\t# Ejecutar la lógica inicial de la escena\n\t\tprint(\"Iniciando escena...\")\n\t\tself.escena.logica_ini()", "def draw(self, screen):", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def before_flip(self):\n from klibs.KLGraphics import blit\n\n if P.show_gaze_dot and self.el.recording:\n try:\n blit(self.tracker_dot, 5, self.el.gaze())\n except RuntimeError:\n pass", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def update(self):\n # Desplazar izquierda/derecha\n self.rect.x += self.cambio_x\n global COL\n COL = False\n # Hemos chocado contra la pared despues de esta actualizacion?\n lista_impactos_bloques = pygame.sprite.spritecollide(self, self.paredes, False)\n for bloque in lista_impactos_bloques:\n #Si nos estamos desplazando hacia la derecha, hacemos que nuestro lado derecho sea el lado izquierdo del objeto que hemos tocado-\n if self.cambio_x > 0:\n self.rect.right = bloque.rect.left\n else:\n # En caso contrario, si nos desplazamos hacia la izquierda, hacemos lo opuesto.\n self.rect.left = bloque.rect.right\n COL = True\n \n # Desplazar arriba/izquierda\n self.rect.y += self.cambio_y\n \n # Comprobamos si hemos chocado contra algo\n lista_impactos_bloques = pygame.sprite.spritecollide(self, self.paredes, False) \n for bloque in lista_impactos_bloques:\n \n # Reseteamos nuestra posicion basandonos en la parte superior/inferior del objeto.\n if self.cambio_y > 0:\n self.rect.bottom = bloque.rect.top \n else:\n self.rect.top = bloque.rect.bottom \n COL = True", "def _drawOnCanvas(self):\n self.canvas=np.ones(self.canvas.shape,dtype=np.uint8)*255\n for key in self.elements:\n graphElement=self.elements[key]\n graphElement.draw(self.canvas)\n self.sync=True", "def creditos():\n playSegundaria()\n img_creditos = pygame.image.load(\"imagenes/creditos.jpg\")\n screen.blit(img_creditos, (0,0))\n pygame.display.update()\n time.sleep(8)\n run()", "def preGameScreen(self) -> None:\n\n # 
load all images\n # created with: https://de.flamingtext.com/Free-Logo-Designs/\n logo = Image(Configuration.windowWidth / 2 - 750 / 2, Configuration.windowHeight / 4 - 120 / 2, (750, 120),\n \"PongLogo.png\", pathToImage=\"images/Pong/\")\n keys_player_one = Image(Configuration.windowWidth / 4 - 150, Configuration.windowHeight * 3 / 4 - 50,\n (300, 100),\n \"AandD.png\", pathToImage=\"images/Pong/\")\n keys_player_two = Image(Configuration.windowWidth * 3 / 4 - 150, Configuration.windowHeight * 3 / 4 - 50,\n (300, 100),\n \"ArrowLeftRight.png\", pathToImage=\"images/Pong/\")\n\n # draw text and images\n self.surface.fill(Colors.Black)\n self.drawImageOnSurface(logo)\n self.drawImageOnSurface(keys_player_one)\n if not self.hasComputerPlayer: # only draw the control of the second player, if he isn´t a computer player\n self.drawImageOnSurface(keys_player_two)\n self.drawTextOnSurface(\"First player that reaches 1000 points wins!\",\n (Configuration.windowWidth / 2, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n self.drawTextOnSurface(\"Controls\",\n (Configuration.windowWidth / 2, Configuration.windowHeight * 3 / 4), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # display images on screen\n\n logger.info(\"Displaying prescreen animation\")\n\n sleep(4) # wait for seconds", "def pysweep_before_finish_init(self):\n self.displaycanvas = DisplayCanvas(self.pysweep.master, self.boardsize, self.lcounterlength, self.rcounterlength, self.images)\n self.displaycanvas.pack()\n\n self.pysweep.master.update_idletasks()\n self.displaycanvas.update_idletasks()\n # enode = self.arbitrary()\n # print('DisplayCanvas:', enode)", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))", "def reDraw(self):\n self.canvasIGetDrawnOn.delete(self.spriteOnCanvas)\n self.spriteImage = ImageTk.PhotoImage(self.spriteImageFile.rotate(self.faceHeading, expand=True))\n self.spriteOnCanvas=self.canvasIGetDrawnOn.create_image(self.xPos,self.yPos,image=self.spriteImage)", "def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. 
/ self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def draw():", "def actualise_fenetre(self,plateau,fenetre,joueur,info,bouton,etape_texte):\n\n self.affiche_plateau(plateau,fenetre)\n liste_im_joueur = [pygame.image.load(\"joueur\"+str(i)+\".png\").convert_alpha() for i in range(1,5)]\n for i in range (4) :\n x_joueur = 60\n y_joueur = 60\n liste_im_joueur[i] = pygame.transform.scale(liste_im_joueur[i], (int(x_joueur),int(y_joueur)))\n\n for i in range(len(plateau.dico_joueurs)) :\n fenetre.blit(liste_im_joueur[i],(1030,320+i*80))\n fenetre.blit(police_small.render(str(plateau.dico_joueurs[i].nom) + \" : \",False,pygame.Color(\"#000000\")),(800,340+i*75))\n fenetre.blit(police1.render(\"Score : \"+str(plateau.dico_joueurs[i].points),False,pygame.Color(\"#000000\")),(800,340+i*75+15))\n fenetre.blit(police1.render(\"Ordre de mission : \"+str(sorted(plateau.dico_joueurs[i].fantome_target)),False,pygame.Color(\"#000000\")),(800,340+i*75+30))\n fenetre.blit(police1.render(\"Jokers restants : \"+str(plateau.dico_joueurs[i].nb_joker),False,pygame.Color(\"#000000\")),(800,340+i*75+45))\n \n #test texte pour afficher le joueur qui joue\n if self.turn == True :\n fenetre.blit(police.render(\"C'est à vous de jouer\",False,pygame.Color(0,0,0)),(800,240))\n else:\n fenetre.blit(police.render(\"C'est le tour de votre adversaire\",False,pygame.Color(0,0,0)),(800,240))\n \n #affichage du message d'erreur\n for i in range(len(info)) : \n fenetre.blit(police.render(info[i],False,pygame.Color(\"#000000\")),(760,180+i*20))\n \n fenetre.blit(police.render(etape_texte,False,pygame.Color(\"#000000\")),(760,160))\n \n \n bouton.draw(fenetre)\n \n pygame.display.flip()", "def on_draw(self):\n arcade.start_render()", "def draw(self):\n self.write_image()\n self.update()", "def on_draw(self):\n # draw everything", "def ventanaprincipal():\r\n titulo_principal=pygame.image.load(\"../recursor/Imagenes juegos/parejas_1.png\")\r\n ventana.blit(titulo_principal, (405,0))\r\n logo_plaython=pygame.image.load(\"../recursor/Imagenes juegos/plaython.png\")#Cargamos la imagen del logo\r\n imagen_decoracion=pygame.image.load(\"../recursor/Imagenes juegos/imagenpc.png\")\r\n imagen_decoracion = pygame.transform.scale(imagen_decoracion, (210, 210))\r\n ventana.blit(logo_plaython, (900, 180))\r\n ventana.blit(imagen_decoracion,(100,240))\r\n titulo_plaython = pygame.image.load(\"../recursor/Imagenes juegos/TITULOPLAYTHON.png\")\r\n titulo_plaython = pygame.transform.scale(titulo_plaython, (240, 150))\r\n ventana.blit(titulo_plaython, (505, 550))", "def draw(self, p):\r\n self.active = True\r\n surface = pygame.surfarray.make_surface(p)\r\n self.screen.blit(surface, (0, 0))\r\n pygame.display.flip()\r\n return", "def pre_draw(self):", "def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n 
bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def show(self):\n if self.video:\n self.video.write(self.img)\n cv2.imshow('Simpy', self.img)\n cv2.waitKey(1000 // self.fps)" ]
[ "0.6508464", "0.6444371", "0.6252179", "0.62192374", "0.6206115", "0.6204564", "0.6166355", "0.61551905", "0.6130094", "0.60961246", "0.6088001", "0.60873604", "0.6068308", "0.6057062", "0.60507476", "0.6043697", "0.60300726", "0.60265714", "0.60210216", "0.6011016", "0.6006273", "0.5990426", "0.5981349", "0.5977395", "0.59744024", "0.59697944", "0.5960354", "0.5955739", "0.59447724", "0.5932965" ]
0.71416545
0
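A sketch of the update/blit/flip pattern in the `display()` method of the row above, assuming pygame is installed and a display is available; the `Canvas` class, window size, and colors are invented for illustration, and the Spanish attribute names (`superficie`, `origen`, `ventana`) follow the identifiers used in the document.

```python
import pygame

class Canvas:
    """Minimal stand-in for the canvas objects iterated over in display()."""
    def __init__(self, size, origen, color):
        self.superficie = pygame.Surface(size)
        self.origen = origen
        self.color = color

    def update(self):
        self.superficie.fill(self.color)

pygame.init()
ventana = pygame.display.set_mode((320, 240))
canvases = {
    "left": Canvas((150, 240), (0, 0), (200, 0, 0)),
    "right": Canvas((150, 240), (170, 0), (0, 0, 200)),
}

# Same pattern as display(): update every canvas, blit it, then flip once.
for c in canvases.values():
    c.update()
    ventana.blit(c.superficie, c.origen)
pygame.display.flip()

pygame.time.wait(500)
pygame.quit()
```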
Setup injections Note that the actual injected current is proportional to dt of the clock So, you need to use the same dt for stimulation as for the model Strangely, the pulse gen in compartment_net refers to firstdelay, etc.
def setupinj(model, delay,width,neuron_pop): pg = moose.PulseGen('pulse') pg.firstDelay = delay pg.firstWidth = width pg.secondDelay = 1e9 for ntype in neuron_pop.keys(): for num, name in enumerate(neuron_pop[ntype]): injectcomp=moose.element(name +'/'+model.param_cond.NAME_SOMA) print("INJECT:", name, injectcomp.path) moose.connect(pg, 'output', injectcomp, 'injectMsg') return pg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(timestep=None, min_delay=None, max_delay=None, **kwargs):\n global controller\n\n logger.info(\"PACMAN103 (c) 2014 APT Group, University of Manchester\")\n logger.info(\" Release version 2014.4.1 - April 2014\")\n # Raise an exception if no SpiNNaker machine is specified\n if kwargs.has_key(\"machine\"):\n machine_name = kwargs.get(\"machine\")\n logger.warn(\"The machine name from kwargs is overriding the machine \"\n \"name defined in the pacman.cfg file\")\n elif conf.config.has_option(\"Machine\", \"machineName\"):\n machine_name = conf.config.get(\"Machine\", \"machineName\")\n else:\n raise Exception(\"A SpiNNaker machine must be specified in pacman.cfg.\")\n if machine_name == 'None':\n raise Exception(\"A SpiNNaker machine must be specified in pacman.cfg.\")\n\n reload_time = None\n if conf.config.has_option(\"Execute\", \"reload_date\"):\n reload_time = conf.config.get(\"Execute\", \"reload_date\")\n if reload_time != 'None':\n logger.warn(\"The reload parameter was set, therefore not recompiling\")\n else:\n reload_time = None\n\n #deal with params allowed via the setup optimals\n if timestep is not None:\n timestep *= 1000 # convert into ms from microseconds\n conf.config.set(\"Machine\", \"machineTimeStep\", timestep)\n else:\n timestep = conf.config.get(\"Machine\", \"machineTimeStep\")\n if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:\n raise exceptions.ConfigurationException(\"Pacman does not support min \"\n \"delays below {} ms with the current \"\n \"machine time step\".format(1.0 * timestep))\n\n natively_supported_delay_for_models = MAX_SUPPORTED_DELAY_TICS\n delay_extention_max_supported_delay = MAX_DELAY_BLOCKS * MAX_TIMER_TICS_SUPPORTED_PER_BLOCK\n\n max_delay_tics_supported = \\\n natively_supported_delay_for_models + delay_extention_max_supported_delay\n\n if max_delay is not None and float(max_delay * 1000) > max_delay_tics_supported * timestep:\n raise exceptions.ConfigurationException(\"Pacman does not support max delays \"\n \"above {} ms with the current machine \"\n \"time step\".format(0.144 * timestep))\n if min_delay is not None:\n conf.config.add_section(\"Model\")\n conf.config.set(\"Model\", \"min_delay\", (min_delay * 1000) / timestep)\n\n if max_delay is not None:\n if not conf.config.has_section(\"Model\"):\n conf.config.add_section(\"Model\")\n conf.config.set(\"Model\", \"max_delay\", (max_delay * 1000) / timestep)\n\n time_scale_factor = None\n if (conf.config.has_option(\"Machine\", \"timeScaleFactor\") and\n conf.config.get(\"Machine\", \"timeScaleFactor\") != \"None\"):\n time_scale_factor = conf.config.getint(\"Machine\", \"timeScaleFactor\")\n if timestep * time_scale_factor < 1000:\n logger.warn(\"the combination of machine time step and the machine \"\n \"time scale factor results in a real timer tic that is \"\n \"currently not reliably supported by the spinnaker \"\n \"machine.\")\n else:\n time_scale_factor = max(1, math.ceil(1000.0 / float(timestep)))\n if time_scale_factor > 1:\n logger.warn(\"A timestep was entered that has forced pacman103 to \"\n \"automatically slow the simulation down from real time \"\n \"by a factor of {}. 
To remove this automatic behaviour\"\n \", please enter a timescaleFactor value in \"\n \"your .pacman.cfg\".format(time_scale_factor))\n\n\n\n\n \n # Create a new Controller to run PyNN:\n controller = control.Controller(sys.modules[__name__],\n machine_name, reload_time=reload_time)\n # Set the app ID:\n appID = conf.config.getint(\"Machine\", \"appID\")\n controller.dao.app_id = appID\n logger.info(\"Setting appID to %d.\" % appID)\n # Set the machine time step for the simulation:\n machineTimeStep = conf.config.getint(\"Machine\", \"machineTimeStep\")\n controller.dao.machineTimeStep = machineTimeStep\n logger.info(\"Setting machine time step to %d micro-seconds.\" % machineTimeStep)\n controller.dao.time_scale_factor = time_scale_factor\n logger.info(\"Setting time scale factor to %d.\" % time_scale_factor)\n # Set boolean variable writeTextSpecs in DAO if we are required to:\n writeTextSpecs = False\n if conf.config.getboolean(\"Reports\", \"reportsEnabled\"):\n writeTextSpecs = conf.config.getboolean(\"Reports\", \"writeTextSpecs\")\n controller.dao.writeTextSpecs = writeTextSpecs\n\n if conf.config.has_option(\"Recording\", \"send_live_spikes\"):\n if conf.config.getboolean(\"Recording\", \"send_live_spikes\") == True:\n port = None\n if conf.config.has_option(\"Recording\", \"live_spike_port\"):\n port = conf.config.getint(\"Recording\", \"live_spike_port\")\n hostname = \"localhost\"\n if conf.config.has_option(\"Recording\", \"live_spike_host\"):\n hostname = conf.config.get(\"Recording\", \"live_spike_host\")\n tag = None\n if conf.config.has_option(\"Recording\", \"live_spike_tag\"):\n tag = conf.config.getint(\"Recording\", \"live_spike_tag\")\n if tag == None:\n raise exceptions.PacmanException(\"Target tag for live spikes \"\n \"has not been set\")\n\n # Set up the forwarding so that monitored spikes are sent to the\n # requested location\n controller.set_tag_output(tag, port, hostname, 10)\n #takes the same port for the visualiser if being used\n if conf.config.getboolean(\"Visualiser\", \"enable\") and \\\n conf.config.getboolean(\"Visualiser\", \"have_board\"):\n controller.set_visulaiser_port(port)\n \n # Create special AppMonitor vertex, to receive spikes for immediate \n # transfer to host:\n monitorVertex = AppMonitor()\n \n # Add the special vertex to the list of vertices:\n controller.add_vertex(monitorVertex)\n \n # Get track of this special vertex as it will be used as a target\n # for recorded spikes:\n global appMonitorVertex\n appMonitorVertex = monitorVertex\n\n # PyNN API says something must be returned, \n # so we return something useful: our controller instance\n return controller", "def __init__(self,n,\n d=3,\n maxn=125,\n controllers=[],\n xmax=XMAX,\n ymax=YMAX,\n zmax=ZMAX,\n vmax=VMAX,\n simbox=None,\n mass=1.0,\n rinit=None,\n source=None,\n side=(5,5,5),\n integrator='rk4',\n spacing=0.1):\n self.n = n\n self.dim = d\n self.maxn = maxn\n self.dt = 0.0\n self.steps = 0\n\n # Basic mechanical properties\n if not simbox:\n self.box = box.MirrorBox(p=self,xmax=xmax,ymax=ymax,zmax=zmax)\n else:\n self.box = simbox\n \n # Select integrator\n integrator_mapping = {'euler':euler,\n 'ieuler':imp_euler,\n 'rk4':rk4}\n\n self.step = integrator_mapping[integrator]\n\n # Start with a random configuration\n self.r = self.box.xmax * np.random.random([self.maxn,self.dim])\n self.m = np.zeros(self.maxn,dtype=float)\n self.v = vmax * (np.random.random([self.maxn,self.dim]) - 0.5)\n self.rdot = np.zeros(self.r.shape)\n self.vdot = np.zeros(self.v.shape)\n self.mdot = 
np.zeros(self.m.shape)\n\n if rinit == 'grid':\n self.r[0:n,:] = configuration.grid3d(n,side,(xmax/2.,ymax/2.,zmax/2.)\n ,spacing=spacing)\n elif rinit == 'fcc':\n self.r[0:n,:] = configuration.fcc3d(n,side,(xmax/2.,ymax/2.,zmax/2.)\n ,spacing=spacing)\n elif rinit == 'load':\n # Load the configuration from a target.\n # Today the target is hard-coded\n source = os.environ.get('SPDATA') + '/' + source #nanobox_eq_2.nc'\n read_step(source,self,step='last')\n # Make some assumptions about the source file.\n\n # Initialise values\n #self.r[0:self.n]=configuration.grid3d(self.n,5,5,(20,20,20),spacing=0.8)\n self.m[:] = mass \n self.colour = 1.0,0.0,0.0 \n\n # State vectors to pass to numerical integrators\n n_variables = 7\n self.x = np.zeros([n_variables,self.maxn])\n self.xdot = np.zeros([n_variables,self.maxn])\n\n self.nlists = []\n self.forces = []\n\n self.controllers = controllers\n for controller in self.controllers:\n controller.bind_particles(self)\n\n \"\"\" Variables for measuring performance. \"\"\"\n self.timing = {}\n self.timing['force time'] = -1\n self.timing['deriv time'] = -1\n self.timing['pairsep time'] = -1\n self.timing['update time'] = -1\n self.timing['integrate time'] = -1", "def startup(self):\n # Initializing the cycle data (cd) dictionary\n self.cd[\"started_up\"] = False\n self.cd[\"peak_pressure\"] = 0\n self.cd[\"tidal_volume\"] = 0\n self.cd[\"inhale_duration\"] = 0\n self.cd[\"exhale_duration\"] = 0\n self.cd[\"IE_ratio\"] = 1\n self.cd[\"PEEP\"] = 0\n\n to = 2 # Timeout\n startup_cycles = 0\n limit = 20\n # If the piston position is unknown\n last_cycle = time.time()\n while not self.piston.piston_at_bottom and not self.piston.piston_at_top:\n if self.pst_dir == 1:\n self.piston.pst_up()\n if time.time() - last_cycle > to:\n self.pst_dir = 0\n startup_cycles += 1\n last_cycle = time.time()\n else:\n self.piston.pst_down()\n if time.time() - last_cycle > to:\n self.pst_dir = 1\n startup_cycles += 1\n last_cycle = time.time()\n if startup_cycles >= limit:\n print(\"There is a problem at startup, check compressed air\")\n print(f\"Tried to startup for {startup_cycles} cycles\")\n # Breaks the loop so that the controller doesn't start\n self.signal_startup_error.emit(True)\n return\n while not self.piston.piston_at_top:\n self.piston.pst_up()\n self.piston.stop()\n\n print(f\"startup_cycles: {startup_cycles}\")\n self.cd[\"started_up\"] = True\n self.signal_cycle_data.emit(self.cd)\n # Duration of the first tare of the system\n tare_duration = 5.0\n time.sleep(tare_duration)\n self.signal_get_tare.emit(tare_duration)\n # Waits a little bit just to make sure that the respirator isn't working when the controller \n # is called\n time.sleep(0.5)\n self.piston_control()", "def setup_ssem(self):\n \n groundBox = box_polygon_shape(-20,-500,1,1) # A tiny box that just acts as the ground body for everything else\n groundBody = self.world.CreateStaticBody(shapes=groundBox)\n self.memory_sender_y = -500\n self.groundBody = groundBody\n # Initial charge for main injector\n self.ball_bearing_block(0,190,cols=16)\n self.add_static_polygon([ (0,20), (100,0), (100,5), (0,25)], -132, 220)\n self.add_static_polygon([ (0,0), (3,0), (3,20), (0,20)], -132, 240)\n self.injector_cranks = []\n self.parts.main_injector_raiser = self.injector(-32,150, groundBody, injector_crank_array=self.injector_cranks)\n\n (self.parts.memory_selector_holdoff, self.parts.memory_follower_holdoff) = self.memory_module(0,0, groundBody)\n self.upper_regenerators = []\n 
self.parts.accumulator_diverter_lever = self.diverter_set(0,130, groundBody, slope_x=-240, slope_y=180, return_weight=10) # Diverter 1. Splits to subtractor reader.\n\n self.parts.discard_lever_2 = self.diverter_set(-5,-30, groundBody, discard=470) # Diverter 2a. Discard reader-pulse data.\n self.parts.upper_regen_control = self.regenerator(-5,-65, groundBody, self.upper_regenerators) # Regenerator 1. For regenning anything read from memory.\n self.parts.ip_diverter_lever = self.diverter_set(-5,-95, groundBody, slope_x=320, slope_y=170, start_at=3, return_weight=5) # Diverter 1. Splits to instruction counter.\n self.parts.diverter_3 = self.diverter_set(-10,-158, groundBody, slope_x=200, slope_y=310) # Diverter 3; splits to instruction reg/PC\n \n # PC injector\n pc_injector_x = 230\n pc_injector_y = -290\n self.pc_injector_cranks = [] \n self.parts.pc_injector_raiser = self.injector(pc_injector_x,pc_injector_y, groundBody, injector_crank_array=self.pc_injector_cranks, columns=5)\n # Initial charge for PC injector\n self.ball_bearing_block(250,-240,cols=8)\n\n\n sub_pos_x = -15\n sub_pos_y = -220\n self.parts.accumulator_reset_lever = self.subtractor(sub_pos_x,sub_pos_y, groundBody, discard_bands=True, toggle_joint_array=self.accumulator_toggles, comparison_output=True)\n self.dropper = self.slow_drop_unit(groundBody, sub_pos_x-18, sub_pos_y+40)\n\n # The 'skip lever' - the large balance arm which injects one\n # into the program counter when comparison (CMP) succeeds\n skip_lever_x = sub_pos_x - 200\n skip_lever_y = sub_pos_y - 200\n a = fixtureDef(shape=polygonShape(vertices=[(-50,-32), (0,-30), (0,-25), (-50,-30)]), filter=filters[0], density=1.0)\n b = fixtureDef(shape=box_polygon_shape(0,-30,5,30), filter=filters[0], density=1.0)\n c = fixtureDef(shape=box_polygon_shape(-30,-30,5,30), filter=filters[0], density=1.0)\n d = fixtureDef(shape=box_polygon_shape(0,0,300,5), filter=filters[2], density=1.0)\n e = fixtureDef(shape=box_polygon_shape(285,-15,15,15), filter=filters[2], density=2.10)\n f = fixtureDef(shape=box_polygon_shape(150,-50,5,50), filter=filters[0], density=1.0)\n skip_lever=self.add_multifixture([a,b,d,e,f], skip_lever_x, skip_lever_y)\n skip_lever.attachment_point = (150,-50)\n skip_lever.origin = (skip_lever_x,skip_lever_y)\n\n self.revolving_joint(groundBody, skip_lever, (skip_lever_x+150, skip_lever_y+2.5), friction=0)\n self.add_static_polygon(polygonShape(vertices=box_vertices(0, 0, 10,10)), skip_lever_x+270, skip_lever_y-15, filter=filters[2])\n self.parts.cmp_injector = self.horizontal_injector(skip_lever_x-48,skip_lever_y+257, groundBody)\n self.ball_bearing_block(skip_lever_x-30,skip_lever_y+280,cols=1)\n self.add_static_polygon(polygonShape(vertices=[(0,0), (20,0), (0,20)]), skip_lever_x-30,skip_lever_y+230)\n \n self.lower_regenerators = []\n self.parts.lower_regen_control = self.regenerator(-203,-400, groundBody, self.lower_regenerators)\n #Program counter\n self.parts.pc_reset_lever = self.subtractor(400,-320, groundBody, lines=5, toggle_joint_array=self.ip_toggles, is_actually_adder=True)\n # Thing that adds one ball bearing to the PC\n pc_incrementer_x = 457\n pc_incrementer = self.horizontal_injector(pc_incrementer_x,-240, groundBody)\n # Overspill loose ball bearings from the PC incrementer to the PC reader\n overspill_width = pc_incrementer_x - pc_injector_x - 125\n self.add_static_polygon([ (0, 0), (overspill_width,15), (overspill_width,20), (0,5) ], pc_injector_x+125,pc_injector_y+65)\n\n # Block to stop right-side overspill on incrementer\n 
self.add_static_polygon([ (0, 0), (5,0), (5,20), (0,20) ], pc_incrementer_x+27,-240+40)\n\n\n self.ball_bearing_block(457+20,-250+30,cols=1)\n self.distance_joint(skip_lever, pc_incrementer)\n\n self.connect_regenerators()\n\n # Large collection plates at the bottom\n self.add_static_polygon([ (-300,-600),(700,-550), (700,-610), (-300,-610)])\n self.add_static_polygon([ (600,-610),(700,-610), (850,-400), (800,-400)])\n self.add_static_polygon([ (-500,-400),(-450,-400), (-310,-610), (-400,-610)])\n\n # Instruction decoder ROM\n self.rom_followers = []\n self.rom_selectors = []\n (self.parts.instruction_selector_holdoff, self.parts.instruction_follower_holdoff) = self.add_row_decoder(200, 0, groundBody, self.rom_followers, self.rom_selectors)\n self.labels.append((\"Instruction decoder\", 400,0,0))\n\n self.add_instruction_cranks(groundBody, 550, 140)\n self.parts.sender_eject = self.memory_sender(198,self.memory_sender_y, groundBody)\n self.connect_memory()\n\n # A guard which stops waste data from the subtractor falling into the instruction register\n self.add_static_polygon([ (0,0), (120,120), (120,123), (0,3)], 120, self.memory_sender_y-10)\n\n # Add one final transfer band to move everything back into band 0\n self.transfer_bands.append((-550+10, -550, [ (-300,800)], 1))", "def setup(self, tau1=None, tau2=None, template_tmax=None, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4., min_event_amplitude=2.0):\n assert sign in [-1, 1]\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = dt\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n #Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if 
k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate", "def setup(self, tau1=None, tau2=None, template_tmax=0.05, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4.0, min_event_amplitude=2.0):\n assert sign in [-1, 1]\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = dt\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def setup(env, channel, interT, station, mean):\r\n network = Network(env, channel)\r\n system = System(env, network, station, mean)\r\n transmitList = []\r\n \r\n while True:\r\n system.stations", "def __init__(self,\n setup,\n host=None,\n dof=6,\n control_type='position',\n derivative_type='none',\n target_type='position',\n reset_type='random',\n reward_type='linear',\n deriv_action_max=10,\n first_deriv_max=10, # used only with second derivative control\n vel_penalty=0,\n obs_history=1,\n actuation_sync_period=1,\n episode_length_time=None,\n episode_length_step=None,\n rllab_box = False,\n servoj_t=ur_utils.COMMANDS['SERVOJ']['default']['t'],\n servoj_gain=ur_utils.COMMANDS['SERVOJ']['default']['gain'],\n speedj_a=ur_utils.COMMANDS['SPEEDJ']['default']['a'],\n speedj_t_min=ur_utils.COMMANDS['SPEEDJ']['default']['t_min'],\n movej_t=2, # used for resetting\n accel_max=None,\n speed_max=None,\n dt=0.008,\n delay=0.0, # to simulate extra delay in the system\n **kwargs):\n\n\n # Check that the task parameters chosen are implemented in this class\n assert dof in [2, 6]\n assert control_type in ['position', 'velocity', 'acceleration']\n assert derivative_type in ['none', 'first', 'second']\n assert target_type in ['position', 'angle']\n assert reset_type in ['random', 'zero', 'none']\n assert actuation_sync_period >= 0\n\n if episode_length_step is not None:\n assert episode_length_time is None\n self._episode_length_step = episode_length_step\n self._episode_length_time = episode_length_step * dt\n elif episode_length_time is not None:\n assert episode_length_step is None\n self._episode_length_time = episode_length_time\n self._episode_length_step = int(episode_length_time / dt)\n else:\n #TODO: should we allow a continuous behaviour case here, with no episodes?\n print(\"episode_length_time or episode_length_step needs to be set\")\n raise AssertionError\n\n # Task Parameters\n self._host = setups[setup]['host'] if host is None else host\n self._obs_history = obs_history\n self._dof = dof\n self._control_type = control_type\n self._derivative_type = derivative_type\n self._target_type = target_type\n self._reset_type = reset_type\n self._reward_type = reward_type\n self._vel_penalty = vel_penalty # weight of the velocity penalty\n self._deriv_action_max = deriv_action_max\n self._first_deriv_max = first_deriv_max\n self._speedj_a = speedj_a\n self._delay = delay\n self.return_point = None\n if accel_max==None:\n accel_max = setups[setup]['accel_max']\n if speed_max==None:\n speed_max = setups[setup]['speed_max']\n if self._dof == 6:\n self._joint_indices = [0, 1, 2, 3, 4, 5]\n self._end_effector_indices = [0, 1, 2]\n elif self._dof == 2:\n self._joint_indices = [1, 2]\n self._end_effector_indices = [1, 2]\n\n # Arm/Control/Safety Parameters\n self._end_effector_low = setups[setup]['end_effector_low']\n self._end_effector_high = setups[setup]['end_effector_high']\n self._angles_low = 
setups[setup]['angles_low'][self._joint_indices]\n self._angles_high = setups[setup]['angles_high'][self._joint_indices]\n self._speed_low = -np.ones(self._dof) * speed_max\n self._speed_high = np.ones(self._dof) * speed_max\n self._accel_low = -np.ones(self._dof) * accel_max\n self._accel_high = np.ones(self._dof) * accel_max\n\n self._box_bound_buffer = setups[setup]['box_bound_buffer']\n self._angle_bound_buffer = setups[setup]['angle_bound_buffer']\n self._q_ref = setups[setup]['q_ref']\n self._ik_params = setups[setup]['ik_params']\n\n # State Variables\n self._q_ = np.zeros((self._obs_history, self._dof))\n self._qd_ = np.zeros((self._obs_history, self._dof))\n\n self._episode_steps = 0\n\n self._pstop_time_ = None\n self._pstop_times_ = []\n self._safety_mode_ = ur_utils.SafetyModes.NONE\n self._max_pstop = 10\n self._max_pstop_window = 600\n self._clear_pstop_after = 2\n self._x_target_ = np.frombuffer(Array('f', 3).get_obj(), dtype='float32')\n self._x_ = np.frombuffer(Array('f', 3).get_obj(), dtype='float32')\n self._reward_ = Value('d', 0.0)\n\n if self._target_type == 'position':\n if self._dof == 2:\n self._target_ = np.zeros((2))\n self._target_low = self._end_effector_low[self._end_effector_indices]\n self._target_high = self._end_effector_high[self._end_effector_indices]\n elif self._dof == 6:\n self._target_ = np.zeros((3))\n self._target_low = self._end_effector_low\n self._target_high = self._end_effector_high\n elif self._target_type == 'angle':\n self._target_ = np.zeros((self._dof))\n self._target_low = self._angles_low\n self._target_high = self._angles_high\n\n # Set up action and observation space\n\n if self._derivative_type== 'second' or self._derivative_type== 'first':\n self._action_low = -np.ones(self._dof) * self._deriv_action_max\n self._action_high = np.ones(self._dof) * self._deriv_action_max\n else: # derivative_type=='none'\n if self._control_type == 'position':\n self._action_low = self._angles_low\n self._action_high = self._angles_high\n elif self._control_type == 'velocity':\n self._action_low = self._speed_low\n self._action_high = self._speed_high\n elif self._control_type == 'acceleration':\n self._action_low = self._accel_low\n self._action_high = self._accel_high\n\n # TODO: is there a nicer way to do this?\n if rllab_box:\n from rllab.spaces import Box as RlBox # use this for rllab TRPO\n Box = RlBox\n else:\n from gym.spaces import Box as GymBox # use this for baselines algos\n Box = GymBox\n\n self._observation_space = Box(\n low=np.array(\n list(self._angles_low * self._obs_history) # q_actual\n + list(-np.ones(self._dof * self._obs_history)) # qd_actual\n + list(self._target_low) # target\n + list(-self._action_low) # previous action in cont space\n ),\n high=np.array(\n list(self._angles_high * self._obs_history) # q_actual\n + list(np.ones(self._dof * self._obs_history)) # qd_actual\n + list(self._target_high) # target\n + list(self._action_high) # previous action in cont space\n )\n )\n\n\n self._action_space = Box(low=self._action_low, high=self._action_high)\n\n if rllab_box:\n from rllab.envs.env_spec import EnvSpec\n self._spec = EnvSpec(self.observation_space, self.action_space)\n\n # Only used with second derivative control\n self._first_deriv_ = np.zeros(len(self.action_space.low))\n\n # Communicator Parameters\n communicator_setups = {'UR5':\n {\n 'num_sensor_packets': obs_history,\n\n 'kwargs': {'host': self._host,\n 'actuation_sync_period': actuation_sync_period,\n 'buffer_len': obs_history + SharedBuffer.DEFAULT_BUFFER_LEN,\n 
}\n }\n }\n if self._delay > 0:\n from senseact.devices.ur.ur_communicator_delay import URCommunicator\n communicator_setups['UR5']['kwargs']['delay'] = self._delay\n else:\n from senseact.devices.ur.ur_communicator import URCommunicator\n communicator_setups['UR5']['Communicator'] = URCommunicator\n\n super(ReacherEnv, self).__init__(communicator_setups=communicator_setups,\n action_dim=len(self.action_space.low),\n observation_dim=len(self.observation_space.low),\n dt=dt,\n **kwargs)\n\n # The last action\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n\n # Defining packet structure for quickly building actions\n self._reset_packet = np.ones(self._actuator_comms['UR5'].actuator_buffer.array_len) * ur_utils.USE_DEFAULT\n self._reset_packet[0] = ur_utils.COMMANDS['MOVEJ']['id']\n self._reset_packet[1:1 + 6] = self._q_ref\n self._reset_packet[-2] = movej_t\n\n self._servoj_packet = np.ones(self._actuator_comms['UR5'].actuator_buffer.array_len) * ur_utils.USE_DEFAULT\n self._servoj_packet[0] = ur_utils.COMMANDS['SERVOJ']['id']\n self._servoj_packet[1:1 + 6] = self._q_ref\n self._servoj_packet[-3] = servoj_t\n self._servoj_packet[-1] = servoj_gain\n\n self._speedj_packet = np.ones(self._actuator_comms['UR5'].actuator_buffer.array_len) * ur_utils.USE_DEFAULT\n self._speedj_packet[0] = ur_utils.COMMANDS['SPEEDJ']['id']\n self._speedj_packet[1:1 + 6] = np.zeros((6,))\n self._speedj_packet[-2] = speedj_a\n self._speedj_packet[-1] = speedj_t_min\n\n self._stopj_packet = np.zeros(self._actuator_comms['UR5'].actuator_buffer.array_len)\n self._stopj_packet[0] = ur_utils.COMMANDS['STOPJ']['id']\n self._stopj_packet[1] = 2.0\n\n # Tell the arm to do nothing (overwritting previous command)\n self._nothing_packet = np.zeros(self._actuator_comms['UR5'].actuator_buffer.array_len)\n\n self._pstop_unlock_packet = np.zeros(self._actuator_comms['UR5'].actuator_buffer.array_len)\n self._pstop_unlock_packet[0] = ur_utils.COMMANDS['UNLOCK_PSTOP']['id']\n\n\n # self.info['reward_dist'] = 0\n # self.info['reward_vel'] = 0", "def setup(self, tau1=None, tau2=None, template_tmax=None, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4., min_event_amplitude=2.0):\n assert sign in [-1, 1] # must be selective, positive or negative events only\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = 1/20000.\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ##### Structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5\n model.structure = np.array(list(np.tile([2] + np.tile([1],model.nof_segments_internodes).tolist(),model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes / model.nof_segments_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment 
diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # same diameter for whole fiber\n model.compartment_diameters[:] = model.diameter_fiber\n \n ##### conductivity of leakage channels\n model.g_L = model.g_L_node/model.surface_aria_node\n\n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacities\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # nodes\n model.c_m[model.structure == 2] = model.c_m_node/model.surface_aria_node\n # internodes\n model.c_m[structure == 1] = model.c_m_layer/(1+model.nof_myelin_layers)\n \n ##### Condactivities internodes\n # initialize\n model.g_m = np.zeros_like(model.structure)*msiemens/cm**2\n # internodes\n model.g_m[model.structure == 1] = model.g_m_layer/(1+model.nof_myelin_layers)\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2\n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Noise term\n model.gamma_Na_vector = np.zeros(model.nof_comps)*psiemens\n model.gamma_Na_vector[model.structure == 2] = model.gamma_Na\n model.noise_term = np.sqrt(model.A_surface*model.gamma_Na_vector*model.rho_Na)\n \n ##### Compartments to plot\n # get indexes of all compartments that are not segmented\n model.indexes_comps = np.where(model.structure == 2)[0]\n # calculate middle compartments of internodes\n model.middle_comps_internodes = np.ceil(model.indexes_comps[:-1] + model.nof_segments_internodes/2).astype(int)\n # create array with all compartments to plot\n model.comps_to_plot = np.sort(np.append(model.indexes_comps, model.middle_comps_internodes))\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.h = model.h_init\n neuron.n = model.n_init\n \n ##### Set parameter values of differential equations\n # conductances nodes\n neuron.gamma_Na = model.gamma_Na\n neuron.gamma_K = model.gamma_K\n neuron.g_L = model.g_L\n \n # conductances internodes\n neuron.g_myelin = model.g_m\n neuron.gamma_Na[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.gamma_K[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.g_L[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # conductances peripheral terminal\n neuron.gamma_Na[np.where(model.structure == 0)[0]] = 
model.gamma_Na_terminal\n neuron.gamma_K[np.where(model.structure == 0)[0]] = model.gamma_K_terminal\n neuron.g_L[np.where(model.structure == 0)[0]] = model.g_L_terminal\n \n # conductances soma\n neuron.gamma_Na[index_soma] = 0*psiemens\n neuron.gamma_K[index_soma] = 0*psiemens\n neuron.g_L[index_soma] = 0*msiemens/cm**2\n \n # Nernst potential for leakage current\n neuron.E_Leak = model.E_L\n neuron.E_Leak[np.where(model.structure == 0)[0]] = E_L_terminal\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.rho_Na = model.rho_Na\n neuron.rho_K = model.rho_K\n \n return neuron, model", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n 
observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def simulate(initstate, t, timestep=forward, drive=donothing, bounds = [0.97, 0.97, 0.97, 0.97], saveinterval=10, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x = dudt, dvdt_x = dvdt, dndt_x = dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # gives surface height array of the system after evert dt\n bounds = np.asarray(bounds, dtype=np.float32)\n h, n, u, v, f, dx, dy, dt = [initstate[k] for k in ('h', 'n', 'u', 'v', 'lat', 'dx', 'dy', 'dt')]\n \n f = np.float32(((2*2*np.pi*np.sin(f*np.pi/180))/(24*3600))[:,np.newaxis])\n \n \n du0 = np.zeros_like(u)\n dv0 = np.zeros_like(v)\n dn0 = np.zeros_like(n)\n \n \n dndt_x(h, n, u, v, dx, dy, dn0)\n dn = (dn0, np.copy(dn0), np.copy(dn0))\n \n dudt_x(h, n, f, u, v, dx, dy, du0)\n du = (du0, np.copy(du0), np.copy(du0), np.copy(du0))\n \n dvdt_x(h, n, f, u, v, dx, dy, dv0)\n dv = (dv0, np.copy(dv0), np.copy(dv0), np.copy(dv0))\n \n nu = (dx+dy)/1000\n \n mmax = np.max(np.abs(n))\n landthresh = 1.5*np.max(n) # threshhold for when sea ends and land begins\n itrs = int(np.ceil(t/dt))\n saveinterval = np.int(saveinterval//dt)\n assert (dt >= 0), 'negative dt!' 
# dont try if timstep is zero or negative\n \n ntt = np.zeros((np.int(np.ceil(itrs/saveinterval)),)+n.shape, dtype=np.float32)\n maxn = np.zeros(n.shape, dtype=n.dtype) # max height in that area\n \n coastx = np.less(h, landthresh) # where the reflective condition is enforced on the coast\n \n print('simulating...')\n try:\n for itr in range(itrs):# iterate for the given number of iterations\n if itr%saveinterval == 0:\n ntt[np.int(itr/saveinterval),:,:] = n\n print(np.argmax( ntt[np.int(itr/saveinterval),:,:],axis=0)[5])\n \n \n maxn = np.max((n, maxn), axis=0) # record new maxes if they are greater than previous records \n \n # pushes n, u, v one step into the future\n n,u,v, du, dv, dn = timestep(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=beta, eps=eps, gamma=gamma, mu=mu, nu=nu, dudt_x=dudt_x, dvdt_x=dvdt_x, dndt_x=dndt_x, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn)\n\n land(h, n, u, v, coastx) # how to handle land/coast\n border(n, u, v, 15, bounds) \n drive(h, n, u, v, f, dt, dx, dy, nu, coastx, bounds, mu, itr)\n print('simulation complete')\n except Exception as e:\n print('timestep: ', itr)\n raise e\n return ntt, maxn#, minn, timemax # return surface height through time and maximum heights", "def setup(env, NUM_TRACKS, landtime, t_inter):\n # Create the airport\n airport = Airport(env, NUM_TRACKS, landtime)\n\n # Create 4 initial planes\n for i in range(1):\n env.process(plane(env, 'Aviao %d' % i, airport))\n\n # Create more planes while the simulation is running\n while True:\n yield env.timeout(random.randint(t_inter-2, t_inter+2))\n# yield env.timeout(random.expovariate(1.0 / t_inter))\n i += 1\n env.process(plane(env, 'Aviao %d' % i, airport))", "def Inject(self,injector):\n # Inject constant source of wind mass & energy (as pure KE)\n dt = integrator.Integrator().dt\n injector.AddMass(self._massloss*dt)\n injector.AddKE(self._lum*dt)\n integrator.Integrator().CourantLimiter(self._vcourant)", "def __init__(self,\n debug=False,\n urdf_version=None,\n control_time_step=0.005,\n action_repeat=5,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n render=False,\n num_steps_to_log=2000,\n env_randomizer=None,\n log_path=None,\n signal_type='ik',\n target_position=None,\n backwards=None,\n gait_type='trot',\n terrain_type='plane',\n terrain_id='plane',\n mark='base',\n ):\n self.phase = 0\n\n self._gait_type = gait_type \n # for observation space bounding \n self.max_speed = 1.0\n self.min_speed = 0.5 # change back to 0.2 for OLD TD3 model evaluation\n \n self.min_side_speed = 0.0\n self.max_side_speed = 0.0\n\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des = [self.speed, self.side_speed]\n\n # Initialization variables for periodic reward sum composition\n self.theta_FL = phase_constants.PHASE_VALS[self._gait_type]['front_left']\n self.theta_FR = phase_constants.PHASE_VALS[self._gait_type]['front_right']\n self.theta_RL = phase_constants.PHASE_VALS[self._gait_type]['rear_left']\n self.theta_RR = phase_constants.PHASE_VALS[self._gait_type]['rear_right']\n\n self.min_swing_ratio = 0.6\n self.max_swing_ratio = 0.8\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n super(rexPeriodicRewardEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=False,\n 
control_latency=control_latency,\n pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat,\n target_position=target_position,\n signal_type=signal_type,\n backwards=backwards,\n debug=debug,\n terrain_id=terrain_id,\n terrain_type=terrain_type,\n mark=mark,\n ratio=self.ratio,\n forward_reward_cap=5\n )\n\n self.height_des = 0.206 # this is init standing height for rex\n\n self.cycle_complete = 0\n self.cycle_len = 1000 # this is L\n \n # vonmises variables\n self.kappa = phase_constants.VON_MISES_KAPPA\n\n rex_joints = p.getNumJoints(bodyUniqueId=self.rex.quadruped)\n link_name_to_ID = {}\n for i in range(rex_joints):\n name = p.getJointInfo(self.rex.quadruped, i)[12].decode('UTF-8')\n link_name_to_ID[name] = i\n\n self.link_name_to_ID = link_name_to_ID\n self.toe_pos_last = { 'front_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_left_toe_link'])[0],\n 'front_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_right_toe_link'])[0],\n 'rear_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_left_toe_link'])[0],\n 'rear_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_right_toe_link'])[0]\n\n } \n\n print('Using Periodic Reward Composition Rex Environment')", "def main():\n geom = dde.geometry.TimeDomain(0, 3)\n y0 = initial_condition()\n for i in range(0,C)\n # ic3 = dde.IC(geom, lambda X: 27, boundary, component=5)\n data = dde.data.PDE(\n geom,\n SLE_DL,\n [ic1, ic2, ic3, observe_y0, observe_y1, observe_y2],\n num_domain=400,\n num_boundary=2,\n anchors=observe_t,\n )", "def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)", "def Inject(self,injector):\r\n\r\n # Calculate the star's current age\r\n integrator = weltgeist.integrator.Integrator()\r\n t = integrator.time\r\n dt = integrator.dt\r\n age = t-self._tbirth\r\n Teff = 0.0\r\n star = self._star\r\n\r\n # If it's been born, start the tracks\r\n if age > 0.0:\r\n if self._wind:\r\n # Get mass loss and energy from winds\r\n ml0 = star.WindMassLoss(age)\r\n lum0 = star.WindLuminosity(age)\r\n massloss = 0.5*(ml0+star.WindMassLoss(age+dt))*dt\r\n energy = 0.5*(lum0+star.WindLuminosity(age+dt))*dt\r\n # Add mass FIRST since KE needs to be added elastically\r\n injector.AddMass(massloss)\r\n # Add energy to grid as kinetic energy\r\n injector.AddKE(energy)\r\n # Add some thermal energy to account for star's temperature\r\n Teff = 0.5*(star.Teff(age)+star.Teff(age+dt))\r\n TE = 1.5 * wunits.kB * massloss/(wunits.mH/wunits.X)*Teff\r\n injector.AddTE(TE)\r\n # Set the Courant condition\r\n vwind = np.sqrt(2.0*lum0/ml0)\r\n integrator.CourantLimiter(vwind)\r\n if self._radiation:\r\n # Get radiation properties for ionising and non-ionising UV\r\n Lionising = 0.5*(star.LIonising(age)+star.LIonising(age+dt))\r\n Lnonionising = 0.5*(star.LNonIonising(age)+star.LNonIonising(age+dt))\r\n Eionising = 0.5*(star.EPhoton(age)+star.EPhoton(age+dt)) \r\n Teff = 0.5*(star.Teff(age)+star.Teff(age+dt)) \r\n Tion = weltgeist.radiation.IonisedGasTemperature(Teff, self._metal)\r\n injector.AddPhotons(Lionising, Lnonionising, Eionising, Tion)", "def _setup_simulation(self\n ) -> None:\n pass", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def set_up_and_parameterise_experiment(self):\n # Update 
experiment using capacity\n capacity = self._parameter_values[\"Nominal cell capacity [A.h]\"]\n for op_conds in self.experiment.operating_conditions_steps:\n if op_conds.type == \"C-rate\":\n op_conds.type = \"current\"\n op_conds.value = op_conds.value * capacity\n\n # Update terminations\n termination = op_conds.termination\n for term in termination:\n term_type = term[\"type\"]\n if term_type == \"C-rate\":\n # Change type to current\n term[\"type\"] = \"current\"\n # Scale C-rate with capacity to obtain current\n term[\"value\"] = term[\"value\"] * capacity\n\n # Add time to the experiment times\n dt = op_conds.duration\n if dt is None:\n if op_conds.type == \"current\":\n # Current control: max simulation time: 3h / C-rate\n Crate = op_conds.value / capacity\n dt = 3 / abs(Crate) * 3600 # seconds\n else:\n # max simulation time: 1 day\n dt = 24 * 3600 # seconds\n op_conds.duration = dt\n\n # Set up model for experiment\n self.set_up_and_parameterise_model_for_experiment()", "def simulation(self, pvmod=True):\r\n \r\n self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc(\r\n self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in,\r\n self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat)\r\n\r\n self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)\r\n\r\n # Define missing parameters\r\n self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway\r\n self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway\r\n\r\n self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system\r\n self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway\r\n self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway\r\n\r\n print()", "def run(self):\n\n # initializing random network activity\n s_rand_T = np.zeros((self.T, self.N_rand))\n p_rand_T = np.zeros((self.T, self.N_rand))\n r_rand_T = np.zeros((self.T, self.N_rand))\n\n s_rand_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_rand))\n\n # initializing sensory networks\n s_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n p_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n r_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n s_sens_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_sensory_nets * self.N_sensory))\n\n # extend input to be T timesteps and only nonzero for 100 ts\n s_ext_T = np.broadcast_to(self.s_ext, (self.T, self.N_sensory * self.N_sensory_nets)).copy()\n # stimulus is presented for 100 ms\n stim_T = int(200/self.rand_net.dt)\n s_ext_T[:100] = 0\n s_ext_T[100+stim_T:] = 0\n # s_ext_T *= 0\n\n for t in range(1, self.T):\n if (t + 1) % 100 == 0:\n print(f'step {t} of {self.T}')\n s_sens_prev = s_sens_T[t - 1]\n s_rand_prev = s_rand_T[t - 1]\n p_rand_prev = p_rand_T[t - 1]\n s_ext = s_ext_T[t - 1]\n step = self.forward(s_ext=s_ext, s_rand_prev=s_rand_prev, s_sens_prev=s_sens_prev, p_rand_prev=p_rand_prev)\n s_sens_T[t] = step['s_sens']\n p_sens_T[t] = 
step['p_sens']\n r_sens_T[t] = step['r_sens']\n s_rand_T[t] = step['s_rand']\n r_rand_T[t] = step['r_rand']\n p_rand_T[t] = step['p_rand']\n\n p_sens_T = p_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_ext_T = s_ext_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n r_sens_T = r_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_sens_T = s_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n\n return dict(\n n_sensory=self.N_sensory,\n n_rand=self.N_rand,\n mus=self.mus,\n sigma=self.sigma,\n s_ext=s_ext_T,\n s_sens=s_sens_T,\n r_sens=r_sens_T,\n p_sens=p_sens_T,\n s_rand=s_rand_T,\n r_rand=r_rand_T,\n p_rand=p_rand_T\n )", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def __init__(self, services, config):\n #pytau.setNode(1)\n #timer = pytau.profileTimer(\"component.__init__\", \"\", str(os.getpid()))\n #pytau.start(timer)\n self.component_id = None\n self.invocation_q = None\n self.services = weakref.proxy(services)\n self.config = config\n self.start_time=0.0\n self.sys_exit = None\n for i in config.keys():\n try:\n setattr(self, i, config[i])\n except Exception, e:\n print 'Error setting Component parameter : ', i, ' - ', e\n #pytau.stop(timer)\n raise\n\n #pytau.stop(timer)", "def __init__(self, n): # this is equivalent to starting a random one\n self.n = n\n # From table S1 in the supplemental materials\n # each c parameters is [body,limb]\n self.cv0 = [0.3, 0.0]\n self.cv1 = [0.2, 0.2]\n self.cR0 = [0.196,0.131]\n self.cR1 = [0.065,0.131]\n #[[dbodylow,dbodyhigh],[dlimblow,dlimbhigh]]\n self.d_params = [[1,5],[1,3]]\n # which oscillators are limb oscillators and which ones are body oscillators is pretty constant\n n_body = n - 4\n self.osc_class = [0 if i < n_body else 1 for i in range(self.n)] # 0 for body oscillator, 1 for limb oscillator\n # list of keys that can be mutated during evolution\n self.evolvables = ['w', 'phi', 'a', 'gsl', 'gsh', 'gb1', 'gb2', 'theta', 'ampl', 'ampl_dot']\n self.scalars = set(['gsl', 'gsh', 'gb1', 'gb2'])\n self.nonzeros = set([int(i) for i in \"8 160 29 181 50 202 71 223 92 244 113 265 134 286 155 307 1 20 22 41 43 62 64 83 85 104 106 125 127 146 169 188 190 209 211 230 232 251 253 272 274 293 295 314 320 321 322 323 364 365 366 367 348 349 350 351 392 393 394 395 338 376 337 356 359 397 379 398\".split(\" \")])\n self.shapes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}\n self.sizes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. N is the number of macro-particles. 
It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline", "def Inject(self,injector):\n dt = integrator.Integrator().dt\n injector.AddPhotons(self._QH*dt)" ]
[ "0.60317343", "0.5933767", "0.59041595", "0.5831756", "0.57520694", "0.57439333", "0.5739036", "0.57341063", "0.5705485", "0.5683423", "0.5623893", "0.5612428", "0.5607956", "0.5603607", "0.5600375", "0.5599384", "0.55948526", "0.55602384", "0.5559706", "0.55517477", "0.55408883", "0.55262274", "0.5508686", "0.5468752", "0.54408324", "0.54368925", "0.54319364", "0.54122734", "0.54115576", "0.5409604" ]
0.611647
0
Wrap a collection to print iteration progress as a percentage.
def progress_iterator(collection: Collection, message: str) -> Iterable:
    num_items = len(collection)
    last_percentage = -1
    for i, item in enumerate(collection):
        percentage = 100 * i // num_items
        if percentage > last_percentage:
            last_percentage = percentage
            print(f"{message} {percentage}%", end='\r')
        yield item
    print(f"{message} 100%")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def progress(items, desc='', total=None, min_delay=0.1):\n total = total or len(items)\n t_start = time.time()\n t_last = 0\n for n, item in enumerate(items):\n t_now = time.time()\n if t_now - t_last > min_delay:\n print('\\r%s%d/%d (%6.2f%%)' % (desc, n+1, total, n / float(total) * 100), end=' ')\n if n > 0:\n t_done = t_now - t_start\n t_total = t_done / n * total\n print('(ETA: %d:%02d)' % divmod(t_total - t_done, 60), end=' ')\n sys.stdout.flush()\n t_last = t_now\n yield item\n t_total = time.time() - t_start\n print('\\r%s%d/%d (100.00%%) (took %d:%02d)' % ((desc, total, total) + divmod(t_total, 60)))", "def progress(i, my_list, message=\"\"):\n\tmy_progress = (i / len(my_list)) * 100\n\tmy_progress = str(round(my_progress, 1)) + \"% \" + message\n\tsys.stdout.write('\\r')\n\tsys.stdout.write(my_progress)\n\tsys.stdout.flush()", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)", "def progressBar(iterable, prefix = 'Progress:', suffix = 'Complete', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n\n #-- Progress Bar Printing Function\n def printProgressBar (iteration):\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {iteration:,} of {total:,} ({100 * (iteration / float(total)):.1f}%) {suffix}', end = printEnd)\n\n #-- Initial Call\n printProgressBar(0)\n\n #-- Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n\n #--- Print New Line on Complete\n print()", "def log_progress(sequence, every=None, size=None, name='Items'):\n from ipywidgets import IntProgress, HTML, VBox\n from IPython.display import display\n\n is_iterator = False\n if size is None:\n try:\n size = len(sequence)\n except TypeError:\n is_iterator = True\n if size is not None:\n if every is None:\n if size <= 200:\n every = 1\n else:\n every = int(size / 200) # every 0.5%\n else:\n assert every is not None, 'sequence is iterator, set every'\n\n if is_iterator:\n progress = IntProgress(min=0, max=1, value=1)\n progress.bar_style = 'info'\n else:\n progress = IntProgress(min=0, max=size, value=0)\n label = HTML()\n box = VBox(children=[label, progress])\n display(box)\n\n index = 0\n try:\n for index, record in enumerate(sequence, 1):\n if index == 1 or index % every == 0:\n if is_iterator:\n label.value = '{name}: {index} / ?'.format(\n name=name,\n index=index\n )\n else:\n progress.value = index\n label.value = '{name}: {index} / {size}'.format(\n name=name,\n index=index,\n size=size\n )\n yield record\n except:\n progress.bar_style = 'danger'\n raise\n else:\n progress.bar_style = 'success'\n progress.value = index\n label.value = \"{name}: {index}\".format(\n name=name,\n index=str(index or '?')\n )", "def progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update 
Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()", "def progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()", "def progress_bar(iterable, prefix='', suffix='', decimals=1, length=50, fill='█', print_end=\"\\r\"):\n total = len(iterable)\n start_time = time()\n\n # Progress Bar Printing Function\n def printProgressBar(iteration):\n delta_time = (time() - start_time) / (iteration + 1)\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + '-' * (length - filled_length)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}, {delta_time:.2f} it/s, {iteration = }', end=print_end)\n\n # Initial Call\n printProgressBar(0)\n\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()", "def progressBar(iterable, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n\n def printProgressBar(iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 *\n (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()", "def progressbar(iterator, verbosity, length=None):\n\n if verbosity == logging.INFO:\n if not length:\n length = len(iterator)\n\n with click.progressbar(iterator, length=length) as _iterator:\n yield _iterator\n else:\n yield iterator", "def progress_func(completed, total):\n if not self.log:\n return\n dots = (completed * dot_count) / total\n if dots > dot_count:\n dots = dot_count\n self.progress_lock.acquire()\n if self.dots_written < dot_count:\n dots_to_write = dots - self.dots_written\n self.dots_written = dots\n os.write(old_stdout, '.' 
* dots_to_write)\n self.progress_lock.release()", "def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n import sys\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s ' %\n (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(barLength * iteration // total)\n bar = fill * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#'* filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '█' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def printProgress(iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n\tfilledLength\t= int(round(barLength * iteration / float(total)))\n\tpercents\t\t= round(100.00 * (iteration / float(total)), decimals)\n\tbar\t\t\t = '#' * filledLength + '-' * (barLength - filledLength)\n\tsys.stdout.write('%s [%s] %s%s %s (%s/%s total)\\r' % (prefix, bar, percents, '%', suffix, iteration, total))\n\tsys.stdout.flush()\n\tif iteration == total:\n\t\tprint(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / 
float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")", "def as_percent(self, value):\n new_vec = self.copy()\n new_vec.length = value * self.length\n return new_vec", "def progress(iteritem, update=1, stderr=False, start_newline=True):\n if stderr:\n stream = sys.stderr\n else:\n stream = sys.stdout\n start_time = time.time()\n curr_iter = 0\n if start_newline:\n stream.write('\\n')\n\n max_iter = len(iteritem)\n dlen = len(str(max_iter))\n memory = 0\n for idx, item in enumerate(iteritem):\n\n elapsed = int(time.time() - start_time)\n\n curr_iter += 1\n not_update = elapsed % update\n\n if not not_update and elapsed != memory:\n memory = elapsed\n remain = (max_iter - curr_iter) * (curr_iter / elapsed)\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: %d sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed, remain)\n stream.write(out)\n stream.flush()\n\n yield item\n\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: 0 sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed)\n stream.write(out)\n stream.flush()", "def _progress(self, size: float):\n downloaded = 0\n\n def progress(chunk):\n nonlocal downloaded\n downloaded += chunk\n done = int(50 * downloaded / size)\n sys.stdout.write(f'\\r[{\"=\" * done}{\" \" * (50-done)}]')\n sys.stdout.flush()\n\n return progress", "def progress(count, total):\r\n bar_len = 45\r\n filled_len = int(round(bar_len * count / float(total)))\r\n\r\n percents = round(100 * count / float(total), 1)\r\n p_bar = '=' * filled_len + '.' 
* (bar_len - filled_len)\r\n try:\r\n sys.stdout.write(' File {} of {} [{}] {}{}\\r'.format(count, total, p_bar, percents, '%'))\r\n except:\r\n pass\r\n sys.stdout.flush()", "def print_progress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def get_progress(count, block_size, total_size) -> None:\r\n percent = int(count * block_size * 100 / total_size)\r\n print(f\"Downloading clip... {percent}%\", end=\"\\r\", flush=True)", "def _wrap_index__in_verbose(iterations):\n m = len(iterations)\n digits = len(str(m))\n progress = '\\r [ {s:{d}} / {m} ] {s:3.0f}% - ? it/s'\n progress = progress.format(m=m, d=digits, s=0)\n stdout.write(progress)\n beginning = time()\n stdout.write(progress)\n for i, it in enumerate(iterations):\n yield it\n sec_left = ((m-i+1) * (time() - beginning)) / (i+1)\n time_left = str(timedelta(seconds=sec_left))[:7]\n progress = '\\r [ {i:{d}} / {m} ]'.format(i=i+1, d=digits, m=m)\n progress += ' {p:3.0f}%'.format(p=100*(i+1)/m)\n progress += ' - {time_left} left '.format(time_left=time_left)\n stdout.write(progress)" ]
[ "0.59742546", "0.59003246", "0.5892191", "0.58499545", "0.5846175", "0.5842051", "0.58191466", "0.58191466", "0.57947624", "0.5790346", "0.57512665", "0.5720828", "0.57070386", "0.57001543", "0.5694676", "0.5692206", "0.56794834", "0.5662845", "0.5627087", "0.561779", "0.5617453", "0.56135255", "0.56135255", "0.5606485", "0.55897343", "0.5576702", "0.55735457", "0.5570572", "0.55548674", "0.5542266" ]
0.78705937
0
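A minimal usage sketch for the progress_iterator document shown in the record above; the sample workload and the assumption that the function is already defined in scope (with Collection and Iterable taken from typing) are illustrative, not part of the original record.

# Minimal driver, assuming progress_iterator from the record above is in scope.
items = list(range(500))                     # hypothetical sample workload
total = 0
for value in progress_iterator(items, "Summing"):
    total += value                           # per-item work goes here
print(f"sum = {total}")                      # console first shows "Summing 0%" ... "Summing 100%"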
Linearly mix two colors. A mix_amount of 0.0 gives color1, and 1.0 gives color2.
def mix_colors(color1: Color, color2: Color, mix_amount: float) -> Color:
    return [(1-mix_amount)*v1 + mix_amount*v2 for v1, v2 in zip(color1, color2)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mix(a, b, amount):\n return ((1.0 - amount) * a) + (amount * b)", "def mix(self, other, coef=0.5):\n def m(a, b):\n return a * (1 - coef) + b * coef\n\n return Color(from_rgba=(c(m(self.r, other.r)),\n c(m(self.g, other.g)),\n c(m(self.b, other.b)),\n c(m(self.a, other.a))))", "def combine_colors(c1, c2, factor=0.5):\n c3 = QtGui.QColor()\n c3.setRed(int((factor * c1.red() + (1 - factor) * c2.red())))\n c3.setGreen(int((factor * c1.green() + (1 - factor) * c2.green())))\n c3.setBlue(int((factor * c1.blue() + (1 - factor) * c2.blue())))\n return c3", "def combine_colors(c1, c2, factor=0.5):\n c3 = QtGui.QColor()\n c3.setRed(int((factor * c1.red() + (1 - factor) * c2.red())))\n c3.setGreen(int((factor * c1.green() + (1 - factor) * c2.green())))\n c3.setBlue(int((factor * c1.blue() + (1 - factor) * c2.blue())))\n return c3", "def blend(c: float, a: float) -> float:\n return 255 + (c - 255) * a", "def rgb_blend(col1, col2, fraction=0.5):\n return tuple([v1 + (v2-v1)*fraction for (v1, v2) in zip(col1, col2)])", "def mix(\n self,\n color: ColorInput,\n percent: float = util.DEF_MIX,\n *,\n in_place: bool = False,\n **interpolate_args: Any\n ) -> 'Color':\n\n # Mix really needs to be between 0 and 1 or steps will break\n domain = interpolate_args.get('domain')\n if domain is not None:\n interpolate_args['domain'] = interpolate.normalize_domain(domain)\n\n if not self._is_color(color) and not isinstance(color, (str, Mapping)):\n raise TypeError(\"Unexpected type '{}'\".format(type(color)))\n mixed = self.interpolate([self, color], **interpolate_args)(percent)\n return self._hotswap(mixed) if in_place else mixed", "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n pal = _ColorPalette(pal(np.linspace(0, 1, n_colors)))\n return pal", "def addColors(*colorMultPairs):\n netRGB = [0, 0, 0]\n for color, mult in colorMultPairs:\n colorRGB = _getTkWdg().winfo_rgb(color)\n netRGB = [netRGB[ii] + (mult * colorRGB[ii]) for ii in range(3)]\n truncRGB = [max(min(int(val), 0xFFFF), 0) for val in netRGB]\n retColor = \"#%04x%04x%04x\" % tuple(truncRGB)\n #print \"mixColors(%r); netRGB=%s; truncRGB=%s; retColor=%r\" % (colorMultPairs, netRGB, truncRGB, retColor)\n return retColor", "def mix_with(self, base_color, factor=1.0):\n base_color = _to_rgb(base_color, \"base_color\")\n operation = _embed44((1 - factor) * np.eye(3))\n operation[:3, 3] = factor * base_color\n\n return self._then(operation)", "def mix(src_color, src_f, dst_color, dst_f):\n src_a = src_color[:, 3] / 255\n dst_a = dst_color[:, 3] / 255\n out_a = src_a * src_f + dst_a * dst_f\n outafilter = out_a > 0\n out_rgb = np.zeros((src_color.shape[0], 3), dtype='u1')\n out_rgb[outafilter] = np.clip(np.round((src_color[outafilter, 0:3] * np.tile(src_a[outafilter].reshape(-1, 1), (1, 3)) * np.tile(src_f[outafilter].reshape(-1, 1), (1, 3)) + dst_color[outafilter, 0:3] * np.tile(dst_a[outafilter].reshape(-1, 1), (1, 3)) * np.tile(dst_f[outafilter].reshape(-1, 1), (1, 3))) / np.tile(out_a[outafilter].reshape(-1, 1), (1, 3))), 0, 255)\n return np.concatenate([out_rgb, np.clip(np.round(out_a * 255), 0, 255).reshape(-1, 1)], axis=1).astype('u1').copy()", "def blend(ch1, ch2):\n if ch1.mode != \"LA\" or ch2.mode != \"LA\":\n raise ValueError(\"Images must be in LA\")\n src = ch2\n dst = ch1\n outa = src.channels[1] + dst.channels[1] * (1 - src.channels[1])\n dst.channels[0] 
= (src.channels[0] * src.channels[1] +\n dst.channels[0] * dst.channels[1] *\n (1 - src.channels[1])) / outa\n dst.channels[0][outa == 0] = 0\n dst.channels[1] = outa", "def mergedColors(colorA, colorB, factor = 50):\r\n return QColor(\r\n (colorA.red() * factor) / 100 + (colorB.red() * (100 - factor)) / 100, \r\n (colorA.green() * factor) / 100 + (colorB.green() * (100 - factor)) / 100, \r\n (colorA.blue() * factor) / 100 + (colorB.blue() * (100 - factor)) / 100)", "def blend_color(a, b, ratio) -> tuple:\n return (\n int(a[0] + (b[0] - a[0]) * ratio[0]),\n int(a[1] + (b[1] - a[1]) * ratio[1]),\n int(a[2] + (b[2] - a[2]) * ratio[2])\n )", "def blendRGB(r: float, g: float, b: float, a: float):\n return blend(r, a), blend(g, a), blend(b, a)", "def funky_sum(a, b, mix):\n if mix <= 0:\n return a\n elif mix >= 1:\n return b\n else:\n return (1 - mix) * a + mix * b", "def _mix(a, b, c):\n c = _cutoff32(c)\n a = _cutoff32(a-b-c) ^ c >> 13\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 8)\n c = _cutoff32(c-a-b) ^ b >> 13\n a = _cutoff32(a-b-c) ^ c >> 12\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 16)\n c = _cutoff32(c-a-b) ^ b >> 5\n a = _cutoff32(a-b-c) ^ c >> 3\n b = _cutoff32(b-c-a) ^ _cutoff32(a << 10)\n c = _cutoff32(c-a-b) ^ b >> 15\n return a, b, c", "def mix(a, b, c, d, e, f, g, h):\n a ^= (b << 11); d += a; b +=c\n b ^= c >> 2; e += b; c += d\n c ^= (d << 8); f += c; d += e\n d ^= e >> 16; g += d; e += f\n e ^= (f << 10); h += e; f += g\n f ^= g >> 4; a += f; g += h\n g ^= (h << 8); b += g; h += a\n h ^= a >> 9; c +=h; a += b\n return a, b, c, d, e, f, g, h", "def blend(image1, image2, factor):\n assert 0.0 <= factor <= 1.0\n image1 = tf.convert_to_tensor(image1)\n image2 = tf.convert_to_tensor(image2)\n dtype = image1.dtype\n if factor == 0.0:\n return image1\n if factor == 1.0:\n return image2\n\n image1 = tf.cast(image1, tf.float32)\n image2 = tf.cast(image2, tf.float32)\n assert image1.shape == image2.shape\n difference = image2 - image1\n scaled = factor * difference\n temp = image1 + scaled\n flip = 255 if dtype == tf.uint8 else 1.0\n temp = tf.clip_by_value(temp, 0.0, flip)\n return tf.cast(temp, dtype)", "def lighten(self, amount):\n h, light, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n\n light = light + amount\n\n if light < 0.0:\n light = 0.0\n if light > 1.0:\n light = 1.0\n\n r, g, b = colorsys.hls_to_rgb(h, light, s)\n return Color(from_rgba=(c(r), c(g), c(b), c(self.a)))", "def blend(image1, image2, factor, name=None):\n _check_image_dtype(image1)\n _check_image_dtype(image2)\n assert image1.dtype == image2.dtype, \"image1 type should exactly match type of image2\"\n\n if factor == 0.0:\n return image1\n elif factor == 1.0:\n return image2\n else:\n with tf.name_scope(name or \"blend\"):\n orig_dtype = image2.dtype\n\n image1, image2 = tf.image.convert_image_dtype(image1, tf.float32), tf.image.convert_image_dtype(image2, tf.float32)\n scaled_diff = (image2 - image1) * factor\n\n blended_image = image1 + scaled_diff\n\n blended_image = tf.image.convert_image_dtype(blended_image, orig_dtype, saturate=True)\n return blended_image", "def multiply_color(clip, factor):\n return clip.image_transform(\n lambda frame: np.minimum(255, (factor * frame)).astype(\"uint8\")\n )", "def fade_color(c1, c2, n):\n assert n >= 2\n\n # decompose RGB. 
ignore alpha if present.\n rgb1 = get_channels(c1)\n rgb2 = get_channels(c2)\n\n # find distances by chanel.\n step_by_channel = (rgb2 - rgb1) / (n - 1)\n\n # build steps.\n scale = [rgb1 + (i * step_by_channel) for i in range(n)]\n scale = [get_hexcode(c) for c in scale]\n\n assert scale[0] == c1\n assert scale[-1] == c2\n\n return scale", "def _alpha_blend_simple(rgb1, alpha1, rgb2, alpha2):\n c_alpha1 = (1.0 - alpha1)\n alpha3 = alpha1 + alpha2 * c_alpha1\n\n numer1 = (rgb1 * alpha1[..., None])\n numer2 = (rgb2 * (alpha2 * c_alpha1)[..., None])\n with np.errstate(invalid='ignore'):\n rgb3 = (numer1 + numer2) / alpha3[..., None]\n rgb3[alpha3 == 0] = 0\n return rgb3, alpha3", "def _lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n c = color\n amount += 0.5\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def _blend(img1, img2, alpha):\n return img1.mul(alpha).add(1 - alpha, img2)", "def interpolate ( hsl1, hsl2, amt ):\n if isinstance( hsl1, Colz ):\n h1 = hsl1.h\n s1 = hsl1.s\n l1 = hsl1.l\n a1 = hsl1.a\n if isinstance( hsl1, list ):\n h1 = hsl1[0]\n s1 = hsl1[1]\n l1 = hsl1[2]\n if len(hsl1) > 3:\n a1 = hsl1[3]\n\n if isinstance( h1, int ):\n h1 = h1 / 360.0\n if isinstance( s1, int ):\n s1 = s1 / 100.0\n if isinstance( l1, int ):\n l1 = l1 / 100.0\n\n if isinstance( hsl2, Colz ):\n h2 = hsl2.h\n s2 = hsl2.s\n l2 = hsl2.l\n a2 = hsl2.a\n if isinstance( hsl2, list ):\n h2 = hsl2[0]\n s2 = hsl2[1]\n l2 = hsl2[2]\n if len(hsl2) > 3:\n a2 = hsl2[3]\n\n if isinstance( h2, int ):\n h2 = h2 / 360.0\n if isinstance( s2, int ):\n s2 = s2 / 100.0\n if isinstance( l2, int ):\n l2 = l2 / 100.0\n\n h3 = Colz.hueLerp( h1, h2, amt )\n s3 = Colz.linearLerp( s1, s2, amt )\n l3 = Colz.linearLerp( l1, l2, amt )\n\n if 'a1' in locals() and 'a2' in locals():\n a3 = Colz.linearLerp( a1, a2, amt )\n else:\n a3 = 1.0\n\n c_result = Colz()\n c_result.setHsla( h3, s3, l3, a3 )\n return c_result", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])" ]
[ "0.6814503", "0.66157776", "0.65710086", "0.65704286", "0.6514247", "0.6078982", "0.60165936", "0.60012364", "0.5994862", "0.5946861", "0.5756687", "0.5738199", "0.57059467", "0.56846344", "0.56739783", "0.5672902", "0.55314547", "0.54851085", "0.5474063", "0.54560703", "0.5430097", "0.54221416", "0.5421654", "0.54205036", "0.539082", "0.53444254", "0.5315575", "0.528264", "0.528264", "0.528264" ]
0.80415565
0
Multiply two vectors elementwise.
def multiply_vectors(vec1: Iterable[float], vec2: Iterable[float]) -> Iterable[float]: return [v1*v2 for v1, v2 in zip(vec1, vec2)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mulVectors(X1,X2):\n _checkSize(X1,X2)\n return sum([ X1[i] * X2[i] for i in range(len(X1))])", "def dot_product(vector1, vector2):\n return [reduce_by_multiplication(pair) for pair in zip(vector1, vector2)]", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def vectorMultiply(v, f):\n return [x * f for x in v]", "def vec_product(vec1: List[int], vec2: List[int]) -> int:\n return sum(map(lambda v1, v2: v1 * v2, vec1, vec2))", "def multiplies(x, y):\n x[:] *= y[:]\n return x", "def vecDot(a, b):\n ret=0.0\n for i in range(len(a)):\n ret+=a[i]*b[i]\n return ret", "def multiply(self, other):\n checkVector(self, other)\n futures = self.client.map(_call_multiply, self.vecDask, other.vecDask, pure=False)\n daskD.wait(futures)\n return self", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def __mul__(self, other):\n return Vec2d(self.v[0] * other, self.v[1] * other)", "def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)", "def dotProduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def dotproduct(vec1, vec2):\n import operator\n return sum(map(operator.mul, vec1, vec2))", "def dotproduct(v1, v2):\n\treturn sum(imap(operator.mul, v1, v2))", "def dotproduct(vec1, vec2):\n return sum((a*b) for a, b in zip(vec1, vec2))", "def vec_dot(v1,v2):\r\n \r\n return np.dot(v1,v2)", "def scalar_multiply(s: float, v: Vector) -> Vector:\n return [s * v_item for v_item in v]", "def dot(vector1, vector2):\n return sum(a1 * a2 for a1, a2 in zip(vector1, vector2))", "def dotProduct(vectorA, vectorB):\r\n product =0\r\n for i in range(len(vectorA)):\r\n product += eval(vectorA[i])*eval(vectorB[i])\r\n return product", "def __mul__(self, other):\n x = self.x * other\n y = self.y * other\n return vec(x, y)", "def test_multiply_vec(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a * b\n assert c.x == 3\n assert c.y == 8", "def multiply_vector(self, dv, spm):\n product = []\n for a, b in zip(dv, spm):\n product.append(a * b)\n return product", "def pairwise_mult(a, b):\n return [a[i]*b[i] for i in xrange(0, min(len(a), len(b)))]", "def dot_product(first_vector, second_vector):\n first_unpacker = VectorUnpacker(first_vector)\n second_unpacker = VectorUnpacker(second_vector)\n if first_unpacker.unpacked_vector_length != second_unpacker.unpacked_vector_length:\n raise ApplicationError(\"Unpacked vector sizes are unequal\")\n\n # looks better than a 'map' one-liner to me\n value = 0\n for piece in zip(first_unpacker(), second_unpacker()):\n value += piece[0] * piece[1]\n\n return value", "def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1])\n return Vector(newlist)", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z" ]
[ "0.8108592", "0.758668", "0.7563538", "0.75536615", "0.7483179", "0.7424614", "0.739436", "0.73315334", "0.73269993", "0.7323699", "0.7294078", "0.72823274", "0.72690797", "0.72641194", "0.72641194", "0.72514796", "0.72514236", "0.724465", "0.72237176", "0.7200534", "0.71415305", "0.70957965", "0.7093935", "0.7091023", "0.708319", "0.7082214", "0.70606995", "0.7050575", "0.7049439", "0.70379937" ]
0.8106799
1
Create a directional light given its direction and color. The dot_clip parameter adjusts the value of the dot product used in the lighting calculation; a lower value compresses the range of brightnesses produced by the light.
def __init__(self, direction: Point3D, color: Color, dot_clip: float = 0.0): self._direction = normalize(*direction) self._color = color self._dot_clip = dot_clip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directionalLight(*args, decayRate: int=0, discRadius: Union[float, bool]=0.0, exclusive:\n bool=True, intensity: Union[float, bool]=0.0, name: Union[AnyStr, bool]=\"\",\n position: Union[List[float, float, float], bool]=None, rgb:\n Union[List[float, float, float], bool]=None, rotation: Union[List[float,\n float, float], bool]=None, shadowColor: Union[List[float, float, float],\n bool]=None, shadowDither: Union[float, bool]=0.0, shadowSamples: Union[int,\n bool]=0, softShadow: bool=True, useRayTraceShadows: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[double], Any]:\n pass", "def line_darkening(clip: vs.VideoNode, strength: float = 0.2, **kwargs: Any) -> vs.VideoNode:\n import havsfunc as haf\n\n darken = haf.Toon(clip, strength, **kwargs)\n darken_mask = core.std.Expr(\n [core.std.Convolution(clip, [5, 10, 5, 0, 0, 0, -5, -10, -5], divisor=4, saturate=False),\n core.std.Convolution(clip, [5, 0, -5, 10, 0, -10, 5, 0, -5], divisor=4, saturate=False)],\n ['x y max {neutral} / 0.86 pow {peak} *'\n .format(neutral=1 << (clip.format.bits_per_sample-1), # type: ignore[union-attr]\n peak=(1 << clip.format.bits_per_sample)-1)]) # type: ignore[union-attr]\n return core.std.MaskedMerge(clip, darken, darken_mask)", "def light(color, dist):\n return tuple( float(x*dist*dist) for x in color )", "def offsetColor(self, input_color, amount, clamp=None):\n\t\tif amount == 0: # Do nothing\n\t\t\treturn input_color\n\n\t\telif amount > 0: # Lighten\n\t\t\tif clamp is None:\n\t\t\t\tmin_clamp = 0\n\t\t\telse:\n\t\t\t\tmin_clamp = clamp\n\t\t\tmax_clamp = 255\n\n\t\telif amount < 0: # Darken\n\t\t\tmin_clamp = 0\n\t\t\tif clamp is None:\n\t\t\t\tmax_clamp = 255\n\t\t\telse:\n\t\t\t\tmax_clamp = clamp\n\n\t\tlum = max(min_clamp, min(input_color.lightness()+amount, max_clamp))\n\t\treturn QtGui.QColor(lum, lum, lum)", "def compute_shaded_color(self, normal: Point3D, material_color: Color) -> Color:\n dot_product = sum(multiply_vectors(self._direction, normal))\n light_amount = max(dot_product, self._dot_clip)\n light_amount = (light_amount - self._dot_clip) / (1.0 - self._dot_clip)\n return [vm*vl*light_amount for vm, vl in zip(material_color, self._color)]", "def dot_with_light_vector(val):\n return val.dot(Vector([0, 0, 1]))", "def _create_example_light():\n return Light({\"warning\": False, \"off\": True})", "def light_source_directions():\n L = np.array([[-0.06059872, -0.44839055, 0.8917812],\n [-0.05939919, -0.33739538, 0.93948714],\n [-0.05710194, -0.21230722, 0.97553319],\n [-0.05360061, -0.07800089, 0.99551134],\n [-0.04919816, 0.05869781, 0.99706274],\n [-0.04399823, 0.19019233, 0.98076044],\n [-0.03839991, 0.31049925, 0.9497977],\n [-0.03280081, 0.41611025, 0.90872238],\n [-0.18449839, -0.43989616, 0.87889232],\n [-0.18870114, -0.32950199, 0.92510557],\n [-0.1901994, -0.20549935, 0.95999698],\n [-0.18849605, -0.07269848, 0.97937948],\n [-0.18329657, 0.06229884, 0.98108166],\n [-0.17500445, 0.19220488, 0.96562453],\n [-0.16449474, 0.31129005, 0.93597008],\n [-0.15270716, 0.4160195, 0.89644202],\n [-0.30139786, -0.42509698, 0.85349393],\n [-0.31020115, -0.31660118, 0.89640333],\n [-0.31489186, -0.19549495, 0.92877599],\n [-0.31450962, -0.06640203, 0.94692897],\n [-0.30880699, 0.06470146, 0.94892147],\n [-0.2981084, 0.19100538, 0.93522635],\n [-0.28359251, 0.30729189, 0.90837601],\n [-0.26670649, 0.41020998, 0.87212122],\n [-0.40709586, -0.40559588, 0.81839168],\n [-0.41919869, -0.29999906, 0.85689732],\n [-0.42618633, -0.18329412, 0.88587159],\n [-0.42691512, 
-0.05950211, 0.90233197],\n [-0.42090385, 0.0659006, 0.90470827],\n [-0.40860354, 0.18720162, 0.89330773],\n [-0.39141794, 0.29941372, 0.87013988],\n [-0.3707838, 0.39958255, 0.83836338],\n [-0.499596, -0.38319693, 0.77689378],\n [-0.51360334, -0.28130183, 0.81060526],\n [-0.52190667, -0.16990217, 0.83591069],\n [-0.52326874, -0.05249686, 0.85054918],\n [-0.51720021, 0.06620003, 0.85330035],\n [-0.50428312, 0.18139393, 0.84427174],\n [-0.48561334, 0.28870793, 0.82512267],\n [-0.46289771, 0.38549809, 0.79819605],\n [-0.57853599, -0.35932235, 0.73224555],\n [-0.59329349, -0.26189713, 0.76119165],\n [-0.60202327, -0.15630604, 0.78303027],\n [-0.6037003, -0.04570002, 0.7959004],\n [-0.59781529, 0.06590169, 0.79892043],\n [-0.58486953, 0.17439091, 0.79215873],\n [-0.56588359, 0.27639198, 0.77677747],\n [-0.54241965, 0.36921337, 0.75462733],\n [0.05220076, -0.43870637, 0.89711304],\n [0.05199786, -0.33138635, 0.9420612],\n [0.05109826, -0.20999284, 0.97636672],\n [0.04919919, -0.07869871, 0.99568366],\n [0.04640163, 0.05630197, 0.99733494],\n [0.04279892, 0.18779527, 0.98127529],\n [0.03870043, 0.30950341, 0.95011048],\n [0.03440055, 0.41730662, 0.90811441],\n [0.17290651, -0.43181626, 0.88523333],\n [0.17839998, -0.32509996, 0.92869988],\n [0.18160174, -0.20480196, 0.96180921],\n [0.18200745, -0.07490306, 0.98044012],\n [0.17919505, 0.05849838, 0.98207285],\n [0.17329685, 0.18839658, 0.96668244],\n [0.1649036, 0.30880674, 0.93672045],\n [0.1549931, 0.41578148, 0.89616009],\n [0.28720483, -0.41910705, 0.8613145],\n [0.29740177, -0.31410186, 0.90160535],\n [0.30420604, -0.1965039, 0.9321185],\n [0.30640529, -0.07010121, 0.94931639],\n [0.30361153, 0.05950226, 0.95093613],\n [0.29588748, 0.18589214, 0.93696036],\n [0.28409783, 0.30349768, 0.90949304],\n [0.26939905, 0.40849857, 0.87209694],\n [0.39120402, -0.40190413, 0.8279085],\n [0.40481085, -0.29960803, 0.86392315],\n [0.41411685, -0.18590756, 0.89103626],\n [0.41769724, -0.06449957, 0.906294],\n [0.41498764, 0.05959822, 0.90787296],\n [0.40607977, 0.18089099, 0.89575537],\n [0.39179226, 0.29439419, 0.87168279],\n [0.37379609, 0.39649585, 0.83849122],\n [0.48278794, -0.38169046, 0.78818031],\n [0.49848546, -0.28279175, 0.8194761],\n [0.50918069, -0.1740934, 0.84286803],\n [0.51360856, -0.05870098, 0.85601427],\n [0.51097962, 0.05899765, 0.8575658],\n [0.50151639, 0.17420569, 0.84742769],\n [0.48600297, 0.28260173, 0.82700506],\n [0.46600106, 0.38110087, 0.79850181],\n [0.56150442, -0.35990283, 0.74510586],\n [0.57807114, -0.26498677, 0.77176147],\n [0.58933134, -0.1617086, 0.7915421],\n [0.59407609, -0.05289787, 0.80266769],\n [0.59157958, 0.057798, 0.80417224],\n [0.58198189, 0.16649482, 0.79597523],\n [0.56620006, 0.26940003, 0.77900008],\n [0.54551481, 0.36380988, 0.7550205]], dtype=float)\n return L", "def blend(self, color, alpha):\n return Color(rgb=lerp(self.rgb, color.rgb, alpha))", "def createLight(type, pos, centroid):\n light = cmds.shadingNode('areaLight', asLight=True)\n lookThruAndFrame(light)\n cmds.xform(light, ws=True, piv=centroid)\n if pos == 'key':\n cmds.setAttr(light+'.rotateY', -45)\n cmds.setAttr(light+'.rotateZ', -45)\n elif pos == 'fill':\n cmds.setAttr(light+'.rotateY', 45)\n cmds.setAttr(light+'.rotateZ', 20)\n cmds.setAttr(light+'.intensity', 0.5)\n elif pos == 'rim':\n cmds.setAttr(light+'.rotateY', -135)\n cmds.setAttr(light+'.rotateZ', -45)\n cmds.setAttr(light+'.intensity', 0.7)\n cmds.xform(light, ws=True, cp=True)\n return light", "def led(color: int, /) -> None:", "def adjust_lightness(color, amount=0.5):\n 
import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])", "def clip_gradient(model, clip_norm):\r\n totalnorm = 0\r\n for p in model.parameters():\r\n if p.requires_grad:\r\n modulenorm = p.grad.data.norm()\r\n totalnorm += modulenorm ** 2\r\n totalnorm = np.sqrt(totalnorm)\r\n\r\n norm = clip_norm / max(totalnorm, clip_norm)\r\n for p in model.parameters():\r\n if p.requires_grad:\r\n p.grad.mul_(norm)", "def clip_gradient(model, clip_norm):\n totalnorm = 0\n for p in model.parameters():\n if p.requires_grad:\n modulenorm = p.grad.data.norm()\n totalnorm += modulenorm ** 2\n totalnorm = np.sqrt(totalnorm)\n\n norm = clip_norm / max(totalnorm, clip_norm)\n for p in model.parameters():\n if p.requires_grad:\n p.grad.mul_(norm)", "def calculateLighting(x,y,z, xnormal, ynormal, znormal):\n dummy = 0\n clr = dislin.getlit(x,y,z,xn,yn,zn,dummy)", "def clip_gradient(model, clip):\n totalnorm = 0\n for p in model.parameters():\n modulenorm = p.grad.data.norm()\n totalnorm += modulenorm ** 2\n totalnorm = math.sqrt(totalnorm)\n return min(1, clip / (totalnorm + 1e-6))", "def build_light(self, item):\n\n # Validete NMS object.\n if \"ObjectID\" not in item:\n return\n\n # Get object id from item.\n object_id = item[\"ObjectID\"]\n # Find light data\n if object_id not in self.lights_dictionary:\n return\n\n # Build Lights\n light_information = self.lights_dictionary[object_id]\n for idx, light_values in enumerate(light_information.values()):\n # Get Light Properties.\n light_type = light_values[\"type\"]\n light_location = light_values[\"location\"]\n\n # Create light.\n light = bpy.ops.object.light_add(\n type=light_type.upper(),\n location=light_location\n )\n light = bpy.context.object\n light[\"NMS_LIGHT\"] = True\n light.name = \"{0}_light{1}\".format(item.name, idx)\n data_copy = deepcopy(light_values)\n\n # Remove invalid blender properties.\n data_copy.pop(\"type\")\n data_copy.pop(\"location\")\n\n # Apply all other properties to blender object.\n for key, value in data_copy.items():\n if isinstance(value, list):\n value = mathutils.Vector(tuple(value))\n setattr(light.data, key, value)\n\n # Parent to object.\n utils.parent(light, item)\n\n # Disable Selection.\n light.hide_viewport = True\n light.hide_select = True", "def clip_gradient(model, clip_norm):\n totalnorm = 0\n for p in model.parameters():\n if p.requires_grad:\n modulenorm = p.grad.data.norm()\n totalnorm += modulenorm ** 2\n totalnorm = torch.sqrt(totalnorm).item()\n norm = (clip_norm / max(totalnorm, clip_norm))\n for p in model.parameters():\n if p.requires_grad:\n p.grad.mul_(norm)", "def lighter(clr, f=1/3):\n gaps = [f*(1 - val) for val in clr]\n new_clr = [val + gap for gap, val in zip(gaps, clr)]\n return new_clr", "def lightness(\n self,\n ax=None,\n figsize=None,\n multiplier=None,\n filter_field=None,\n lightness_field=None,\n clim=None,\n colorwheel=True,\n colorwheel_xlabel=None,\n colorwheel_ylabel=None,\n colorwheel_args=None,\n filename=None,\n **kwargs,\n ):\n if self.field.nvdim == 2:\n if lightness_field is None:\n lightness_field = self.field.norm\n if filter_field is None:\n filter_field = self.field._valid_as_field\n x = self.field._r_dim_mapping[self.field.mesh.region.dims[0]]\n y = self.field._r_dim_mapping[self.field.mesh.region.dims[1]]\n return plot_util.inplane_angle(self.field, x, y).mpl.lightness(\n ax=ax,\n figsize=figsize,\n 
multiplier=multiplier,\n filter_field=filter_field,\n lightness_field=lightness_field,\n clim=clim,\n colorwheel=colorwheel,\n colorwheel_xlabel=colorwheel_xlabel,\n colorwheel_ylabel=colorwheel_ylabel,\n colorwheel_args=colorwheel_args,\n filename=filename,\n **kwargs,\n )\n elif self.field.nvdim == 3:\n if lightness_field is None:\n # find vector components pointing along the two axes 0 and 1\n vdims = [\n self.field._r_dim_mapping[self.field.mesh.region.dims[0]],\n self.field._r_dim_mapping[self.field.mesh.region.dims[1]],\n ]\n # find the third vector component for lightness\n lightness_vdim = (set(self.field.vdims) - set(vdims)).pop()\n lightness_field = getattr(self.field, lightness_vdim)\n if filter_field is None:\n filter_field = self.field._valid_as_field\n x = self.field._r_dim_mapping[self.field.mesh.region.dims[0]]\n y = self.field._r_dim_mapping[self.field.mesh.region.dims[1]]\n return plot_util.inplane_angle(self.field, x, y).mpl.lightness(\n ax=ax,\n figsize=figsize,\n multiplier=multiplier,\n filter_field=filter_field,\n lightness_field=lightness_field,\n clim=clim,\n colorwheel=colorwheel,\n colorwheel_xlabel=colorwheel_xlabel,\n colorwheel_ylabel=colorwheel_ylabel,\n colorwheel_args=colorwheel_args,\n filename=filename,\n **kwargs,\n )\n\n ax = self._setup_axes(ax, figsize)\n\n if filter_field is None:\n filter_field = self.field._valid_as_field\n\n multiplier = self._setup_multiplier(multiplier)\n extent = self._extent(multiplier)\n\n if lightness_field is None:\n lightness_field = self.field.norm\n elif lightness_field.nvdim != 1:\n raise ValueError(f\"Cannot use {lightness_field.nvdim=} lightness_field.\")\n elif lightness_field.mesh.region.ndim != 2:\n raise ValueError(\n \"'lightness_field' must be defined on a 2d mesh, not\"\n f\" {lightness_field.mesh.region.ndim=}.\"\n )\n\n values = self.field.array.copy().reshape(self.field.mesh.n)\n\n if not np.array_equal(lightness_field.mesh.n, self.field.mesh.n):\n lightness_field = lightness_field.resample(self.field.mesh.n)\n lightness = lightness_field.array.reshape(self.field.mesh.n)\n\n rgb = plot_util.hls2rgb(\n hue=values, lightness=lightness, saturation=None, lightness_clim=clim\n ).squeeze()\n self._filter_values(filter_field, rgb)\n\n # alpha channel to hide points with nan values (filter field)\n # all three rgb values are set to nan\n rgba = np.empty((*rgb.shape[:-1], 4))\n rgba[..., :3] = rgb\n rgba[..., 3] = 1.0\n rgba[..., 3][np.isnan(rgb[..., 0])] = 0\n\n kwargs[\"cmap\"] = \"hsv\" # only hsv cmap allowed\n ax.imshow(\n np.transpose(rgba, (1, 0, 2)), origin=\"lower\", extent=extent, **kwargs\n )\n\n if colorwheel:\n if colorwheel_args is None:\n colorwheel_args = {}\n cw_ax = add_colorwheel(ax, **colorwheel_args)\n if colorwheel_xlabel is not None:\n cw_ax.arrow(100, 100, 60, 0, width=5, fc=\"w\", ec=\"w\")\n cw_ax.annotate(colorwheel_xlabel, (115, 140), c=\"w\")\n if colorwheel_ylabel is not None:\n cw_ax.arrow(100, 100, 0, -60, width=5, fc=\"w\", ec=\"w\")\n cw_ax.annotate(colorwheel_ylabel, (40, 80), c=\"w\")\n\n self._axis_labels(ax, multiplier)\n\n self._savefig(filename)", "def led(color: Tuple[int, int, int], /) -> None:", "def add_lego_colors(df, color_df):\n # Can't use uint8 for variance, numbers become too large. 
\n df[['R', 'G', 'B']] = df[['R', 'G', 'B']].astype('int')\n\n # Determine which lego color is closest (Euclidean distance) to the image color.\n cmask = ((color_df.c_Palette2016==True) & (color_df.c_Transparent==False) \n & (color_df.c_Glow==False) & (color_df.c_Metallic==False))\n for index, row in color_df[cmask].iterrows():\n if index == color_df.index[0]:\n df['cvar_min'] = (df.R-row.R)**2 + (df.G-row.G)**2 + (df.B-row.B)**2\n df['R_lego'] = row.R\n df['G_lego'] = row.G\n df['B_lego'] = row.B\n df['color'] = row.Color\n else:\n df['cvar'] = (df.R-row.R)**2 + (df.G-row.G)**2 + (df.B-row.B)**2\n mask = df.cvar < df.cvar_min\n df.loc[mask, 'cvar_min'] = df.loc[mask, 'cvar']\n df.loc[mask, 'R_lego'] = row.R\n df.loc[mask, 'G_lego'] = row.G\n df.loc[mask, 'B_lego'] = row.B\n df.loc[mask, 'color'] = row.Color\n\n # Drop helper columns we no longer need\n df.drop(columns=['cvar', 'cvar_min'], inplace=True)\n return df", "def _lighten_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n c = color\n amount += 0.5\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])", "def lightness(color):\n\n strongest = max(color.red, color.green, color.blue)\n weakest = min(color.red, color.green, color.blue)\n return 0.5 * (strongest + weakest) / 255", "def gradient_fill(x, y, fill_color=None, ax=None, direction=1, **kwargs):\n\n line, = ax.plot(x, y, **kwargs)\n if fill_color is None:\n fill_color = line.get_color()\n\n # print fill_color\n zorder = line.get_zorder()\n alpha = line.get_alpha()\n alpha = 1.0 if alpha is None else alpha\n\n z = np.empty((100, 1, 4), dtype=float)\n rgb = mcolors.colorConverter.to_rgb(fill_color)\n z[:, :, :3] = rgb\n if direction == 1:\n z[:, :, -1] = np.linspace(0, alpha, 100)[:, None]\n else:\n z[:, :, -1] = np.linspace(alpha, 0, 100)[:, None]\n\n xmin, xmax, ymin, ymax = x.min(), x.max(), y.min(), y.max()\n im = ax.imshow(z, aspect='auto', extent=[xmin, xmax, ymin, ymax],\n origin='lower', zorder=zorder)\n\n xy = np.column_stack([x, y])\n if direction == 1:\n xy = np.vstack([[xmin, ymin], xy, [xmax, ymin], [xmin, ymin]])\n else:\n xy = np.vstack([[xmin, ymax], xy, [xmax, ymax], [xmin, ymax]])\n clip_path = Polygon(xy, lw=0.0, facecolor='none',\n edgecolor='none', closed=True)\n ax.add_patch(clip_path)\n im.set_clip_path(clip_path)\n\n ax.autoscale(True)\n\n return line, im", "def light_palette(color, n_colors=6, reverse=False, as_cmap=False,\n input=\"rgb\"):\n color = _color_to_rgb(color, input)\n light = set_hls_values(color, l=.95)\n colors = [color, light] if reverse else [light, color]\n return blend_palette(colors, n_colors, as_cmap)", "def add_light(self, location, time_start, duration):\n min_state = self.location_to_state(location, 0)\n max_state = self.location_to_state(location, self.max_time - 1)\n\n # times the red light is on\n # if time_start = 2, duration = 3, max_time = 10\n # _ _ - - - _ _ _ - -, where '_' is green and '-' red\n for i in range(time_start, self.max_time, duration * 2):\n for redd in range(i, i + duration):\n if min_state + redd <= max_state:\n self.rewards[min_state + redd] = [1, 0, 0, 0, 0]", "def LightContrastColour(c):\r\n\r\n amount = 120\r\n\r\n # if the colour is especially dark, then\r\n # make the contrast even lighter\r\n if c.Red() < 128 and c.Green() < 128 and c.Blue() < 128:\r\n amount = 160\r\n\r\n return StepColour(c, amount)", "def dark(r, d):\n return d * 1.0 / (r + d) + d * r * 1.0 / ((r + d) ** 2)", "def gradualShadeV(img, brightness, 
direction=0, min_b=0.35, max_b=1.5):\n h, _, _ = img.shape\n img2 = np.float32(img)\n\n half = brightness / 2.0\n alpha = max(min_b, 1 - half)\n beta = min(max_b, alpha + brightness)\n delta = (beta - alpha) / float(h)\n\n t = alpha\n if direction % 2 == 0:\n t = beta\n delta = -1 * delta\n\n for j in range(h):\n t += delta\n img2[j, :, :] = img2[j, :, :] * t\n\n img2 = np.uint8(img2.clip(0, 255))\n return img2" ]
[ "0.5512884", "0.5359985", "0.5224884", "0.51298463", "0.49652985", "0.49155885", "0.49088266", "0.48779944", "0.4798884", "0.47342348", "0.47193816", "0.47108945", "0.46938923", "0.46933818", "0.46920228", "0.4672632", "0.46464553", "0.46086583", "0.46074972", "0.4583746", "0.45804772", "0.4569888", "0.45696533", "0.45696026", "0.45539036", "0.45534062", "0.4550621", "0.45063826", "0.45027778", "0.44717348" ]
0.63035196
0
Return the maximum color value that this light can produce.
def get_max_brightness(self) -> float: return max(self._color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color_max(image, color):\n boundaries = find_color_boundaries(image, color)\n if boundaries:\n return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])\n else:\n return 1, None", "def maximal_color(graph, node):\n return max(get_node_colors(graph, node))", "def last_color(self):\n idx = self._color_indexes.get(self._plotid)\n if idx is not None:\n return COLOR_CYCLE[(idx-1) % len(COLOR_CYCLE)]\n return COLOR_CYCLE[0]", "def _get_color(self, c, x, max_num):\n\n ratio = 5*(float(x)/max_num)\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - ratio) * self._colors[i][c] + ratio*self._colors[j][c]\n return int(255*r)", "def getMaxAbundanceOfClrSample(self):\n #try: maximum = max(self.clr_sample['abundance'])+0.001\n try: maximum = max(self.clr_sample)+0.01\n except: maximum = 0\n return maximum", "def _find_max_gradient_id(self) -> int:\n\n mask = self.alpha < self.b\n mask_ids = np.where(mask)[0]\n i = mask_ids[np.argmax(self.gradient[mask])]\n\n return i", "def rgb_maximum(colors_tuple):\n r_sorted_tuple = sorted(colors_tuple, key=lambda x:x[1][0])\n g_sorted_tuple = sorted(colors_tuple, key=lambda x:x[1][1])\n b_sorted_tuple = sorted(colors_tuple, key=lambda x:x[1][2])\n\n r_min = r_sorted_tuple[0][1][0]\n g_min = g_sorted_tuple[0][1][1]\n b_min = b_sorted_tuple[0][1][2]\n\n r_max = r_sorted_tuple[len(colors_tuple)-1][1][0]\n g_max = g_sorted_tuple[len(colors_tuple)-1][1][1]\n b_max = b_sorted_tuple[len(colors_tuple)-1][1][2]\n\n return {\n \"r_max\":r_max,\n \"r_min\":r_min,\n \"g_max\":g_max,\n \"g_min\":g_min,\n \"b_max\":b_max,\n \"b_min\":b_min,\n \"r_dvalue\":(r_max-r_min)/3,\n \"g_dvalue\":(g_max-g_min)/3,\n \"b_dvalue\":(b_max-b_min)/3\n }", "def state_max(self) -> float:\n raise NotImplementedError", "def cmax(self):\n return self['cmax']", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def cmax(self):\n return self[\"cmax\"]", "def max_well(self):\n maxVal = np.max(self.get_well_depth_image())\n return maxVal", "def z_max(self):\n return self.get_max_value(self.Z_INDEX)", "def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])", "def _get_color_brightness(self, color):\n d0, _, _ = self._get_color_dominance_indices(color)\n return color[d0]/MAX", "def luminance(self):\n \n return (self.r + self.g + self.b) // 3", "def get_color(in_val, min_val=0, max_val=100):\n width = max_val - min_val\n unit = width / len(continuum)\n return continuum[min(int(in_val / unit), 19)]", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def get_color(self):\n return self.color", "def get_color(self):\r\n return self.__color", "def get_color(self):\r\n return self._color", "def curvature_max(self):\n return 1.0 / self.radius_min", "def native_max_value(self) -> float:\n return 9", "def get_rmax(self):\n return self.rmax", "def getcolors(self, maxcolors=256):\r\n\r\n if self._mode in (\"1\", \"L\", 
\"P\"):\r\n h = self._instance.histogram()\r\n out = []\r\n for i in range(256):\r\n if h[i]:\r\n out.append((h[i], i))\r\n if len(out) > maxcolors:\r\n return None\r\n return out\r\n uni, counts = self._getcolors()\r\n if c>maxcolors: return None\r\n colors = []\r\n for l in range(len(counts)):\r\n colors.append((counts[l], l))\r\n return colors", "def get_color(self):\n\n return self.color", "def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light", "def _maximum(self) -> float:\n return self._config[CONF_MAX]", "def max_value(self) -> int:\n return self.__max_value", "def get_color(self):\n return self._color" ]
[ "0.7567657", "0.7444326", "0.69971585", "0.68010145", "0.67860895", "0.66241765", "0.6564625", "0.6542855", "0.65393096", "0.65304965", "0.6511511", "0.6487539", "0.64411324", "0.6435717", "0.64141905", "0.6350198", "0.6333665", "0.63173854", "0.62954146", "0.6288576", "0.6258697", "0.6249966", "0.62456995", "0.6241918", "0.62343043", "0.6232674", "0.6222261", "0.62167525", "0.620402", "0.6200973" ]
0.8303178
0
Return the color contributed by this light on a surface given its (unit) normal vector and material color.
def compute_shaded_color(self, normal: Point3D, material_color: Color) -> Color: dot_product = sum(multiply_vectors(self._direction, normal)) light_amount = max(dot_product, self._dot_clip) light_amount = (light_amount - self._dot_clip) / (1.0 - self._dot_clip) return [vm*vl*light_amount for vm, vl in zip(material_color, self._color)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_shaded_color(self, p1: Point3D, p2: Point3D, p3: Point3D, material_color: Color) -> Color:\n # compute the normal vector\n ax, ay, az = p1[0]-p2[0], p1[1]-p2[1], p1[2]-p2[2]\n bx, by, bz = p1[0]-p3[0], p1[1]-p3[1], p1[2]-p3[2]\n nx = ay*bz - az*by\n ny = az*bx - ax*bz\n nz = ax*by - ay*bx\n normal = normalize(nx, ny, nz)\n \n # compute the color of the lights on the surface\n cr, cg, cb = 0, 0, 0\n for light in self._lights:\n lr, lg, lb = light.compute_shaded_color(normal, material_color)\n cr += lr\n cg += lg\n cb += lb\n \n # clip the color values at 1.0 and return\n max_v = max(cr, cg, cb)\n if max_v > 1.0:\n return min(cr,1.0), min(cg,1.0), min(cb,1.0)\n return cr, cg, cb", "def mesh_color(self):\n return self._mesh_color", "def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)", "def specular_light(self):\n return self._specular_light", "def getColor(self):\r\n return self.color", "def getColor(self):\n return self.color", "def get_color(self):\r\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def get_color(self):\n return self.color", "def get_color(self):\r\n return self._color", "def calculateLighting(x,y,z, xnormal, ynormal, znormal):\n dummy = 0\n clr = dislin.getlit(x,y,z,xn,yn,zn,dummy)", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def getColor(self):\n return self._l[2]", "def __lightness(self, color):\n hsv = color.toHsv()\n return hsv.valueF()", "def light_color(self):\n return self._spots[constants.CROSSING_LOCATION - 1].light_color()", "def get_color(self):\n\n return self.color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def getColor(self):\n return self.side_color", "def color(self):\n return self['color']", "def color(self):\n return self.__color", "def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light", "def color(self):\n return self._color", "def color(self):\n return self._color", "def get_color(self):\n\n return self._color" ]
[ "0.66830057", "0.64685374", "0.63342154", "0.6113619", "0.61106956", "0.60924566", "0.6054413", "0.6053781", "0.6053781", "0.6053781", "0.60509187", "0.6034845", "0.59819704", "0.59732807", "0.59732807", "0.5946499", "0.5937387", "0.59219927", "0.5917848", "0.5914473", "0.5914473", "0.5914473", "0.5914473", "0.58607775", "0.58581346", "0.5844932", "0.58181405", "0.5796599", "0.5796599", "0.57897234" ]
0.7437899
0
Project a point in 3D world space into 2D screen space.
def project_point(self, point: Point3D) -> Point3D: x, y, z = point cam_x, cam_y, cam_z = self._pos x -= cam_x y -= cam_y z -= cam_z dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x) dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x) return self._scale * dx/dz, self._scale * dy/dz, dz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def project_point(self, point: array_like) -> Point:\n # Vector from the point in space to the point on the plane.\n vector_to_plane = Vector.from_points(point, self.point)\n\n # Perpendicular vector from the point in space to the plane.\n vector_projected = self.normal.project_vector(vector_to_plane)\n\n return Point(point) + vector_projected", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)", "def ConvertScreenToWorld(self, x, y):\r\n return b2.b2Vec2((x + self.viewOffset.x) / self.viewZoom,\r\n ((self.screenSize.y - y + self.viewOffset.y)\r\n / self.viewZoom))", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def projectPoint(self, point):\n vector = self.normal_vector\n angle = vector.angle\n line = Line(point, angle, correct=False)\n 
projection = self.crossLine(line)\n return projection", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def project( self, vector3 ):\n self._coords[:3] = vector3._coords[:3] * ( self.dot(vector3)\n / vector3.magnitude**2 )\n \n return self", "def project_point_along_2Dvector(): \n \n # 2d vector \n a = vec2( 1 , 1 )\n b = vec2( -1 , -1 )\n com = vec2() \n\n #fb = pixel_op() \n #fb.create_buffer(800, 800)\n #fb.graticule(pixels_per_unit)\n\n vecs = [a,b]\n pts = [com.project_pt(a, b, 2)]\n\n bloody_simple_2drender('2d_render.png', vecs=vecs, pts=pts, gridsize=40)", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def to_world(self, x, y, **kwargs):", "def objectCenter(*args, gl: bool=True, local: bool=True, x: bool=True, y: bool=True, z:\n bool=True, **kwargs)->List[float]:\n pass", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def position3d(self) -> Point3:\n return Point3.from_proto(self.proto.pos)", "def world_to_object(self, point: Point) -> Point:\n if self.parent:\n point = self.parent.world_to_object(point)\n result = self.transform.inverse() * point\n return result", "def move3dCursor(p = (0,0,0)):\n bpy.context.scene.cursor_location = p\n # bpy.context.space_data.cursor_location = p", "def proj3d(v):\n v = normalize(v)\n x, y, z, w = v\n return np.array([x, y, z]) / (1 + 1e-8 - w) # avoid divide by zero", "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with 
frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2,:], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)\n return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)" ]
[ "0.74053556", "0.6547067", "0.64282596", "0.64282596", "0.64282596", "0.6392517", "0.63821685", "0.63705236", "0.6332853", "0.63261366", "0.63232213", "0.6312432", "0.6261391", "0.62242615", "0.6203859", "0.61349493", "0.6087345", "0.6072686", "0.60687876", "0.6006095", "0.59953916", "0.59914786", "0.59558994", "0.5934117", "0.5888852", "0.5870568", "0.5870203", "0.5864161", "0.58598906", "0.5853281" ]
0.72277606
1
Fade a color depending on how far from the camera it is.
def compute_fog_faded_color(self, color: Color, dz: float) -> Color: fade_amount = math.exp(-(dz * self._fog_factor)**2) return mix_colors(UPPER_SKY_COLOR, color, fade_amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def colorEyes(self, color, fade_duration = 0.2):\n\n\t\tif color in self.colors:\n\t\t\tcolor = self.colors[color]\n\n\t\tself.leds.fadeRGB(\"FaceLeds\", color, fade_duration)", "def do_fade_colour(l, leds, r, g, b, duration):\n l._do_multi_led_command(\n create_fade_colour_command, leds, r, g, b, duration\n )", "def colorEyes(self, color, fade_duration = 0.2):\n\n\t\tif color in self.colors:\n\t\t\tcolor = colors[color]\n\n\t\tself.leds.fadeRGB(\"FaceLeds\", color, fade_duration)", "def fade_to_rgb(self, r=0, g=0, b=0, fade=300, check=True):\n #When we're doing a fade, the pin values may have changed... check first!!\n if check:\n self.sync_channels()\n \n #Now we'll have the correct init values!!!\n init_r = self.red\n init_g = self.green\n init_b = self.blue\n gap_r = r - init_r\n gap_g = g - init_g\n gap_b = b - init_b\n n_steps = int(float(fade)/20.0) #50Hz = 20 milliseconds\n \n for step in xrange(0, n_steps):\n fractional_progress = float(step)/n_steps\n cur_r = init_r + (gap_r*fractional_progress)\n cur_g = init_g + (gap_g*fractional_progress)\n cur_b = init_b + (gap_b*fractional_progress)\n cur_col = self.set_rgb(cur_r,cur_g,cur_b)\n sleep(0.02) #20ms\n if self._sequence and self._sequence_stop_signal: #Instantly escape the fade if changing routine\n break \n \n #And fix it to the target in case float calcs put us off a bit\n return self.set_rgb(r,g,b)", "def fadeToRGB(self, color: tuple):\n r, g, b = color\n self._sendi2c('c', [r, g, b])", "def fade(self, r=None, g=None, b=None, hex_value=None, name=None, fade_time=300, check=True):\n return self.set(r, g, b, hex_value, name, fade=fade_time, check=check)", "def fade_out(self, duration: int = 1):\n original_brightness = self.np.brightness\n\n step_level = 0.01\n sleep_cycle = duration / (original_brightness / step_level)\n\n while self.np.brightness > 0:\n # FIXME :\n # Im not totally sure why, but...\n # self.np.brightness -= step_level\n # causes self.np.brightness of 0.1 to become 0.09000000000000001\n # and i dont feel like figuring out why right now\n self.np.brightness = round(self.np.brightness - step_level, 2)\n self.np.show()\n time.sleep(sleep_cycle)\n\n self.np.fill(OFF)\n self.np.show()\n\n # Reset brightness to original value now that pixels are OFF\n self.np.brightness = original_brightness\n\n return True", "def FadeOut(self):\r\n\r\n while 1:\r\n self._alpha_amount -= 10\r\n if self._alpha_amount <= 0:\r\n self._alpha_amount = 255\r\n return\r\n\r\n self.SetTransparent(self._alpha_amount)\r\n wx.SafeYield()\r\n wx.MilliSleep(15)", "def fade(startColor, endColor, steps, interval, strip):\r\n lastUpdate = utime.time() - interval\r\n for i in range(0, steps):\r\n print(\"range step: \", steps)\r\n red = ((startColor[0] * (steps - i)) + (endColor[0] * i)) // steps\r\n green = ((startColor[1] * (steps - i)) + (endColor[1] * i)) // steps\r\n blue = ((startColor[2] * (steps - i)) + (endColor[2] * i)) // steps\r\n \r\n while ((utime.time() - lastUpdate) < interval):\r\n pass\r\n setStrip(strip, (red, green, blue))\r\n lastUpdate = utime.time()", "def fadeLED( gpio, startVal, stopVal ):\n\t#convert passed values into usable format for pi-blaster (i.e 0 - 1)\n\tRGBstartVal = startVal / 255\n\tRGBstopVal = stopVal / 255\n\t#debug\n\tprint RGBstartVal, startVal, RGBstopVal, stopVal;\n\t#set the current LED values to the start value\n\tcurrentVal = RGBstartVal\n\tif RGBstartVal < RGBstopVal:\n\t\twhile currentVal < RGBstopVal:\n\t\t\tos.system(\"echo \\\"{0}={1}\\\" > /dev/pi-blaster\" 
.format(gpio,currentVal))\n\t\t\tcurrentVal = currentVal + STEP;\n\t\t\ttime.sleep(FADESPEED)\n\t\t\tprint currentVal\n\telif RGBstartVal > RGBstopVal:\n\t\t while currentVal > RGBstopVal:\n\t\t\tos.system(\"echo \\\"{0}={1}\\\" > /dev/pi-blaster\" .format(gpio,currentVal))\n currentVal = currentVal - STEP;\n time.sleep(FADESPEED)\n print currentVal\n\treturn;", "def setFadeSpeed(self,speed):\n speed = clamp(speed, 1, 255)\n self._sendi2c('f', [speed])", "def fade_death(self):\n self.image = pg.Surface((64, 64)).convert()\n self.image.set_colorkey(c.BLACK)\n self.image.set_alpha(self.alpha)\n self.image.blit(self.death_image, (0, 0))\n self.alpha -= 8\n if self.alpha <= 0:\n self.kill()\n self.notify(c.ENEMY_DEAD)", "def compute_fade(f):\n\n return 6 * f**5 - 15 * f**4 + 10 * f**3", "def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)", "def flash_red(self, duration=0.2):\n self.pen_color = wx.RED\n self.Refresh(True)\n t = time.time()\n while time.time() - t < duration:\n time.sleep(0.001)\n self.pen_color = wx.WHITE\n self.Refresh(True)", "def fade_display():\n for col in range(5):\n for row in range(5):\n brightness = microbit.display.get_pixel(col, row)\n # reduce by one, but make sure it's still in 0 to 9\n brightness = clamp(MIN_BRIGHTNESS, brightness - 1, MAX_BRIGHTNESS)\n microbit.display.set_pixel(col, row, brightness)", "def fadeOut(self):\n clock = pygame.time.Clock()\n blackRect = pygame.Surface(self.screen.get_size())\n blackRect.set_alpha(100)\n blackRect.fill((0, 0, 0))\n # Continuously draw a transparent black rectangle over the screen\n # to create a fadeout effect\n for i in range(0, 5):\n clock.tick(15)\n self.screen.blit(blackRect, (0, 0))\n pygame.display.flip()\n clock.tick(15)\n screen.fill((255, 255, 255, 50))\n pygame.display.flip()", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def offsetColor(self, input_color, amount, clamp=None):\n\t\tif amount == 0: # Do nothing\n\t\t\treturn input_color\n\n\t\telif amount > 0: # Lighten\n\t\t\tif clamp is None:\n\t\t\t\tmin_clamp = 0\n\t\t\telse:\n\t\t\t\tmin_clamp = clamp\n\t\t\tmax_clamp = 255\n\n\t\telif amount < 0: # Darken\n\t\t\tmin_clamp = 0\n\t\t\tif clamp is None:\n\t\t\t\tmax_clamp = 255\n\t\t\telse:\n\t\t\t\tmax_clamp = clamp\n\n\t\tlum = max(min_clamp, min(input_color.lightness()+amount, max_clamp))\n\t\treturn QtGui.QColor(lum, lum, lum)", "def getTweenColor(self, factor):\n\n pass", "def compute_shaded_color(self, p1: Point3D, p2: Point3D, p3: Point3D, material_color: Color) -> Color:\n # compute the normal vector\n ax, ay, az = p1[0]-p2[0], p1[1]-p2[1], p1[2]-p2[2]\n bx, by, bz = p1[0]-p3[0], p1[1]-p3[1], p1[2]-p3[2]\n nx = ay*bz - az*by\n ny = az*bx - ax*bz\n nz = ax*by - ay*bx\n normal = normalize(nx, ny, nz)\n \n # compute the color of the lights on the surface\n cr, cg, cb = 0, 0, 0\n for light in self._lights:\n lr, lg, lb = light.compute_shaded_color(normal, material_color)\n cr += lr\n cg += lg\n cb += lb\n \n # clip the color values at 1.0 and return\n max_v = max(cr, cg, cb)\n if max_v > 1.0:\n return min(cr,1.0), min(cg,1.0), min(cb,1.0)\n return cr, cg, cb", "def dark(r, d):\n return d * 1.0 / (r + d) + d * r * 1.0 / ((r + d) ** 2)", "def _ampl_color(self, amplitude, frame):\n color=(0,0,0)\n for col in self._color:\n if frame >= col[0]:\n color = col[1]\n return color + (int(self._amplfactor * 
amplitude),)", "def on_collision(self):\n self.car_color = arcade.color.RED_DEVIL", "def get_fade_fract(self):\n\n FADE_TIME = 2.0 # Adjust this to change fade-in and fade-out times in seconds\n\n if self.show_runtime < FADE_TIME:\n fract = self.show_runtime / FADE_TIME\n elif self.show_runtime > self.max_show_time - FADE_TIME:\n fract = (self.max_show_time - self.show_runtime) / FADE_TIME\n else:\n fract = 1\n\n return fract", "def RedLED(firstPixel, secondPixel):\n led = LPD8806.strand() \n count1 = 250\n count2 = 0\n while count1 != 0:\n \"\"\" Fade green off \"\"\"\n led.set(firstPixel, 0, count1, 0)\n led.set(secondPixel, 0, count1, 0)\n led.update()\n count1 -= 25\n while count2 != 250:\n \"\"\" Fade red on \"\"\"\n led.set(firstPixel, count2, 0, 0)\n led.set(secondPixel, count2, 0, 0)\n led.update()\n count2 += 25\n return", "def set_fade_threshold(self, address):\n self.model.fade_address = address", "def light(color, dist):\n return tuple( float(x*dist*dist) for x in color )", "def driftColor(baseColor, factor=110):\n if baseColor.lightness() > 128:\n return baseColor.darker(factor)\n else:\n return baseColor.lighter(factor+10)", "def blend(c: float, a: float) -> float:\n return 255 + (c - 255) * a" ]
[ "0.6437271", "0.64267206", "0.6367479", "0.62626547", "0.6219544", "0.61913073", "0.5968006", "0.57155895", "0.5697734", "0.5643331", "0.5637951", "0.5629955", "0.5598446", "0.558756", "0.5551077", "0.5531995", "0.5469604", "0.54235256", "0.53964067", "0.5386732", "0.53677034", "0.5357918", "0.5320172", "0.5317542", "0.5286201", "0.5274732", "0.52695477", "0.52435905", "0.52090985", "0.52084523" ]
0.6534394
0
Shade, project, and draw a list of triangles in 3D.
def draw_triangles(self, triangles: Collection): # project the points into 2D and compute each shaded/faded color processed_triangles = [] for p1, p2, p3, color in progress_iterator(triangles, "Processing triangles..."): shaded_color = self.compute_shaded_color(p1, p2, p3, color) *p1_p, z1 = self.project_point(p1) *p2_p, z2 = self.project_point(p2) *p3_p, z3 = self.project_point(p3) centroid_z = (z1 + z2 + z3) / 3 faded_color = self.compute_fog_faded_color(shaded_color, centroid_z) processed_triangles.append((centroid_z, p1_p, p2_p, p3_p, faded_color)) # sort the list of triangles back-to-front (by centroid Z depth) processed_triangles.sort(key=lambda tri: tri[0], reverse=True) # draw the triangles for _, p1, p2, p3, color in progress_iterator(processed_triangles, "Adding triangles to the canvas..."): self.draw_triangle(p1, p2, p3, color) print(f" Added {len(processed_triangles)} triangles")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_wireframe_3d(self, **kwds):\n wireframe = [];\n for l in self.lines:\n l_coords = self.coordinates_of(l)\n wireframe.append( line3d(l_coords, **kwds))\n for a in self.arrows:\n a_coords = self.coordinates_of(a)\n wireframe.append(arrow3d(a_coords[0], a_coords[1], **kwds))\n return sum(wireframe)", "def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return", "def draw_triangle(tup):\n x, y, z = tup[0], tup[1], tup[2]\n t_draw = turtle.Turtle()\n for index in range(3):\n t_draw.forward()", "def render_solid_3d(self, **kwds):\n return sum([ polygon3d(self.coordinates_of(f), **kwds) \n for f in self.polygons ])", "def _fractal_triangle(self, p1: Point3D, p2: Point3D, p3: Point3D, depth: int):\n if depth == 0:\n height = (p1[1]+p2[1]+p3[1])/3\n if self._only_heightmap:\n self._heightmap[self._get_heightmap_key(p1,p2,p3)] = height\n else:\n if self._color_offset_heightmap is not None:\n height += self._color_offset_heightmap.get_height(p1, p2, p3)\n if height > self._snow_height:\n c = SNOW_COLOR\n elif height < self._tree_height:\n c = TREE_COLOR\n else:\n c = ROCK_COLOR\n self._triangles.append((p1, p2, p3, c))\n else:\n displace = depth <= self._noise_depth\n mid12 = self._get_midpoint(p1, p2, displace)\n mid23 = self._get_midpoint(p2, p3, displace)\n mid13 = self._get_midpoint(p3, p1, displace)\n self._fractal_triangle(p1, mid12, mid13, depth=depth-1)\n self._fractal_triangle(mid12, p2, mid23, depth=depth-1)\n self._fractal_triangle(mid13, mid23, p3, depth=depth-1)\n self._fractal_triangle(mid12, mid23, mid13, depth=depth-1)", "def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list", 
"def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def render_3d(projection, **kwds):\n if isinstance(projection, Polyhedron): projection = Projection(projection)\n return \\\n projection.render_vertices_3d(width=3, color='green', **kwds) +\\\n projection.render_wireframe_3d(width=3, color='green', **kwds) + \\\n projection.render_solid_3d(**kwds)", "def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)", "def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))", "def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type=\"tri\", equally_spaced=True):\n\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)\")\n\n npoints = int(npoints)\n\n\n npoints = npoints - 1\n if npoints < 0:\n npoints = 0\n\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n oelements = np.array([[0,1,2]])\n\n if element_type==\"tri\":\n mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)\n self.__update__(mesh)\n\n\n elif element_type == \"quad\":\n\n # SPLIT THE TRIANGLE INTO 3 QUADS\n omesh = Mesh()\n omesh.element_type=\"tri\"\n omesh.elements = oelements\n omesh.nelem = omesh.elements.shape[0]\n omesh.points = opoints\n omesh.GetBoundaryEdges()\n\n sys.stdout = open(os.devnull, \"w\")\n omesh.ConvertTrisToQuads()\n sys.stdout = sys.__stdout__\n\n\n npoints = int(npoints/2) + 1\n mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n for i in range(1,omesh.nelem):\n mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n\n self.__update__(mesh)", "def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, 
W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img", "def create_mesh(self):\n print(\"create_mesh\")\n faces = self.get_faces()\n print(\"num faces: {}\".format(len(faces)))\n\n # TODO: perform face filtering to remove long edges in Z direction\n # filtered_faces = self.get_filtered_faces(faces)\n # print(\"num filtered faces: {}\".format(len(filtered_faces)))\n\n vertices = self.xyz_points.T\n\n # handle texture mappings\n vertex_index_to_texture = []\n for j in range(0, self.height):\n for i in range(0, self.width):\n # vertex_index = (j * self.width) + ij\n w = i / self.width\n h = (self.height - j - 1) / self.height\n vertex_index_to_texture.append(\n (w, h)\n )\n\n # Create material.\n # TODO: make the string/filename randomly generated and unique\n file0 = open(os.path.join(self.args.path, \"triangle_mesh.obj.mtl\"), \"w\") # write mode\n file0.write(\"newmtl material_0\\n\")\n # Save image here.\n cv2.imwrite(os.path.join(self.args.path, \"triangle_mesh.png\"), self.bgr)\n file0.write(\"map_Kd triangle_mesh.png\\n\")\n file0.close()\n\n # https://en.wikipedia.org/wiki/Wavefront_.obj_file\n # https://github.com/mmatl/pyrender/blob/master/examples/models/fuze.obj\n obj_path = os.path.join(self.args.path, \"triangle_mesh.obj\")\n file1 = open(obj_path, \"w\") # write mode\n file1.write(\"mtllib ./triangle_mesh.obj.mtl\\n\")\n for vertex in vertices:\n x, y, z = vertex\n file1.write(\"v {} {} {}\\n\".format(x, y, z))\n file1.write(\"usemtl material_0\\n\")\n for w, h in vertex_index_to_texture:\n file1.write(\"vt {} {}\\n\".format(w, h))\n for face in faces:\n a, b, c = face\n a += 1\n b += 1\n c += 1\n file1.write(\"f {}/{} {}/{} {}/{}\\n\".format(\n a, a, b, b, c, c\n )\n )\n file1.close()\n\n # Load the trimesh from OBJ file.\n trimesh_mesh = trimesh.load(obj_path)\n # trimesh_mesh.show()\n\n mesh = pyrender.Mesh.from_trimesh(trimesh_mesh, smooth=False)\n self.scene = pyrender.Scene(ambient_light=[3.0, 3.0, 3.0])\n\n camera = pyrender.IntrinsicsCamera(\n self.focal_length, self.focal_length, self.width / 2, self.height / 2\n )\n self.camera_pose = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 
0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # https://pyrender.readthedocs.io/en/latest/examples/cameras.html#creating-cameras\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html\n r = R.from_rotvec(np.array([0, np.pi, 0]))\n r = R.from_rotvec(np.array([0.0, 0, np.pi])) * r\n matrix = r.as_matrix()\n self.camera_pose[:3, :3] = matrix\n\n light = pyrender.PointLight(\n color=[1.0, 1.0, 1.0],\n intensity=0.0\n )\n\n self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))\n self.nl = pyrender.Node(light=light, matrix=np.eye(4))\n self.nc = pyrender.Node(camera=camera, matrix=np.eye(4))\n self.scene.add_node(self.nm)\n self.scene.add_node(self.nl)\n self.scene.add_node(self.nc)\n\n # Set the pose and show the image.\n temppose = self.extrinsics @ self.camera_pose\n self.scene.set_pose(self.nl, pose=temppose)\n self.scene.set_pose(self.nc, pose=temppose)\n pyrender.Viewer(self.scene, use_raymond_lighting=True,\n viewport_size=(self.width, self.height))", "def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )", "def drawShadedSurface(xlist, ylist, zmatrix):\n dislin.surshd(xlist, len(xlist), ylist, len(ylist), zmatrix)", "def render_vertices_3d(self, **kwds):\n return point3d(self.coordinates_of(self.points), **kwds)", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def __init__(self, camera=None, light=None, name=\"\", \r\n corners=((-0.5, -0.28868), (0.0, 0.57735), (0.5, -0.28868)),\r\n x=0.0, y=0.0, z=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0):\r\n super(Triangle, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n 
self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n c = corners # alias for convenience\r\n\r\n self.verts = ((c[0][0], c[0][1], 0.0), (c[1][0], c[1][1], 0.0), (c[2][0], c[2][1], 0.0))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 0.86603), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))", "def trisurf(\n x,\n y,\n z,\n simplices,\n show_colorbar,\n edges_color,\n scale,\n colormap=None,\n color_func=None,\n plot_edges=False,\n x_edge=None,\n y_edge=None,\n z_edge=None,\n facecolor=None,\n):\n # numpy import check\n if not np:\n raise ImportError(\"FigureFactory._trisurf() requires \" \"numpy imported.\")\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n # Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\n \"If color_func is a list/array, it must \"\n \"be the same length as simplices.\"\n )\n\n # convert all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if \"#\" in color_func[index]:\n foo = clrs.hex_to_rgb(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = clrs.convert_to_RGB_255(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = map_face2color(\n mean_dists[index], colormap, scale, min_mean_dists, max_mean_dists\n )\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = np.asarray(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(\n x=x, y=y, z=z, facecolor=facecolor, i=ii, j=jj, k=kk, name=\"\"\n )\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = clrs.make_colorscale(colormap, scale)\n colorscale = clrs.convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[:1],\n y=y[:1],\n z=z[:1],\n mode=\"markers\",\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True,\n ),\n hoverinfo=\"none\",\n showlegend=False,\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, colorbar]\n 
else:\n return [triangles]\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\n \"If any (x_edge, y_edge, z_edge) is None, \" \"all must be None\"\n )\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack(\n [pull_edges[:, :, 0], np.tile(None, [pull_edges.shape[0], 1])]\n )\n y_edge_pull = np.hstack(\n [pull_edges[:, :, 1], np.tile(None, [pull_edges.shape[0], 1])]\n )\n z_edge_pull = np.hstack(\n [pull_edges[:, :, 2], np.tile(None, [pull_edges.shape[0], 1])]\n )\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\n \"The lengths of x_edge, y_edge and \" \"z_edge are not the same.\"\n )\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge,\n y=y_edge,\n z=z_edge,\n mode=\"lines\",\n line=graph_objs.scatter3d.Line(color=edges_color, width=1.5),\n showlegend=False,\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, lines, colorbar]\n else:\n return [triangles, lines]", "def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 
1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)", "def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]", "def create_triangles(list_of_points):\n # create the first two triangle using the create_two_init_triangles with\n # the first 4 points in the given list\n tri_list = create_two_init_triangles(list_of_points[0:FIRST_FOUR_POINTS])\n # run over the point list from the 5th point and on\n for i in range(FIRST_FOUR_POINTS, len(list_of_points)):\n # run on the existing triangles\n for j in range(0, len(tri_list)):\n # check if the point is inside the current triangle\n if is_point_inside_triangle(list_of_points[i], tri_list[j][0],\n tri_list[j][1], tri_list[j][2])[0]:\n # if the point is inside the current triangle, create 3 new\n # triangles using the old triangle vertexes and the new point\n # adding them to the triangle list instead of the triangle the\n # point was in\n tri_list[j:j+1] = create_inner_tri(list_of_points[i],\n tri_list[j][0],\n tri_list[j][1],\n tri_list[j][2])\n break\n return tri_list", "def drawLine3D(x0,y0,z0,x1,y1,z1):\n dislin.strt3d(x0,y0,z0)\n dislin.conn3d(x1,y1,z1)", "def _triangular_mesh_to_three_geometry(vertices, faces, vertex_colors=None):\n context = _js_builder.Js(mode=_js_builder.PERSISTENT)\n vertices = context.Float32Array.new_object(vertices.ravel().tolist())\n faces = context.Uint32Array.new_object(faces.ravel().tolist())\n geometry = context.THREE.BufferGeometry.new_object()\n geometry.addAttribute('position',\n context.THREE.BufferAttribute.new_object(vertices, 3))\n geometry.setIndex(context.THREE.BufferAttribute.new_object(faces, 1))\n geometry.computeVertexNormals()\n if vertex_colors is not None:\n vertex_colors = context.Float32Array.new_object(\n vertex_colors.ravel().tolist())\n geometry.addAttribute(\n 'color', context.THREE.BufferAttribute.new_object(vertex_colors, 3))\n\n return geometry", "def drawTwoTriangles():\n\n drawTriangle(200,100,\"blue\",\"pink\")\n Lucia.up()\n Lucia.forward(220)\n Lucia.down()\n drawTriangle(100,200,\"grey\",\"blue\")\n Lucia.seth(0)", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def drawNormalsFromArray(array, textured):\r\n\r\n\tif textured:\r\n\t\tnpt = 36\r\n\telse:\r\n\t\tnpt = 30\r\n\r\n\tvC = len(array) / npt\r\n\tnpv 
= npt / 3\r\n\r\n\tglBegin(GL_LINES)\r\n\r\n\tif textured:\r\n\t\tfor j in range(vC):\r\n\t\t\tx = 0.0\r\n\t\t\ty = 0.0\r\n\t\t\tz = 0.0\r\n\r\n\t\t\tcV = j * npt\r\n\r\n\t\t\tnx = array[cV + 6]\r\n\t\t\tny = array[cV + 7]\r\n\t\t\tnz = array[cV + 8]\r\n\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tcV = j * npt + i * npv\r\n\t\t\t\tx = x + array[cV + 9]\r\n\t\t\t\ty = y + array[cV + 10]\r\n\t\t\t\tz = z + array[cV + 11]\r\n\r\n\t\t\tx = x / 3.0\r\n\t\t\ty = y / 3.0\r\n\t\t\tz = z / 3.0\r\n\r\n\t\t\tglColor4f(0.0, 0.0, 0.0, 1.0)\r\n\t\t\tglVertex3f(x, y, z)\r\n\t\t\tglVertex3f(x + nx, y + ny, z + nz)\r\n\r\n\telse:\r\n\t\tfor j in range(vC):\r\n\t\t\tx = 0.0\r\n\t\t\ty = 0.0\r\n\t\t\tz = 0.0\r\n\r\n\t\t\tcV = j * npt\r\n\r\n\t\t\tnx = array[cV + 4]\r\n\t\t\tny = array[cV + 5]\r\n\t\t\tnz = array[cV + 6]\r\n\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tcV = j * npt + i * npv\r\n\t\t\t\tx = x + array[cV + 7]\r\n\t\t\t\ty = y + array[cV + 8]\r\n\t\t\t\tz = z + array[cV + 9]\r\n\r\n\t\t\tx = x / 3.0\r\n\t\t\ty = y / 3.0\r\n\t\t\tz = z / 3.0\r\n\r\n\t\t\tglColor4f(0.0, 0.0, 0.0, 1.0)\r\n\t\t\tglVertex3f(x, y, z)\r\n\t\t\tglVertex3f(x + nx, y + ny, z + nz)\r\n\r\n\tglEnd()", "def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def draw():\n global trackball, flashlight, \\\n vertex_buffer, normal_buffer, \\\n colors, color_buffer, selected_face, add_face, \\\n shaders\n\n # Clear the rendering information.\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Clear the transformation stack.\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n glPushMatrix()\n\n # Transform the objects drawn below by a rotation.\n trackball.glRotate()\n\n # * * * * * * * * * * * * * * * *\n # Draw all the triangular facets.\n glUseProgram(shaders)\n\n h_vertex = glGetAttribLocation(shaders,'vertex')\n h_normal = glGetAttribLocation(shaders,'normal')\n h_color = glGetAttribLocation(shaders,'color')\n h_eye = glGetUniformLocation(shaders,'eye')\n h_light = glGetUniformLocation(shaders,'light')\n\n # all the vertex positions\n glEnableVertexAttribArray(h_vertex)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glVertexAttribPointer(h_vertex, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # all the vertex normals\n glEnableVertexAttribArray(h_normal)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glVertexAttribPointer(h_normal, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n # all the face vertex colors\n glEnableVertexAttribArray(h_color)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n\n if selected_face and add_face:\n # paint that face's vertices Green\n rgb_selected = [0.7,0.9,0.6] #GREEN\n for change in range(9):\n colors[selected_face.id * 9 + change] = rgb_selected[change % 3]\n # update the color buffer\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n add_face = False\n\n glVertexAttribPointer(h_color, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # position of the flashlight\n light = flashlight.rotate(vector(0.0,0.0,1.0));\n glUniform3fv(h_light, 1, (2.0*radius*light).components())\n\n # position of the viewer's eye\n eye = trackball.recip().rotate(vector(0.0,0.0,1.0))\n glUniform3fv(h_eye, 1, eye.components())\n\n glDrawArrays (GL_TRIANGLES, 0, len(face.instances) * 3)\n\n glDisableVertexAttribArray(h_vertex)\n glDisableVertexAttribArray(h_normal)\n glDisableVertexAttribArray(h_color)\n\n glPopMatrix()\n\n # Render the scene.\n glFlush()\n\n glutSwapBuffers()" ]
[ "0.6908125", "0.6733296", "0.6631174", "0.654832", "0.65126437", "0.6491568", "0.64820564", "0.6396006", "0.6345809", "0.6259672", "0.624029", "0.6208812", "0.6182865", "0.6181145", "0.610569", "0.6074357", "0.60483927", "0.60355747", "0.5983546", "0.5922067", "0.5917617", "0.5882775", "0.58756536", "0.5869997", "0.5862912", "0.5858775", "0.58426523", "0.58217645", "0.58203846", "0.580303" ]
0.7079494
0
Return the key in the _heightmap dict for the given triangle.
def _get_heightmap_key(self, p1: Point3D, p2: Point3D, p3: Point3D) -> Hashable: return p1[0]+p2[0]+p3[0], p1[2]+p2[2]+p3[2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getblockhash(self, blockheight):\n for block in self.blocks:\n if block[\"height\"] == int(blockheight):\n return block[\"hash\"]", "def height_at(self, x, z):\n\n return self.heightmap[x * 16 + z]", "def hkl(self, i):\n return self.get_hkl(self.xp[i], self.yp[i], self.zp[i])", "def get_height(self, p1: Point3D, p2: Point3D, p3: Point3D) -> float:\n return self._heightmap[self._get_heightmap_key(p1,p2,p3)]", "def get_heuristic_cost_height(element, height_index, height_dict):\n if element in height_dict:\n return abs(height_index - height_dict[element])\n else:\n return 0", "def test_get_triangle_dict_all_int(self):\n triangle = {'a': 1, 'b': 2, 'c': 3}\n result = get_triangle_type(triangle)\n self.assertEqual(result, 'scalene')", "def getHashIndex(self, nodeLoc):\n return (nodeLoc[0] * self.height) + nodeLoc[1]", "def get_block_hash(height):\n return requests.get(BASE+f'/api/block-index/{height}').json()['blockHash']", "def calcHeight(self, px, pz):\n #adjust for map not set at origin\n px -= self.unif[0]\n pz -= self.unif[2]\n\n wh = self.width * 0.5\n hh = self.depth * 0.5\n ws = self.width / self.ix\n hs = self.depth / self.iy\n ht = self.height / 255.0\n #round off to nearest integer\n px = (wh + px) / ws\n pz = (hh + pz) / hs\n x = math.floor(px)\n z = math.floor(pz)\n if x < 0: x = 0\n if x > (self.ix-2): x = self.ix-2\n if z < 0: z = 0\n if z > (self.iy-2): z = self.iy-2\n # use actual vertex location rather than recreate it from pixel*ht\n p0 = int(z*self.ix + x) #offset 1 to get y values\n p1 = p0 + 1\n p2 = p0 + self.ix\n p3 = p0 + self.ix + 1\n\n if pz > (z + 1 - px + x): #i.e. this point is in the triangle on the\n #opposite side of the diagonal so swap base corners\n x0, y0, z0 = x + 1, self.buf[0].vertices[p3][1], z + 1\n else:\n x0, y0, z0 = x, self.buf[0].vertices[p0][1], z\n return self.unif[1] + intersect_triangle((x0, y0, z0),\n (x + 1, self.buf[0].vertices[p1][1], z),\n (x, self.buf[0].vertices[p2][1], z + 1),\n (px, 0, pz))", "def get(self, key: int) -> int:\n sh = key % 37\n if self.map[sh] == None:\n return -1\n for i in range(len(self.map[sh])):\n kv = self.map[sh][i]\n if kv[0] == key:\n return kv[1]\n return -1", "def triangle_area(base, height):\n return (base * height) / 2", "def calcHeight(self, px, pz):\r\n #adjust for map not set at origin\r\n px -= self.unif[0]\r\n pz -= self.unif[2]\r\n\r\n wh = self.width * 0.5\r\n hh = self.depth * 0.5\r\n ws = self.width / self.ix\r\n hs = self.depth / self.iy\r\n ht = self.height / 255.0\r\n #round off to nearest integer\r\n px = (wh + px) / ws\r\n pz = (hh + pz) / hs\r\n x = math.floor(px)\r\n z = math.floor(pz)\r\n if x < 0: x = 0\r\n if x > (self.ix-2): x = self.ix-2\r\n if z < 0: z = 0\r\n if z > (self.iy-2): z = self.iy-2\r\n # use actual vertex location rather than recreate it from pixel*ht\r\n p0 = int(z*self.ix + x) #offset 1 to get y values\r\n p1 = p0 + 1\r\n p2 = p0 + self.ix\r\n p3 = p0 + self.ix + 1\r\n\r\n if pz > (z + 1 - px + x): #i.e. 
this point is in the triangle on the\r\n #opposite side of the diagonal so swap base corners\r\n x0, y0, z0 = x + 1, self.buf[0].vertices[p3][1], z + 1\r\n else:\r\n x0, y0, z0 = x, self.buf[0].vertices[p0][1], z\r\n return self.unif[1] + intersect_triangle((x0, y0, z0),\r\n (x + 1, self.buf[0].vertices[p1][1], z),\r\n (x, self.buf[0].vertices[p2][1], z + 1),\r\n (px, 0, pz))", "def getKey(team):\n\treturn team.pointsTotal", "def get_hash(self):\n s = super(BoundingBox, self).get_hash()\n for c in self.start:\n s += \"_%f\" % c\n for c in self.size:\n s += \"_%f\" % c\n return s", "def hash_key(aMap,key):#\n\treturn hash(key)%len(aMap)", "def placementKey( geo):\n def diagcmp( xyA, xyB):\n \"\"\"\n Compare two positions based on x + y. If x + y is the same for the\n two, compare based on x.\n \"\"\"\n return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])\n\n sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ]\n sorted.sort( diagcmp)\n return hash(tuple(sorted))", "def loc_key(self):\r\n key = tuple(self.loc.coord)\r\n return (key)", "def get_hash_address(self, key: keyType) -> int:\n # List and set are unhashable type. So transform the type into 'tuple' if needed.\n tmp = None\n if isinstance(key, set):\n tmp = tuple(key)\n elif isinstance(key, list):\n tmp = tuple(key)\n return tmp.__hash__() % self.length", "def get_location_hash_table():\n return locationHashTable", "def get_hash(self):\n s = super(Point, self).get_hash()\n for c in self.coordinate:\n s += \"_%f\" % c\n return s", "def key(o):\n return hypot((x - o.x), (y - o.y))", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def triangle_area(base, height): # Compute the area of a triangle\n area = (1.0 / 2) * base * height\n return area", "def get_index(self, key):\r\n if self.hash_table[self.horner_hash(key)] is None:\r\n return None\r\n if self.hash_table[self.horner_hash(key)].key is key:\r\n return self.horner_hash(key)", "def get(self, key: int) -> int:\n if key in self.hashmap.keys():return self.hashmap[key]\n else:return -1", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def quadratic_probe(self, key): #hidden\n # the index should be\n index = self.hash_function(key)\n # do not insert empty string\n if index != -1:\n bucketsprobed = 0\n i = 0\n while bucketsprobed < self.capacity:\n if self.table[index] is not None:\n # if the key in the table\n if self.table[index].key == key:\n return index\n elif self.table[index] is None:\n return index\n # Increment i and recompute bucket index\n i += 1\n index = (index + i * i) % self.capacity\n # Increment number of buckets probed\n bucketsprobed += 1\n return index", "def hash_key(self):" ]
[ "0.5575947", "0.549903", "0.5401808", "0.53665644", "0.53405434", "0.5322528", "0.52560556", "0.5242165", "0.5181723", "0.51672906", "0.51612586", "0.5136122", "0.5113682", "0.50287825", "0.5016688", "0.50130314", "0.5011915", "0.49585357", "0.49564213", "0.49434075", "0.49126548", "0.48956633", "0.48905239", "0.48857832", "0.48716268", "0.4866905", "0.4866905", "0.4866905", "0.48622385", "0.48602796" ]
0.68827283
0
Fill the background sky gradient. Uses num_steps rectangles to approximate a linear gradient that goes from the top of the screen to start_y of the way down the screen (between 0.0 and 1.0).
def fill_sky_gradient(num_steps: int, start_y: float): # compute some helper values min_x = -turtle.window_width() / 2 max_x = +turtle.window_width() / 2 y_step = turtle.window_height()*start_y / num_steps min_y = turtle.window_height() / 2 - turtle.window_height()*start_y # fill the section below the gradient fill_rectangle(min_x, -turtle.window_height()/2, max_x, min_y, LOWER_SKY_COLOR) # fill the gradient for i in range(num_steps): fill_rectangle(min_x, min_y, max_x, min_y + y_step + 1, mix_colors(LOWER_SKY_COLOR, UPPER_SKY_COLOR, i/(num_steps-1))) min_y += y_step
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bg (self):\n self.health = max(0.0, min(1.0, (self.healthsteps + self.mud.value) / self.healthsteps))\n healthycolor = (0x11, 0x22, 0x44)\n pollutedcolor = (0x66, 0x66, 0)\n self.watercolor = [int((a - b) * self.health + b)\n for a,b in zip(healthycolor, pollutedcolor)]\n colorname = \"rgb({},{},{})\".format(*self.watercolor)\n w, h = self.width, self.height\n self.draw.rectangle((0,0,w-1,self.level_px-1), \"#000000\")\n self.draw.rectangle((0,self.level_px,w-1,h-1), colorname)", "def DrawBackground(self, dc, wnd, _rect, horizontal=True):\r\n\r\n rect = wx.Rect(*_rect)\r\n\r\n start_colour = StepColour(self._base_colour, 180)\r\n end_colour = StepColour(self._base_colour, 85)\r\n reflex_colour = StepColour(self._base_colour, 95)\r\n \r\n dc.GradientFillLinear(rect, start_colour, end_colour,\r\n (horizontal and [wx.SOUTH] or [wx.EAST])[0])\r\n\r\n left = rect.GetLeft()\r\n right = rect.GetRight()\r\n top = rect.GetTop()\r\n bottom = rect.GetBottom()\r\n\r\n dc.SetPen(wx.Pen(reflex_colour))\r\n if horizontal:\r\n dc.DrawLine(left, bottom, right+1, bottom)\r\n else:\r\n dc.DrawLine(right, top, right, bottom+1)", "def background():\n sky_color = (66, 170, 255) # color of the sky\n grass_color = (0, 128, 0) # color of the grass\n\n rect(screen, sky_color, (0, 0, 500, 250), 0) # sky\n rect(screen, grass_color, (0, 250, 500, 250), 0) # grass", "def make_background(self):\n for x in range(self.env_list[0].size):\n for y in range(self.env_list[0].size):\n img = load_image(\"dirt.png\")[0]\n self.background.blit(img, (x*50, y*50))", "def draw_bg(self):\n for y in range(WIN_HEIGHT/32): #TODO: make sure this process is correct and efficient.\n for x in range(WIN_WIDTH/32):\n self.screen_image.blit(self.bg, (x * 32, y * 32))", "def fill_gradient(\n surface: 'pygame.Surface',\n color: ColorInputType,\n gradient: ColorInputType,\n rect: Optional['pygame.Rect'] = None,\n vertical: bool = True,\n forward: bool = True\n) -> None:\n if rect is None:\n rect = surface.get_rect()\n x1, x2 = rect.left, rect.right\n y1, y2 = rect.top, rect.bottom\n color = assert_color(color)\n gradient = assert_color(gradient)\n if vertical:\n h = y2 - y1\n else:\n h = x2 - x1\n if forward:\n a, b = color, gradient\n else:\n b, a = color, gradient\n rate = (\n float(b[0] - a[0]) / h,\n float(b[1] - a[1]) / h,\n float(b[2] - a[2]) / h\n )\n fn_line = pygame.draw.line\n if vertical:\n for line in range(y1, y2):\n color = (\n min(max(a[0] + (rate[0] * (line - y1)), 0), 255),\n min(max(a[1] + (rate[1] * (line - y1)), 0), 255),\n min(max(a[2] + (rate[2] * (line - y1)), 0), 255)\n )\n fn_line(surface, color, (x1, line), (x2, line))\n else:\n for col in range(x1, x2):\n color = (\n min(max(a[0] + (rate[0] * (col - x1)), 0), 255),\n min(max(a[1] + (rate[1] * (col - x1)), 0), 255),\n min(max(a[2] + (rate[2] * (col - x1)), 0), 255)\n )\n fn_line(surface, color, (col, y1), (col, y2))", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 
176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def draw_bg(self):\n self.screen.fill(self.bg)", "def drawBackground(self,screen):\n pygame.draw.rect(screen,(240,240,240),(self.basepos[0],self.basepos[1],204,504))\n pygame.draw.rect(screen,(0,0,0),(self.basepos[0]+2,self.basepos[1]+2,200,500))", "def draw_ground():\n for i in range(3):\n groundturtle.forward(1450)\n groundturtle.left(90)\n groundturtle.forward(25)\n groundturtle.left(90)\n groundturtle.forward(1450)\n groundturtle.right(90)\n groundturtle.forward(25)\n groundturtle.right(90)", "def update_gradients(self, dw, db):\n self.w = self.w - self.lr * dw\n self.b = self.b - (self.lr * db)", "def _incremental_steps(start, end, steps, stepsize=None):\n if stepsize is None: step_size = (end - start) / np.maximum((steps - 1), 1)\n gradient = []\n for i in range(steps):\n value = start + step_size * i\n gradient.append(value)\n\n return gradient[0:steps]", "def build_background():\n layer_1 = GRect(800, 550)\n layer_1.filled = True\n layer_1.color = 'silver'\n layer_1.fill_color = 'silver'\n window.add(layer_1)\n layer_2 = GRect(800, 90)\n layer_2.filled = True\n layer_2.color = 'whitesmoke'\n layer_2.fill_color = 'whitesmoke'\n window.add(layer_2)\n layer_3 = GRect(800, 40, x=0, y=510)\n layer_3.filled = True\n layer_3.color = 'whitesmoke'\n layer_3.fill_color = 'whitesmoke'\n window.add(layer_3)", "def DrawGradientRectangle(dc, rect, start_colour, end_colour, direction, offset=0, length=0):\r\n \r\n if direction == AUI_GRADIENT_VERTICAL:\r\n dc.GradientFillLinear(rect, start_colour, end_colour, wx.SOUTH)\r\n else:\r\n dc.GradientFillLinear(rect, start_colour, end_colour, wx.EAST)", "def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])", "def __init__(self, start_color=Pigment(RGBColor(1.0, 1.0, 1.0)), end_color=Pigment(RGBColor(0.5, 0.7, 1.0)), axis=1):\n super().__init__(\"gradient_texture\")\n self.start_color = start_color\n self.end_color = end_color\n self.axis = axis", "def _bg_update(self):\n self.screen.fill(colour.BLACK)\n for star in self._stars:\n if star[2] + star[1] > self.s_height:\n star[1] = 0\n else:\n star[1] += star[2]\n self.screen.set_at((star[0], star[1]), colour.WHITE)", "def blit_background(self):\n self.screen.fill([67, 67, 67])\n self.screen.blit(self.background, (0,0))\n pygame.draw.rect(self.screen, (0, 0, 0), self.seperate_line)", "def draw_background(self):\n back = pygame.Surface(self.size)\n width, height = self.size\n self.shapes['gradient'] = shapes.gen_gradient(\n (width, 
height / 2),\n self.colors[3],\n self.colors[4]\n )\n back.blit(self.shapes['gradient'], (0, height - self.sh('gradient')))\n\n # TODO: Don't use static path/icon\n image = '/usr/share/icons/Tango/scalable/mimetypes/audio-x-generic.svg'\n self.shapes['musicimg'] = load_svg(image, [height/2]*2)\n back.blit(\n self.shapes['musicimg'],\n (width / 10, (height - self.sh('musicimg')) / 2)\n )\n return back", "def create_color_gradient():\n colors = []\n step = 10\n for red, green in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': green, 'blue': 0})\n for green, blue in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': 0, 'green': green, 'blue': blue})\n for blue, red in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': 0, 'blue': blue})\n return colors", "def FillVC8GradientColour(self, dc, tabPoints, active):\r\n\r\n xList = [pt.x for pt in tabPoints]\r\n yList = [pt.y for pt in tabPoints]\r\n \r\n minx, maxx = min(xList), max(xList)\r\n miny, maxy = min(yList), max(yList)\r\n\r\n rect = wx.Rect(minx, maxy, maxx-minx, miny-maxy+1) \r\n region = wx.RegionFromPoints(tabPoints)\r\n\r\n if self._buttonRect.width > 0:\r\n buttonRegion = wx.Region(*self._buttonRect)\r\n region.XorRegion(buttonRegion)\r\n \r\n dc.SetClippingRegionAsRegion(region)\r\n\r\n if active:\r\n bottom_colour = top_colour = wx.WHITE\r\n else:\r\n bottom_colour = StepColour(self._base_colour, 90)\r\n top_colour = StepColour(self._base_colour, 170)\r\n\r\n dc.GradientFillLinear(rect, top_colour, bottom_colour, wx.SOUTH)\r\n dc.DestroyClippingRegion()", "def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)", "def gradientRect( window, left_colour, right_colour, target_rect ):\n colour_rect = pygame.Surface( ( 2, 2 ) ) # tiny! 
2x2 bitmap\n pygame.draw.line( colour_rect, left_colour, ( 0,0 ), ( 0,1 ) ) # left colour line\n pygame.draw.line( colour_rect, right_colour, ( 1,0 ), ( 1,1 ) ) # right colour line\n colour_rect = pygame.transform.smoothscale( colour_rect, ( target_rect.width, target_rect.height ) ) # stretch!\n window.blit( colour_rect, target_rect ) # paint it", "def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)", "def background_maker():\n background = GRect(window.width, window.height)\n background.filled = True\n background.fill_color = '0xFFFCEC'\n background.color = '0xFFFCEC'\n return background", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. 
- a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def DrawTabBackground(self, dc, rect, focus, upperTabs):\r\n\r\n # Define the rounded rectangle base on the given rect\r\n # we need an array of 9 points for it\r\n regPts = [wx.Point() for indx in xrange(9)]\r\n\r\n if focus:\r\n if upperTabs:\r\n leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*8)\r\n rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*8)\r\n else:\r\n leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*5)\r\n rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*5)\r\n else:\r\n leftPt = wx.Point(rect.x, rect.y + (rect.height / 2))\r\n rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 2))\r\n\r\n # Define the top region\r\n top = wx.RectPP(rect.GetTopLeft(), rightPt)\r\n bottom = wx.RectPP(leftPt, rect.GetBottomRight())\r\n\r\n topStartColour = wx.WHITE\r\n\r\n if not focus:\r\n topStartColour = LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE), 50)\r\n\r\n topEndColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)\r\n bottomStartColour = topEndColour\r\n bottomEndColour = topEndColour\r\n\r\n # Incase we use bottom tabs, switch the colours\r\n if upperTabs:\r\n if focus:\r\n dc.GradientFillLinear(top, topStartColour, topEndColour, wx.SOUTH)\r\n dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)\r\n else:\r\n dc.GradientFillLinear(top, topEndColour , topStartColour, wx.SOUTH)\r\n dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)\r\n\r\n else:\r\n if focus:\r\n dc.GradientFillLinear(bottom, topEndColour, bottomEndColour, wx.SOUTH)\r\n dc.GradientFillLinear(top, topStartColour, topStartColour, wx.SOUTH)\r\n else:\r\n dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)\r\n dc.GradientFillLinear(top, topEndColour, topStartColour, wx.SOUTH)\r\n \r\n dc.SetBrush(wx.TRANSPARENT_BRUSH)", "def global_sky_background(self, LF):\n # Variables:\n s = 9 # Number of subframes (CHANGE IF NEEDED!) E.g. 4, 9, 16 etc. \n n = self.h*self.w/(self.h+self.w) # Number of pixels used in subframes scales with image dim \n nrows = self.h/(s/2) # Numbers of rows in each subframe\n ncols = self.w/(s/2) # Numbers of columns in each subframe\n\n # Reshape light frame into subframe:\n LF_sub = (LF.reshape(self.h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols))\n\n # Loop over all subframes:\n min_val = np.zeros((s,n))\n for i in range(s):\n # Loops over all pixels:\n for j in range(n):\n min_val[i,j] = np.min(LF_sub[i]) # Minimum value for array\n min_dex = np.where(LF_sub[i] == min_val[i,j]) # Find row, column for min value\n # Min pixel is set to max in order to find the next min:\n LF_sub[i, min_dex[0][0], min_dex[1][0]] = np.max(LF_sub[i]) \n\n # Flux:\n flux_sky = 3*median(min_val) - 2*mean(min_val) # Mean flux from pixels\n return flux_sky", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. 
- a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def DrawVerticalGradient(self, dc, rect, hasfocus):\r\n\r\n oldpen = dc.GetPen()\r\n oldbrush = dc.GetBrush()\r\n dc.SetPen(wx.TRANSPARENT_PEN)\r\n\r\n # calculate gradient coefficients\r\n if hasfocus:\r\n col2 = self._secondcolour\r\n col1 = self._firstcolour\r\n else:\r\n col2 = self._hilightUnfocusedBrush.GetColour()\r\n col1 = self._hilightUnfocusedBrush2.GetColour()\r\n\r\n r1, g1, b1 = int(col1.Red()), int(col1.Green()), int(col1.Blue())\r\n r2, g2, b2 = int(col2.Red()), int(col2.Green()), int(col2.Blue())\r\n\r\n flrect = float(rect.height)\r\n\r\n rstep = float((r2 - r1)) / flrect\r\n gstep = float((g2 - g1)) / flrect\r\n bstep = float((b2 - b1)) / flrect\r\n\r\n rf, gf, bf = 0, 0, 0\r\n \r\n for y in xrange(rect.y, rect.y + rect.height):\r\n currCol = (r1 + rf, g1 + gf, b1 + bf) \r\n dc.SetBrush(wx.Brush(currCol, wx.SOLID))\r\n dc.DrawRectangle(rect.x, y, rect.width, 1)\r\n rf = rf + rstep\r\n gf = gf + gstep\r\n bf = bf + bstep\r\n \r\n dc.SetPen(oldpen)\r\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\r\n dc.DrawRectangleRect(rect)\r\n dc.SetBrush(oldbrush)" ]
[ "0.5875574", "0.5867943", "0.567791", "0.565743", "0.56277025", "0.56065214", "0.54429275", "0.54425997", "0.5415052", "0.54125994", "0.5393641", "0.5389858", "0.5376464", "0.532522", "0.52964115", "0.52853453", "0.52220666", "0.5220336", "0.5219927", "0.52009046", "0.5150408", "0.51278293", "0.5107704", "0.50910383", "0.50752074", "0.50673366", "0.5055838", "0.50245684", "0.50139153", "0.50082475" ]
0.82996637
0
Partition all the ELM events into training, validation and test indices. Training and validation sets are created based on simple splitting with validation set being `fraction_validate` of the training set or by Kfold crossvalidation.
def _partition_elms( self, max_elms: int = None, fold: int = None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: # get ELM indices from datafile elm_index, _ = self._read_file() # limit the data according to the max number of events passed if max_elms is not None and max_elms != -1: LOGGER.info(f"Limiting data read to {max_elms} events.") n_elms = max_elms else: n_elms = len(elm_index) # split the data into train, validation and test sets training_elms, test_elms = model_selection.train_test_split( elm_index[:n_elms], test_size=self.fraction_test, shuffle=True, random_state=config.seed, ) # kfold cross validation if self.kfold and fold is None: raise Exception( f"K-fold cross validation is passed but fold index in range [0, {config.folds}) is not specified." ) if self.kfold: LOGGER.info("Using K-fold cross validation") self._kfold_cross_val(training_elms) training_elms = self.df[self.df["fold"] != fold]["elm_events"] validation_elms = self.df[self.df["fold"] == fold]["elm_events"] else: LOGGER.info( "Creating training and validation datasets by simple splitting" ) training_elms, validation_elms = model_selection.train_test_split( training_elms, test_size=self.fraction_validate ) LOGGER.info(f"Number of training ELM events: {training_elms.size}") LOGGER.info(f"Number of validation ELM events: {validation_elms.size}") LOGGER.info(f"Number of test ELM events: {test_elms.size}") return training_elms, validation_elms, test_elms
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_validation_split(self, threshold=None):\n for train, validation in self._get_k_folds(5, threshold):\n train_provider = train\n validation_provider = validation\n break\n return train_provider, validation_provider", "def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon", "def split_validation_training_index(allind, splitsize, do_offset, offset_steps):\n i = offset_steps\n lval = splitsize\n if not do_offset:\n i_val = allind[:lval]\n i_train = allind[lval:]\n else:\n i_val = allind[i * lval:(i + 1) * lval]\n i_train = np.concatenate([allind[0:i * lval], allind[(i + 1) * lval:]], axis=0)\n if len(i_val) <= 0:\n print(\"Warning: #Validation data is 0, take 1 training sample instead\")\n i_val = i_train[:1]\n\n return i_train, i_val", "def split_train_and_validation(whole_train_data, whole_train_labels, validation_index, k_fold):\n dimension = whole_train_data.shape[1]\n train_data_chunks = np.array_split(whole_train_data, k_fold)\n train_label_chunks = np.array_split(whole_train_labels, k_fold)\n validation_data = train_data_chunks[validation_index]\n validation_labels = train_label_chunks[validation_index]\n train_data = np.delete(train_data_chunks, validation_index, 0)\n train_data = train_data.reshape((-1, dimension))\n train_labels = np.delete(train_label_chunks, validation_index, 0)\n train_labels = train_labels.flatten()\n return train_data, train_labels, validation_data, validation_labels", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def train_test_split(data, validate_size=0.3):\r\n\r\n split = len(data) * (1 - validate_size)\r\n split = int(split)\r\n train = data[:split]\r\n validate = data[split:]\r\n\r\n return train, validate", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def 
split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n indices=np.arange(num_samples)\n #indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n indices=np.arange(num_samples)\n #indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def generator_ae_split(self, batch_size, validation_split=0.1, validation=False):\n assert self.dataset_valid, \"Dataset was created with no samples...\"\n\n 
pred_dummy = np.zeros((batch_size, self.z_num), dtype=np.float32)\n gen_ae = self.generator_ae(batch_size, validation_split=validation_split, validation=validation)\n while True:\n input_array, [_, p] = next(gen_ae)\n output_array = [input_array, p, pred_dummy, pred_dummy]\n yield input_array, output_array", "def train_valid_index_split(all_index, train_size = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\ttrain_size = len(all_index) if train_size is None else train_size\n\ttrain_index_ = np.random.choice(all_index, train_size, replace = False)\n\ttrain_index, valid_index = np.split(train_index_, [int(train_size*(1-valid_split))])\n\treturn train_index, valid_index", "def batch_fit(self, train_loader: torch.utils.data.DataLoader,\n test_loader: torch.utils.data.DataLoader,\n train_size: int, test_size: int, epochs: int = 1,\n calc_mapk: bool = True):\n\n for epoch in range(epochs):\n stats = {'epoch': epoch+1}\n\n print('Training begins...')\n train_loss = self._training(train_loader, train_size)\n stats['train_loss'] = train_loss\n\n print('Validation begins...')\n if calc_mapk:\n print('validation with mapk')\n val_loss, val_mapk = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_mapk'] = val_mapk\n else:\n print('validation without mapk')\n val_loss = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_loss'] = val_loss\n print(stats)\n\n self.metrics.append(stats)", "def train(self):\n not_improved_count = 0\n best_validation_fscore = 0.0\n\n for epoch in range(self.start_epoch, self.max_epochs + 1):\n # Perform one training epoch and output training metrics\n training_metrics = self.run_epoch(epoch, self.train_data_loader, training=True)\n self.logger.info(\"Training epoch {} finished.\".format(epoch))\n self.log_metrics(training_metrics)\n\n # Perform one validation epoch and output validation metrics\n validation_metrics = self.run_epoch(epoch, self.valid_data_loader, training=False)\n self.logger.info(\"Validation epoch {} finished.\".format(epoch))\n self.log_metrics(validation_metrics)\n\n # Check if model is new best according to validation F1 score\n improved = validation_metrics[\"fscore\"] > best_validation_fscore\n if improved:\n best_validation_fscore = validation_metrics[\"fscore\"]\n not_improved_count = 0\n else:\n not_improved_count += 1\n\n if improved or epoch % self.save_period == 0:\n self._save_checkpoint(epoch, is_best=improved)\n\n if not_improved_count > self.early_stop and epoch >= self.min_epochs:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs. 
\"\n \"Training stops.\".format(self.early_stop))\n break", "def make_data_splits(samples, params, RESULTSDIR, num_experiments):\n # TODO: Switch to .mat from .pickle so that these lists are easier to read\n # and change.\n\n partition = {}\n if params[\"load_valid\"] is None:\n # Set random seed if included in params\n if params[\"data_split_seed\"] is not None:\n np.random.seed(params[\"data_split_seed\"])\n\n all_inds = np.arange(len(samples))\n\n # extract random inds from each set for validation\n v = params[\"num_validation_per_exp\"]\n valid_inds = []\n if params[\"valid_exp\"] is not None and params[\"num_validation_per_exp\"] > 0:\n all_valid_inds = []\n for e in params[\"valid_exp\"]:\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n all_valid_inds = all_valid_inds + tinds\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = list(set(all_inds) - set(all_valid_inds))#[i for i in all_inds if i not in all_valid_inds]\n elif params[\"num_validation_per_exp\"] > 0: # if 0, do not perform validation\n for e in range(num_experiments):\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = [i for i in all_inds if i not in valid_inds]\n elif params[\"valid_exp\"] is not None:\n raise Exception(\"Need to set num_validation_per_exp in using valid_exp\")\n else:\n train_inds = all_inds\n\n assert (set(valid_inds) & set(train_inds)) == set()\n\n train_samples = samples[train_inds]\n train_inds = []\n if params[\"valid_exp\"] is not None:\n train_expts = [f for f in range(num_experiments) if f not in params[\"valid_exp\"]]\n else:\n train_expts = np.arange(num_experiments)\n\n print(\"TRAIN EXPTS: {}\".format(train_expts))\n\n if params[\"num_train_per_exp\"] is not None:\n # Then sample randomly without replacement from training sampleIDs\n for e in train_expts:\n tinds = [\n i for i in range(len(train_samples)) if int(train_samples[i].split(\"_\")[0]) == e\n ]\n print(e)\n print(len(tinds))\n train_inds = train_inds + list(\n np.random.choice(tinds, (params[\"num_train_per_exp\"],), replace=False)\n )\n train_inds = list(np.sort(train_inds))\n else:\n train_inds = np.arange(len(train_samples))\n\n \n\n partition[\"valid_sampleIDs\"] = samples[valid_inds]\n partition[\"train_sampleIDs\"] = train_samples[train_inds]\n\n # Save train/val inds\n with open(os.path.join(RESULTSDIR, \"val_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"valid_sampleIDs\"], f)\n\n with open(os.path.join(RESULTSDIR, \"train_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"train_sampleIDs\"], f)\n else:\n # Load validation samples from elsewhere\n with open(os.path.join(params[\"load_valid\"], \"val_samples.pickle\"), \"rb\",) as f:\n partition[\"valid_sampleIDs\"] = cPickle.load(f)\n partition[\"train_sampleIDs\"] = [\n f for f in samples if f not in partition[\"valid_sampleIDs\"]\n ]\n\n # Reset any seeding so that future batch shuffling, etc. 
are not tied to this seed\n if params[\"data_split_seed\"] is not None:\n np.random.seed()\n\n return partition", "def valid_loop(self, epoch, validate_loader):\n validate_set = iter(validate_loader)\n self.model.eval()\n epoch_mape = 0\n total_scans = 0\n current_chunk = 0 # The current chunk of one scan currently processing\n chunk_loss = 0\n chunk_predicted = None\n chunk_labels = None\n chunk_pos = None\n for current_iteration, (data, labels, pos, file_name) in enumerate(validate_set):\n current_chunk += 1\n self.logger.info(f\"Epoch: {epoch}, Validation scan: {(current_iteration // self.valid_chunks) + 1} / \"\n f\"{len(validate_loader) // self.valid_chunks}. \"\n f\"Chunk: {((current_chunk - 1) % self.valid_chunks) + 1} / \"\n f\"{self.valid_chunks}\")\n data, labels = data.to(self.device), labels.to(self.device)\n predicted, labels, pos, loss, attention = self.validate(data, labels, pos)\n data, labels = data.cpu(), labels.cpu()\n chunk_loss += loss.detach().cpu().item()\n chunk_predicted = predicted.detach().cpu() if chunk_predicted is None else \\\n torch.cat((chunk_predicted, predicted.detach().cpu()), 0)\n chunk_labels = labels.detach().cpu() if chunk_labels is None else \\\n torch.cat((chunk_labels, labels.detach().cpu()), 0)\n chunk_pos = pos.detach().cpu() if chunk_pos is None else \\\n torch.cat((chunk_pos, pos.detach().cpu()), 0)\n if current_chunk % self.valid_chunks == 0:\n self.data_logger.log_error(chunk_predicted,\n chunk_labels,\n chunk_loss, \"valid\")\n\n if self.plot_every > 0 and epoch % self.plot_every == 0:\n if not os.path.exists(f\"{self.export_dir}/Plots\"):\n os.mkdir(f\"{self.export_dir}/Plots\")\n # Matplotlib has a memory leak. To alleviate this do plotting in a subprocess and\n # join to it. When process is suspended, memory is forcibly released.\n plot(plot_maps,\n chunk_predicted.numpy(),\n chunk_labels.numpy(),\n chunk_pos.numpy().astype(int),\n epoch,\n f\"{self.export_dir}/Plots/{file_name}\",\n file_name)\n epoch_mape += torch.mean(torch.abs(((chunk_labels - chunk_predicted) / chunk_labels))) * 100\n total_scans += 1\n chunk_loss = 0\n chunk_predicted = None\n chunk_labels = None\n chunk_pos = None\n\n epoch_valid_mape = epoch_mape / total_scans\n self.logger.info(f\"Validation MAPE: {epoch_valid_mape}\")\n return epoch_valid_mape", "def _generate_validation_fold(self):\n\n for offset in range(self.nb_folds):\n # Load all the data from cache (do this to save memory)\n with open(self.data_cache_path_str + \"data_cache.pkl\", \"rb\") as f:\n data_df, target_df = pickle.load(f)\n\n # Generate train and test sets\n data_dates_lst = data_df[\"date\"].drop_duplicates().sort_values().tolist()\n train_start_day = len(data_dates_lst) - ((self.nb_folds - offset) * self.test_nb_days + self.train_nb_days)\n train_end_day = train_start_day + self.train_nb_days\n test_start_day = train_end_day\n test_end_day = test_start_day + self.test_nb_days\n\n train_dates_lst = data_dates_lst[train_start_day:train_end_day]\n test_dates_lst = data_dates_lst[test_start_day:test_end_day]\n\n # Generate train and test labels\n training_set_df = data_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n testing_set_df = data_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n new_target_df = target_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n truth_df = target_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n\n # Reduce memory usage\n del data_df, target_df\n gc.collect()\n\n # 
Return result\n yield (training_set_df, testing_set_df, new_target_df, truth_df)", "def batch_iter(self, partition_index, batch_size, num_epochs):\n err_msg = 'partition index {} out of range 0-{}'.format(\n partition_index, len(self._partitioned_filenames))\n assert partition_index < len(self._partitioned_filenames), err_msg\n filenames = self._partitioned_filenames[partition_index]\n for epoch in xrange(num_epochs):\n print('[DATA] Partition {}: Epoch {} of {}'.format(partition_index, epoch+1, num_epochs))\n # Shuffle the filenames each epoch so we don't always go through in the same order.\n np.random.shuffle(filenames)\n for filename in filenames:\n # Load the file, split its content into batches.\n h5_dict = load(filename)\n data_size = h5_dict['ECAL'].shape[0]\n num_batches = int(data_size / batch_size) + (0 if data_size % batch_size == 0 else 1)\n #print('[DATA] Loading {} experiments in {} batches from {}'.format(data_size, num_batches, filename))\n # Shuffle the order of the batches, and extract each batch one by one.\n for batch_num in np.random.permutation(np.arange(num_batches)):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n if start_index < end_index:\n # 20-24 are always zero in the x and y axis, remove them.\n ecal = h5_dict['ECAL'][start_index:end_index, :DATA_DIM[0], :DATA_DIM[1], :]\n # Only returns the first two elements of the target array.\n # The middle dimension is unused, so squeeze() removes it.\n # If end_index-start_index = 1, then squeeze will be too aggressive and take\n # it down to shape [2]. np.reshape makes sure the shape is [end_index-start_index,2].\n target = np.reshape(h5_dict['target'][start_index:end_index, :, :2].squeeze(),\n (-1, 2))\n yield ecal, target", "def train_valid_index_split_two_stage(all_index, train_size_1 = None, train_size_2 = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\n\ttrain_size_2 = len(all_index) if train_size_2 is None else train_size_2\n\ttrain_index_2_ = np.random.choice(all_index, train_size_2, replace = False)\n\ttrain_index_2, valid_index_2 = np.split(train_index_2_, [int(train_size_2*(1-valid_split))])\n\n\tall_index = np.setdiff1d(all_index, train_index_2)\n\ttrain_index_1_ = np.random.choice(all_index, train_size_1-train_size_2, replace = False)\n\ttrain_index_1, valid_index_1 = np.split(train_index_1_, [int((train_size_1-train_size_2)*(1-valid_split))])\n\ttrain_index_1 = np.hstack([train_index_1, train_index_2])\n\tvalid_index_1 = np.hstack([valid_index_1, valid_index_2])\n\treturn train_index_1, valid_index_1, train_index_2, valid_index_2", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. 
indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def _set_splits(self, train_E, train_E_false=None, test_E=None, test_E_false=None, directed=False, nw_name='test',\n TG=None, split_id=0, split_alg='spanning_tree', verbose=False):\n if len(train_E) != 0:\n if train_E_false is not None:\n # Stack the edges and non-edges together.\n self._train_edges = np.vstack((list(train_E), list(train_E_false)))\n\n # Create labels vector with 1s for edges and 0s for non-edges\n self._train_labels = np.hstack((np.ones(len(train_E)), np.zeros(len(train_E_false))))\n\n else:\n # Stack the edges and non-edges together.\n self._train_edges = np.array(list(train_E))\n\n # Create labels vector with 1s for edges and 0s for non-edges\n self._train_labels = np.ones(len(train_E))\n\n if test_E is not None:\n if test_E_false is not None:\n # Stack the edges and non-edges together.\n self._test_edges = np.vstack((list(test_E), list(test_E_false)))\n\n # Create labels vector with 1s for edges and 0s for non-edges\n self._test_labels = np.hstack((np.ones(len(test_E)), np.zeros(len(test_E_false))))\n\n else:\n # We only have test edges (no test non-edges)\n self._test_edges = np.array(list(test_E))\n\n # Create labels vector with 1s for edges\n self._test_labels = np.ones(len(test_E))\n else:\n self._test_edges = []\n self._test_labels = []\n\n # Initialize the training graph\n if TG is None:\n if directed:\n self._TG = nx.DiGraph()\n else:\n self._TG = nx.Graph()\n self._TG.add_edges_from(train_E)\n else:\n self._TG = TG.copy()\n\n # Set class attributes to new values\n if test_E is not None:\n self._train_frac = np.around(len(train_E) / (len(train_E) + len(test_E)), 4)\n else:\n self._train_frac = 1\n self._split_alg = split_alg\n self._split_id = split_id\n self._nw_name = nw_name\n else:\n raise ValueError(\"Train edges are always required!\")\n\n # Print the process\n if verbose:\n print(\"Edge splits computed using {} alg. 
ready.\".format(self.split_alg))", "def splitTrainValidate(df, perc_training = 0.8):\n train = df.sample(frac=perc_training)#, random_state=200)\n validate = df.drop(train.index)\n return (train, validate)", "def split(self,X,y=None):\n all_idx = pd.Series(np.arange(X.shape[0])) \n mbrg = int(X.shape[0]*self.embargo_pct)\n test_starts=[(i[0],i[-1]+1) for i in np.array_split(all_idx.values,self.n_splits)]\n for i, j in test_starts:\n t0 = all_idx.index[i] # start of test set\n test_indices = all_idx.values[i:j]\n maxT1Idx = all_idx.index.searchsorted(all_idx[test_indices].max())\n train_indices = all_idx.index.searchsorted(all_idx[all_idx<=t0].index)\n if maxT1Idx < X.shape[0]: \n train_indices=np.concatenate((train_indices,all_idx[maxT1Idx+mbrg:]))\n yield train_indices,test_indices", "def split_train_validation_and_test(num_examples, val_percentage, test_percentage):\n all_samples_idx = np.arange(num_examples)\n np.random.shuffle(all_samples_idx)\n test_examples = int(np.ceil(num_examples * test_percentage))\n val_examples = int(np.ceil(num_examples * val_percentage))\n # Train and validation indexes\n train_idx = all_samples_idx[0:len(all_samples_idx) - test_examples - val_examples]\n val_idx = all_samples_idx[len(all_samples_idx) - test_examples - val_examples:len(all_samples_idx) - test_examples]\n test_idx = all_samples_idx[len(all_samples_idx) - test_examples:]\n train_idx.sort()\n val_idx.sort()\n test_idx.sort()\n\n return [train_idx, val_idx, test_idx]", "def split(self, X):\n # Make sure it's a sparse array...\n X = check_sparse_array(X)\n\n # Use np.linspace to evenly partition the space between 0 and 1 into\n # k + 1 pieces so we can use them as \"training_sizes\"\n train_sizes = np.linspace(0, 1, self.n_splits + 1)\n\n # We use a series of \"permuted values\" to mask out the training/testing\n # folds.\n random_state = check_random_state(self.random_state)\n values = _get_train_mask_linspace(X.nnz, random_state,\n shuffle=self.shuffle)\n\n # Iterate the fold space bounds in a generator, returning train/test\n for lower, upper in zip(train_sizes[:-1], train_sizes[1:]):\n test, train = _split_between_values(X, values, lower, upper)\n yield train, test", "def cross_validate(self, train_size):\n train, val, test_x, test_y = [], [], [], []\n for country in self.countries:\n tr, v, te_x, te_y = country.split_k_fold(train_size, self.horizon)\n train.append(tr), val.append(v), test_x.append(te_x), test_y.append(te_y)\n return np.stack(train), np.stack(val), np.stack(test_x), np.stack(test_y)", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train 
= y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def split_dev(self):\n\t\tprint(\"Splitting test set into dev and test set\")\n\n\t\told_length = len(self.X[\"test\"])\n\t\tindices = list(range(old_length))\n\n\t\tnp.random.seed(0)\n\t\tnp.random.shuffle(indices)\n\t\t\n\t\tsplit = int(len(indices) * 0.5)\n\n\t\tsplit_indices = {\"test\": indices[:split], \"dev\": indices[split:]}\n\t\n\t\tfor dataset in (\"dev\", \"test\"):\n\t\t\tself.X[dataset] = [self.X[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.Y[dataset] = [self.Y[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.raw_documents[dataset] = [self.raw_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.tokenized_documents[dataset] = [self.tokenized_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\n\t\tprint(\"Split test set with\", old_length, \"samples into\", len(self.X[\"test\"]), \"/\", len(self.X[\"dev\"]), \"samples\")", "def train_validation_test_split(\n data: pd.DataFrame,\n target: str,\n val_partition: float = 0.2,\n test_partition: float = 0.15\n) -> list:\n\n assert val_partition + test_partition < 1.0\n\n val_samples = val_partition * data.shape[0]\n test_samples = test_partition * data.shape[0]\n\n train_validation, test = train_test_split(\n data, test_size=int(test_samples), stratify=data[target]\n )\n\n train, validation = train_test_split(\n train_validation, test_size=int(val_samples), stratify=train_validation[target]\n )\n\n return [train, validation, test]", "def 
split_train_eval(\n self,\n eval_size: Union[int, float] = 0.25,\n random_state: Optional[int] = None,\n ) -> None:\n (\n self.X_tr,\n self.X_ev,\n self.y_tr,\n self.y_ev,\n _,\n self.y_full_ev,\n ) = train_test_split(\n self.X, self.y, self.y_full, test_size=eval_size, random_state=random_state\n )\n self.n_rounds_ev = self.X_ev.shape[0]" ]
[ "0.64495283", "0.6436599", "0.64065903", "0.62911177", "0.6255103", "0.62455463", "0.62217116", "0.6184939", "0.61742663", "0.61742663", "0.61652815", "0.6137683", "0.61352956", "0.61342204", "0.61287206", "0.6107777", "0.60363936", "0.6032738", "0.60128796", "0.59902066", "0.5980845", "0.59693664", "0.5951976", "0.59447724", "0.59350663", "0.5932485", "0.5919861", "0.59132946", "0.5879769", "0.5873955" ]
0.7032567
0
PyTorch dataset class to get the ELM data and corresponding labels according to the sample_indices. The signals are grouped by `signal_window_size`
def __init__( self, signals: np.ndarray, labels: np.ndarray, sample_indices: np.ndarray, window_start: np.ndarray, signal_window_size: int, label_look_ahead: int, stack_elm_events: bool = False, transform=None, ): self.signals = signals self.labels = labels self.sample_indices = sample_indices self.window_start = window_start self.signal_window_size = signal_window_size self.label_look_ahead = label_look_ahead self.stack_elm_events = stack_elm_events self.transform = transform LOGGER.info("-" * 15) LOGGER.info(" Dataset class") LOGGER.info("-" * 15) LOGGER.info(f"Signals shape: {signals.shape}") LOGGER.info(f"Labels shape: {labels.shape}") LOGGER.info(f"Sample indices shape: {sample_indices.shape}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_data(nb_samples):\n inputs = torch.empty(nb_samples, 2).uniform_(0, 1)\n center = Tensor([0.5, 0.5]).view(1, -1)\n distances = torch.norm((inputs - center).abs(), 2, 1)\n labels = (distances < 1 / math.sqrt(2 * math.pi)).type(LongTensor)\n return inputs.t(), labels", "def sample(self,\n data: Sequence[Sequence[torch.Tensor]],\n n_epochs: int = 1) -> Tuple[List[List[int]], List[List[int]], List[int]]:\n\n all_queries = []\n all_targets = []\n for q, t in data:\n all_queries.append(q)\n all_targets.append(t)\n\n print(f'sampler size: {len(all_queries)}')\n\n\n self.n_batch = int(np.ceil(data.__len__() / self.batch_size))\n print(\"n_batch:\", self.n_batch)\n\n for i in range(self.n_batch):\n # position = i * self.batch_size\n # queries = all_queries[position:position + self.batch_size]\n # targets = all_targets[position:position + self.batch_size]\n sample_index = np.random.choice(len(all_queries), self.batch_size)\n queries = [all_queries[i] for i in sample_index]\n targets_label = [all_targets[i] for i in sample_index]\n\n # targets = self.transform_label(targets_label)\n\n # labels = np.arange(len(queries))\n\n # queriess = np.array(queries)\n all_targets_text = self.all_targets\n queries = pad_sequence(queries, batch_first=self.batch_first, padding_value=0)\n\n # targets, queries, labels = torch.tensor(targets), torch.tensor(labels)\n # print(queries[:5])\n # print(len(all_targets_text))\n\n\n targets_label = torch.tensor(targets_label)\n yield (queries, all_targets_text, targets_label)", "def sample_mcd(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int,\n scaler: Dict[str, Union[pd.Series, xarray.Dataset]]) -> Dict[str, torch.Tensor]:\n setup = _SamplingSetup(model, data, model.cfg.head)\n\n # force model into train mode for mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # sample for different frequencies and targets:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n sample_points = []\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n for nth_target in range(setup.number_of_targets):\n # unbound sampling:\n def _sample_values(ids: List[int]) -> torch.Tensor:\n # The ids are used for location-specific resampling for 'truncation' in '_handle_negative_values'\n target_values = torch.zeros(len(ids), frequency_last_n, n_samples)\n for i in range(n_samples): # forward-pass for each frequency separately to guarantee independence\n prediction = model(data)\n value_buffer = prediction[f'y_hat{freq_suffix}'][:, -frequency_last_n:, 0]\n target_values[ids, -frequency_last_n:, i] = value_buffer.detach().cpu()\n return target_values\n\n ids = list(range(data[f'x_d{freq_suffix}'].shape[0]))\n values = _sample_values(ids)\n\n # bind values and add to sample_points:\n values = _handle_negative_values(setup.cfg, values, _sample_values, scaler, nth_target)\n sample_points.append(values)\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: torch.stack(sample_points, 2)})\n\n return samples", "def load_data_from_inds(data_set, inds):\n\n data = torch.cat([data_set[ind_][0].unsqueeze_(0) for ind_ in inds], 0)\n labels = torch.cat([torch.from_numpy(np.array(data_set[ind_][1])).unsqueeze_(0) for ind_ in inds], 0)\n\n return data, labels", "def get_samples_and_labels(self, size, window_size):\r\n pairs = [[None, None] for _ in range(size)] # np.zeros((size, 2), dtype=int)\r\n labels = np.zeros(self.num_individuals_in_label)\r\n labels[self.individual_id] = 1.\r\n\r\n for i in 
range(size):\r\n anchor_val = np.random.randint(low=0, high=self.total_windows)\r\n second_val = [None, None]\r\n if not self.randomized_augmentation: # decide whether or not to save augmentation strategy for curr sample\r\n if self.perturb_orig_signal:\r\n second_val[0] = self.get_augmentation_set()\r\n second_val[1] = self.get_augmentation_set()\r\n # else:\r\n # second_val = [None, None]\r\n\r\n pairs[i][0] = anchor_val\r\n pairs[i][1] = second_val\r\n\r\n # print(\"SADataset.get_samples_and_labels: labels shape == \", labels.shape)\r\n return pairs, labels", "def get_data_and_model_samples(self):\n model_samples = (\n self.net_.sample_fantasy(\n x=self.model_samples_[-1],\n num_mc_steps=self.num_sample_mc_steps,\n beta=self.sample_beta,\n mc_dynamics=self.sampler,\n )\n .detach()\n .cpu()\n .numpy()\n )\n data_sample_ixs = torch.randint(\n 0, self.samples.shape[0], size=(model_samples.shape[0],)\n )\n data_samples = self.samples[data_sample_ixs, ...]\n return data_samples, model_samples", "def get_data(pkl_fname, label, sample, replicate, \n incl_curvature=False,\n load_attn1=None, load_attn2=None, \n modelpkl_fname1=None, modelpkl_fname2=None,\n preloadn2v=False,\n out_channels=8, heads=8, negative_slope=0.2, dropout=0.4, \n verbose=True):\n pdfp = os.path.split(pkl_fname)[0]\n \n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n \n if load_attn1 is None and load_attn2 is None and not incl_curvature and preloadn2v is None:\n\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # load all edge_feat\n elif load_attn1 is not None and load_attn2 is not None and incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, 
self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, 
dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # only load attn1\n elif load_attn1 is not None and load_attn2 is None and not incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(attn, dtype=float) \n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n\n # attn2 \n elif load_attn1 is None and load_attn2 is not None and not incl_curvature and 
preloadn2v is None:\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(attn2, dtype=float)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean \n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # curvature\n elif load_attn1 is None and load_attn2 is None and incl_curvature and preloadn2v is None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n 
pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n# n2v = utils.node2vec_dot2edge(datapkl['adj'], \n# os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n# preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(utils.range_scale(F_e)).reshape(-1,1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # n2v\n elif load_attn1 is None and load_attn2 is None and not incl_curvature and preloadn2v is not None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # add other edge feats\n# F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n# edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n# torch.tensor(attn2, dtype=float),\n# torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n# torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n edge_attr = torch.tensor(utils.range_scale(n2v)).reshape(-1,1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2\n elif load_attn1 is not None and load_attn2 is not None and not incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as 
list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, 
dtype=float)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2 + n2v\n elif load_attn1 is not None and load_attn2 is not None and not incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def 
forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float),\n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + attn2 + curvature\n elif load_attn1 is not None and load_attn2 is not None and incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del 
model\n\n # second attention\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn2]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n del datapkl # clear space\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname2\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn2 = model(d)\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(attn2, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n del model # extra clean\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # n2v + curvature\n elif load_attn1 is None and load_attn2 is None and incl_curvature and preloadn2v is not None:\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[label]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n # 
add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, y=labels)\n del node_features,edge_index,labels,edge_attr\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n \n # attn1 + curvature\n elif load_attn1 is not None and load_attn2 is None and incl_curvature and preloadn2v is None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n \n # attn1 + n2v\n elif load_attn1 is not None and load_attn2 is None and not incl_curvature and 
preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n \n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n \n # attn1 + n2v + curvature\n elif load_attn1 is not None and load_attn2 is None and incl_curvature and preloadn2v is not None:\n # model for DATA EXTRACTION\n ## TODO: clean this up in some other script or fx\n\n # load proper label\n node_features = datapkl['X']\n if isinstance(node_features, sparse.csr_matrix):\n node_features = torch.from_numpy(node_features.todense()).float()\n else:\n node_features = torch.from_numpy(node_features).float()\n labels = datapkl[load_attn1]\n if False:\n # assume label_encoding is done in pre-processing steps\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n 
pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'ctype_labels_encoding.csv'))\n if False:\n # labels as pd.Series\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels) # assumes labels as list\n edge_index,_ = utils.scipysparse2torchsparse(datapkl['adj'])\n\n d = Data(x=node_features, edge_index=edge_index, y=labels)\n del node_features,edge_index,labels\n\n # model to grab attn\n class GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.gat1 = GATConv(d.num_node_features, out_channels=out_channels,\n heads=heads, concat=True, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n self.gat2 = GATConv(out_channels*heads, d.y.unique().size()[0],\n heads=heads, concat=False, negative_slope=negative_slope,\n dropout=dropout, bias=True)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x,attn1 = self.gat1(x, edge_index)\n x = F.elu(x)\n x,attn2 = self.gat2(x, edge_index)\n return F.log_softmax(x, dim=1),attn1\n\n\n # load edge_feature \n model = GAT()\n if False:\n # general fname loading?\n model_pkl = glob.glob(os.path.join(pdfp,'*{}{}_{}*.pkl'.format(sample,replicate,load_attn)))[0]\n else:\n model_pkl = modelpkl_fname1\n model.load_state_dict(torch.load(model_pkl, map_location=torch.device('cpu')))\n model.eval()\n\n logsoftmax_out, attn = model(d)\n\n del model\n\n\n # update labels\n with open(pkl_fname,'rb') as f :\n datapkl = pickle.load(f)\n f.close()\n labels = datapkl[label]\n if False:\n label_encoder = {v:i for i,v in enumerate(labels.unique())}\n labels = labels.map(label_encoder)\n pd.DataFrame(label_encoder,index=[0]).T.to_csv(os.path.join(pdfp,'cond_labels_encoding.csv'))\n if False:\n labels = torch.LongTensor(labels.to_numpy())\n else:\n labels = torch.LongTensor(labels)\n\n # add other edge feats\n F_e = utils.forman_curvature(datapkl['adj'], verbose=True, plot=False)\n n2v = utils.node2vec_dot2edge(datapkl['adj'], \n os.path.join(pdfp,'{}_n2v_{}.txt'.format(sample.split('_')[0], os.path.split(pkl_fname)[1].split('.p')[0])),\n preloaded=preloadn2v)\n edge_attr = torch.cat((torch.tensor(attn, dtype=float),\n torch.tensor(utils.range_scale(F_e)).reshape(-1,1), \n torch.tensor(utils.range_scale(n2v)).reshape(-1,1)),dim=1)\n d = Data(x=d.x, edge_index=d.edge_index, edge_attr=edge_attr, y=labels)\n\n if verbose:\n print('\\nData shapes:')\n print(d)\n print('')\n\n else:\n print('Can only load edge feats of a specific entry set type. 
Exiting.')\n exit()\n \n return d", "def get_samples_and_labels(self, size, window_size):\r\n pairs = [[None, None] for _ in range(size)] # np.zeros((size, 2), dtype=int)\r\n labels = None\r\n\r\n for i in range(size):\r\n anchor_val = np.random.randint(low=0, high=self.total_windows)\r\n second_val = None\r\n if not self.randomized_augmentation: # decide whether or not to save augmentation strategy for curr sample\r\n second_val = self.get_augmentation_set()\r\n # else:\r\n # second_val = None\r\n\r\n pairs[i][0] = anchor_val\r\n pairs[i][1] = second_val\r\n\r\n # print(\"PSDataset.get_samples_and_labels: labels shape == \", labels.shape)\r\n return pairs, labels", "def make_percolation_dataset(side=8, threshold=0.42, n_examples=10):\n X_data = (np.random.random([n_examples, side, side, 1]) > threshold).astype(float)\n Y_data = np.zeros([n_examples, 1])\n for i in range(0, n_examples):\n if percolate(X_data[i, :, :, 0]):\n Y_data[i, 0] = 1\n dataset = [{'image': torch.tensor(x.astype(np.float32)).reshape(1, side, side),\n 'label': torch.tensor(float(y)).reshape(1)} for x, y in zip(X_data, Y_data)]\n print(sum([x['label'].item() for x in dataset]) / len(dataset))\n return dataset", "def get_samples_and_labels(self, size, window_size):\r\n pairs = [[None, None] for _ in range(size)] # np.zeros((size, 2), dtype=int)\r\n labels = np.zeros(size)\r\n\r\n for i in range(size):\r\n anchor_val = None\r\n second_val = None\r\n label = None\r\n\r\n anchor_val = np.random.randint(low=0, high=self.total_windows)\r\n if random.random() < 0.5: # decide whether or not to generate positive labeled data sample\r\n label = 1\r\n second_val = np.random.randint(low=0, high=self.total_windows)\r\n else:\r\n label = -1\r\n second_val = np.nan\r\n\r\n pairs[i][0] = anchor_val\r\n pairs[i][1] = second_val\r\n labels[i] = label\r\n\r\n # print(\"PSDataset.get_samples_and_labels: labels shape == \", labels.shape)\r\n return pairs, labels", "def features_dataset(features_pickle_path, train=True):\n if train:\n start = 0\n stop = 48000\n else:\n start = 48000\n stop = 50000\n\n features = pd.read_hdf(features_pickle_path, start=start, stop=stop).values\n\n labels = torch.zeros(features.shape[0]).float()\n features = torch.from_numpy(features).float()\n\n return torch.utils.data.TensorDataset(features, labels)", "def __getitem__(self, index):\n # Select sample\n chunk_idx = self.chunk_idxes[index]\n\n # get filename\n filename = self.filename_list[index]\n\n # Load data and get label\n X = self.features[:, chunk_idx: chunk_idx + self.chunk_len, :] # (n_channels, n_timesteps, n_mels)\n sed_labels = self.sed_targets[chunk_idx: chunk_idx + self.chunk_len] # (n_timesteps, n_classes)\n doa_labels = self.doa_targets[chunk_idx: chunk_idx + self.chunk_len] # (n_timesteps, x*n_classes)\n\n # Mixup mainly for SED\n if self.is_mixup:\n a1 = np.random.beta(0.5, 0.5)\n if np.random.rand() < 0.8 and np.abs(a1 - 0.5) > 0.2:\n random_index = np.random.randint(0, self.n_samples, 1)[0]\n random_chunk_idx = self.chunk_idxes[random_index]\n X_1 = self.features[:, random_chunk_idx: random_chunk_idx + self.chunk_len, :]\n sed_labels_1 = self.sed_targets[random_chunk_idx: random_chunk_idx + self.chunk_len]\n doa_labels_1 = self.doa_targets[random_chunk_idx: random_chunk_idx + self.chunk_len]\n X = a1 * X + (1 - a1) * X_1\n sed_labels = a1 * sed_labels + (1 - a1) * sed_labels_1\n doa_labels = a1 * doa_labels + (1 - a1) * doa_labels_1\n\n if self.transform is not None:\n X = self.transform(X)\n\n return X, sed_labels, doa_labels, filename", 
"def __getitem__(self, idx):\r\n \r\n signal=torch.from_numpy(np.genfromtxt(os.path.join(self.signals_dir, \r\n self.signal_list[idx])))\r\n X, labels=self.transform(signal)\r\n \r\n return X, labels", "def show_sample(max_len):\n for i in range(len(train_dataset)):\n sample = train_dataset[i]\n\n X = sample['X']\n y_d = sample['y_descreen']\n y_e = sample['y_edge']\n print(i)\n\n print(type(X), X.size())\n print(type(y_d), y_d.size())\n print(type(y_e), y_e.size())\n\n if i == max_len:\n break", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def sample(self, features, states=None): \n\n #print('features', features.size())\n sampled_ids = []\n \n batch_size = features.size(0)\n \n encoder_dim = features.size(-1)\n \n features = features.view(batch_size, -1, encoder_dim)\n num_pixels = features.size(1)\n #print('features', features)\n\n prev_word = torch.LongTensor([[self.vocab.word2idx['<start>']]]).to(device)\n\n h, c = self.init_hidden_states(features)\n \n #print(h.size(), c.size())\n #print(h.mean())\n \n for t in range(self.max_seg_length):\n \n embeddings = self.embedding(prev_word).squeeze(1)\n #print(embeddings.size())\n att, _ = self.attention(features, h)\n #print('att', att)\n gate = self.sigmoid(self.f_beta(h)) # gating scalar, (batch_size_t, encoder_dim)\n #print('gate', gate)\n att = gate * att\n #print('att', att.size())\n #print(h.size())\n h, c = self.decode_step(torch.cat([embeddings, att], dim=1), (h, c)) \n #print(torch.cat([embeddings, att], dim=1))\n #print('h',h.mean())\n \n #print(h.size(), c.size())\n #preds = self.fc(self.dropout(h)) \n #print('preds', preds)\n \n h_embedded = self.linear_h(h)\n att_embedded = self.linear_z(att)\n preds = self.linear_o(self.dropout(embeddings + h_embedded + att_embedded))\n \n _, predicted = torch.max(preds, dim=1)\n #print(predicted)\n \n #print('indices', predicted)\n prev_word = predicted.unsqueeze(1)\n #print('prev', prev_word.size())\n \n sampled_ids.append(predicted)\n #print('sampled ids', sampled_ids)\n \n sampled_ids = torch.stack(sampled_ids, 1) \n #print('ids', sampled_ids)\n\n \n return sampled_ids", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, 
initial_indices", "def to_dataset(sound_inputs, chunk_size=None, increment=None):\n\n if chunk_size is None:\n nrows = len(sound_inputs)\n ncols = np.prod(sound_inputs[0].shape)\n else:\n get_nchunks = lambda dur: np.ceil(float(dur) / int(self.chunk_size * self.stride))\n # Compute the total number of vectorized samples in the list of sound_inputs\n nrows = np.sum([get_nchunks(s.sound.annotations[\"data_shape\"][1]) for s in sound_inputs])\n ncols = sound_inputs[0].shape[0] * chunk_size\n\n mean = np.zeros(data_shape[1])\n std = np.ones(data_shape[1])\n if self.zscore:\n for s in sound_inputs:\n data = s.data.ravel()\n s.clear_cache()\n mean += data\n std += data ** 2\n std = np.sqrt(std / len(sound_inputs) - mean ** 2 / len(sound_inputs))\n mean = mean / len(sound_inputs)\n\n\n with h5py.File(self.ds_filename, \"w\") as hf:\n ds = hf.create_dataset(\"input\",\n data_shape,\n chunks=(batch_size, data_shape[1]))\n data = list()\n start = 0\n for s in sound_inputs:\n data.append(s.data.ravel())\n s.clear_cache()\n if len(data) == batch_size:\n ds[start: start + batch_size] = (np.vstack(data) - mean) / std\n start += batch_size\n data = list()\n if len(data):\n ds[start: start + len(data)] = np.vstack(data)\n\n return self.ds_filename", "def create_data(num_sample=None):\n I = np.eye(3, dtype=np.float32)\n\n\n if (num_sample == None):\n num_sample = 100\n\n # Generate first class\n m1 = np.asarray([0.5, 0.5], dtype=np.float32)\n cov1 = np.asarray([[0.1, 0],\n [0, 0.1]], dtype=np.float32)\n data1 = rng.multivariate_normal(m1, cov1, num_sample)\n label1 = np.ones((num_sample), dtype=np.uint16) - 1\n label1 = I[label1,:]\n\n # Generate second class\n m2 = np.asarray([0.3,0.3], dtype=np.float32)\n cov2 = np.asarray([[0.5, 0], [0, 0.5]], dtype=np.float32)\n data2 = rng.multivariate_normal(m2, cov2, num_sample)\n label2 = np.ones((num_sample), dtype=np.uint16)\n label2 = I[label2, :]\n\n\n return (data1, label1, data2, label2)", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))", "def train_on_chunk(self, chunk, meter):\n # EXTRACT 
FEATURES:\n # find units:\n self.__setattr__('units', chunk.columns[0])\n # Loading treshold for getting events:\n thDelta = getattr(self, 'thDelta')\n chunk.index.name = 'date_time'\n # To prevent learning many samples at the middle of a edge:\n chunk.ix[:, 0][chunk.ix[:, 0] < thDelta] = 0\n # Learning edges\n chunk['delta'] = chunk.ix[:, 0].diff()\n chunk.delta.fillna(0, inplace=True)\n edges = chunk[np.abs(chunk['delta']) > thDelta].delta\n # Pairing on/off events\n #print(chunk)\n if len(edges) > 1:\n offpower = edges[edges.apply(np.sign).diff() == -2]\n onpower = edges[edges.apply(np.sign).diff(-1) == 2]\n duration = offpower.reset_index().date_time - \\\n onpower.reset_index().date_time\n duration = duration.astype('timedelta64[s]')\n\n # Set consistent index for concatenation:\n onpower = pd.DataFrame(onpower).reset_index(drop=True)\n onpower.columns = ['onpower']\n offpower = pd.DataFrame(offpower).reset_index(drop=True)\n offpower.columns = ['offpower']\n duration = pd.DataFrame(duration).reset_index(drop=True)\n duration.columns = ['duration']\n\n # Len of samples:\n print(\"Samples of onpower: \" + str(len(onpower)))\n print(\"Samples of offpower: \" + str(len(offpower)))\n print(\"Samples of duration: \" + str(len(duration)))\n\n number_of_events = len(onpower)\n # Features (concatenation)\n self.onpower_train = pd.concat(\n [self.onpower_train, onpower]).reset_index(drop=True)\n self.offpower_train = pd.concat(\n [self.offpower_train, offpower]).reset_index(drop=True)\n self.duration_train = pd.concat(\n [self.duration_train, duration]).reset_index(drop=True)\n \n else:\n number_of_events = 0\n print(\"\"\"WARNING: No paired events found on this chunk.\n Is it thDelta too high?\"\"\")\n \n self.duration_train = self.duration_train[self.duration_train.duration<400]\n\n # RE-TRAIN FEATURE MODELS:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)\n\n # UPDATE STATS:\n stat_dict = {'appliance': meter.identifier[\n 0], 'instance': meter.identifier[1], 'Nevents': number_of_events}\n instanceFound = False\n if len(self.stats) == 0:\n self.stats.append(stat_dict)\n else:\n for stat in self.stats:\n if ((stat['appliance'] == stat_dict['appliance']) and\n (stat['instance'] == stat_dict['instance'])):\n index = self.stats.index(stat)\n self.stats[index]['Nevents'] = self.stats[\n index]['Nevents'] + number_of_events\n instanceFound = True\n if not instanceFound:\n self.stats.append(stat_dict)", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in 
enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def __init__(self, sample_df, data_path, load_semilabels=True, load_mask=True,\n output_size=512, data_augmentation=True):\n data.Dataset.__init__(self)\n self.sample_df = sample_df\n self.data_path = data_path\n self.load_semilabels = load_semilabels\n self.load_mask = load_mask\n if data_augmentation:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.RandomHorizontalFlip(p=0.5), \\\n tf.RandomVerticalFlip(p=0.5), \\\n tf.RandomBrightness(lower=0.8, upper=1.2), \\\n tf.RandomScaling(scale_range=(0.8,1.2)), \\\n tf.RandomRotation(degree_range=(-20,20)), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())\n else:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())", "def fetch_samples(self):\n return torch.cat(self.samples,dim=0).reshape(-1,self.parameters.numel())", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def generate_mog_dataset():\n\n n_per_class = 100\n dim = 2\n n_gaussians = 4\n mus = [(0, 1), (-1, 0), (0, -1), (1, 0)]\n mus = [torch.tensor(m) for m in mus]\n var = 0.05\n\n inputs, labels = [], []\n\n for id in range(n_gaussians):\n # Generate input data by mu + x @ sqrt(cov)\n cov = np.sqrt(var) * torch.eye(dim)\n mu = mus[id]\n inputs.append(mu + torch.randn(n_per_class, dim) @ cov)\n\n # Labels\n labels.append(torch.tensor(n_per_class * [1.0 if id < 2 else 0.0]))\n\n return torch.cat(inputs, dim=0), torch.cat(labels, dim=0)", "def 
save_val_sample_pred(dataloader, n_samples=10):\n\n sample_ids = random.sample(range(len(dataloader.dataset)), n_samples)\n\n def pred_samples(model, filename): \n sample_ims = (dataloader.dataset[i] for i in sample_ids)\n\n with evaluate(model):\n preds = torch.cat([torch.cat([sample, model(sample.unsqueeze(0)).squeeze(0), truth], dim=-2)\n for sample, truth in sample_ims],\n dim=-1)\n\n imsave(filename, preds.cpu().squeeze().numpy())\n\n return pred_samples", "def get_data(\n self, shuffle_sample_indices: bool = False, fold: int = None\n ) -> Tuple:\n training_elms, validation_elms, test_elms = self._partition_elms(\n max_elms=config.max_elms, fold=fold\n )\n LOGGER.info(\"Reading ELM events and creating datasets\")\n LOGGER.info(\"-\" * 30)\n LOGGER.info(\" Creating training data\")\n LOGGER.info(\"-\" * 30)\n train_data = self._preprocess_data(\n training_elms, shuffle_sample_indices=shuffle_sample_indices\n )\n LOGGER.info(\"-\" * 30)\n LOGGER.info(\" Creating validation data\")\n LOGGER.info(\"-\" * 30)\n validation_data = self._preprocess_data(\n validation_elms, shuffle_sample_indices=shuffle_sample_indices\n )\n LOGGER.info(\"-\" * 30)\n LOGGER.info(\" Creating test dataset\")\n LOGGER.info(\"-\" * 30)\n test_data = self._preprocess_data(\n test_elms, shuffle_sample_indices=shuffle_sample_indices\n )\n\n return train_data, validation_data, test_data", "def extract_features(self, np_samples):\n log_mel_examples = []\n samples = np_samples.shape[0]\n if self._normalize:\n min_ratio = 0.1 # = 10^(max_db/-20) with max_db = 20\n np_samples /= np.maximum(min_ratio, np.amax(np_samples))\n if self._step_size is not None:\n samples_splits = []\n for i in xrange(0, samples - vggish_params.SAMPLE_RATE + 1,\n self._step_size):\n samples_splits.append(np_samples[i:i + vggish_params.SAMPLE_RATE])\n else:\n samples_splits = np.split(np_samples, samples / vggish_params.SAMPLE_RATE)\n # Compute log mel spectrogram features.\n for samples_window in samples_splits:\n log_mel = mel_features.log_mel_spectrogram(\n samples_window,\n audio_sample_rate=vggish_params.SAMPLE_RATE,\n log_offset=vggish_params.LOG_OFFSET,\n window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=vggish_params.NUM_MEL_BINS,\n lower_edge_hertz=vggish_params.MEL_MIN_HZ,\n upper_edge_hertz=vggish_params.MEL_MAX_HZ)\n\n log_mel_examples.append(\n mel_features.frame(\n log_mel,\n window_length=self._example_window_length,\n hop_length=self._example_window_length))\n return log_mel_examples" ]
[ "0.5600111", "0.54804593", "0.5479767", "0.5365365", "0.530453", "0.5254754", "0.52142054", "0.51725596", "0.5127227", "0.51050186", "0.50939244", "0.50804824", "0.5077783", "0.5063263", "0.50388134", "0.5032259", "0.50208396", "0.5009674", "0.5003568", "0.49945354", "0.4989133", "0.49831465", "0.49728778", "0.4968669", "0.49587274", "0.49586383", "0.4957089", "0.49458843", "0.49432576", "0.49413335" ]
0.57378817
0
Given a Stormpath resource, we'll extract the custom data in a JSON compatible format.
def get_custom_data(self, resource):
    try:
        custom_data = dict(resource.custom_data)
    except AttributeError:
        custom_data = dict(resource['custom_data'])

    custom_data['createdAt'] = custom_data['created_at'].isoformat()
    custom_data['modifiedAt'] = custom_data['modified_at'].isoformat()

    del custom_data['created_at']
    del custom_data['modified_at']

    return custom_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def meta_data(self):\r\n return simplejson.dumps(self.__resource_meta)", "def fetch_extra_data(resource):\n person_id = resource.get(\"cern_person_id\")\n return dict(person_id=person_id)", "def get_resource_data(self, resource):\n url = self.api_url + resource\n return self.get_url_data(url)", "def _process_resource(cls, resource):\n urn = resource['component_id']\n hrn, type = urn_to_hrn(resource['component_id'])\n\n resource['urn'] = urn\n resource['hrn'] = hrn\n\n resource['network_hrn'] = Xrn(resource['component_id']).authority[0] # network ? XXX\n\n # We also add 'facility' and 'testbed' fields\n resource['facility_name'] = cls.get_resource_facility_name(urn)\n resource['testbed_name'] = cls.get_resource_testbed_name(urn)\n\n return resource", "def json(self):\n return {\n \"qualified_name\": self.qualified_name,\n \"description\": self.description,\n \"data\": self.data,\n }", "def get(owner, resource):\n resource = logic.resource.find(owner, resource)\n return jsonify(resource)", "def jsonify(self) -> JsonDict:\n\n Attributes = type(self)._Attributes\n\n # Create json dictionary\n json_dict: JsonDict = {}\n # Add every resource attribute to dictionary\n for key in Attributes:\n\n # If necessary jsonify attribute before adding it\n var = getattr(self, key)\n if 'jsonify' in Attributes[key]:\n var = Attributes[key]['jsonify'](var)\n\n json_dict[key] = var\n\n return json_dict", "def json(self):\n if self.resource:\n return jsonify(self.resource)\n return jsonify({'id': self.id})", "def _process_resource(cls, resource):\n urn = resource['component_id']\n hrn, type = urn_to_hrn(resource['component_id'])\n\n resource['urn'] = urn\n resource['hrn'] = hrn\n\n resource['network_hrn'] = Xrn(resource['component_id']).authority[0] # network ? XXX\n\n # We also add 'facility' and 'testbed' fields\n resource['facility_name'] = cls.get_resource_facility_name(urn)\n resource['testbed_name'] = cls.get_resource_testbed_name(urn)\n\n if 'exclusive' not in resource:\n resource['exclusive'] = 'true'\n elif resource['exclusive'] is None:\n resource['exclusive'] = 'true'\n else:\n Log.warning(\"EXCLUSIVE = \",resource['exclusive'])\n\n #if 'location' in node:\n # if node['location']:\n # node['latitude'] = node['location']['latitude']\n # node['longitude'] = node['location']['longitude']\n # del node['location']\n #else:\n # if the location is not provided, aproximate it from the city\n t_urn = resource['urn'].split('+')\n city = t_urn[3].split('.')[1]\n if city == 'iii':\n city = 'Institute for Information Industry, Taïwan 106'\n resource['country'] = 'Taiwan'\n else:\n resource['country'] = 'France'\n location = cls.get_location(city)\n if location is not None:\n resource['latitude'] = str(location.latitude)\n resource['longitude'] = str(location.longitude)\n\n return resource", "def on_get_resource(self, req, resp, **params):\n instance = self.get_object(**params)\n resp.json(**instance.as_resource)", "def get_data(self):\n return self.data.to_json()", "def data_json(self, extra_context=None, publish=False):\n if not self.project.CREATE_JSON:\n # nothing to see here, but the right mimetype\n return jsonify()\n\n if not self.data:\n # this sets site.data by spreadsheet or gdoc\n self.get_context(publish)\n\n return jsonify(self.data)", "def dehydrate_extra_info(self, bundle):\n extra_info = bundle.data[\"extra_info\"]\n return json.loads(extra_info)", "def _resource_fields(chromo):\n return {\n 'name': chromo['resource_name'],\n 'description': chromo['title'],\n 'url_type': u'datastore',\n }", "def 
json_friendly(self):", "def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')", "def get_resource_data(self, **kw):\n data = dict(\n url=self['url'],\n dist=self['name'])\n data.update(kw)\n return data", "def print_resource():\n logging.info(\"__package__: %s\", __package__)\n logging.info(\"__name__: %s\", __name__)\n logging.info(\"JSON_RESOURCE: %s\", JSON_RESOURCE)\n logging.info(\"JSON_PATH: %s\", JSON_PATH)", "def get_json(self):\n return {'name': self.name, \n 'path': self.path, \n 'enabled': self.enabled}", "def get_custom_data(self):\n return self._get_custom_data()", "def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context", "def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context", "def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context", "def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context", "def get_person_like_json(self):\n return json.dumps(self.get_person())", "def get_data(self, **context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context", "def set_resource_data(self, resource, meta):", "def data(self):\n return { # TODO Actually query for this shit\n \"foo\": self.__name__,\n \"url\": f\"{self.request.resource_url(self)}\",\n }", "def test_hypermedia_custom_resource():\n data = {\n 'name': 'Wort wort',\n 'slug': 'sluggy',\n 'not_valid': 'nooo',\n 'author': 'http://dev/api/authors/1'\n }\n instance = HypermediaBlogsResource(**data)\n assert hasattr(instance, 'get_authors')", "def get(self, entity):\n return jsonify({p.schema: p.to_json() for p in entity.profiles})" ]
[ "0.7061312", "0.6757691", "0.61228764", "0.6066298", "0.5922103", "0.5876381", "0.5807942", "0.5787658", "0.57215774", "0.571815", "0.5709122", "0.56961465", "0.56823915", "0.5677301", "0.5654893", "0.5653712", "0.5647304", "0.56388575", "0.55996674", "0.55707127", "0.55539393", "0.55539393", "0.55539393", "0.55539393", "0.5529841", "0.55043733", "0.5490912", "0.5477527", "0.5427977", "0.54254377" ]
0.69236267
1
Given a Stormpath Resource, we'll extract the resource ID.
def get_id(self, resource):
    try:
        return resource.href.split('/')[-1]
    except AttributeError:
        return resource['href'].split('/')[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resourceDocumentId(self, resource: Resource) -> str:", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def get_object_id(resource):\n if hasattr(resource, \"object_id\"):\n return int(resource.object_id)\n\n return int(resource.id)", "def resourceid(self):", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def id(self):\n return self.raw_resource[\"id\"]", "def get_resource_id(self, obj):\n return obj.id", "def get_resource_id(resource_instance, resource):\n if resource and \"id\" in resource:\n return resource[\"id\"] and encoding.force_str(resource[\"id\"]) or None\n if resource_instance:\n return (\n hasattr(resource_instance, \"pk\")\n and encoding.force_str(resource_instance.pk)\n or None\n )\n return None", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self, value, match_option=None):\n return self.attributes(\"resource-id\", value, match_option)" ]
[ "0.77710336", "0.7645945", "0.7645945", "0.7645945", "0.76283467", "0.76112777", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.75265026", "0.7460142", "0.7449439", "0.7448532", "0.74260885", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.7413956", "0.72653496" ]
0.7842492
0
Export all tenant data for this Stormpath account.
def export_tenants(self):
    print('\n=== Exporting all tenant data...')

    tenant = dict(self.client.tenant)
    print('- Exporting tenant:', tenant['name'])

    json = {
        'id': self.get_id(tenant),
        'href': tenant['href'],
        'name': tenant['name'],
        'key': tenant['key'],
        'createdAt': tenant['created_at'].isoformat(),
        'modifiedAt': tenant['modified_at'].isoformat(),
        'customData': self.get_custom_data(tenant),
    }

    #for application in tenant.applications:

    self.write('%s/%s/meta' % (self.location, json['id']), json)

    print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants", "def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]", "def export_data(self):\n return self.export_all_data()", "def tenants(self):\n # print \"tenant list is %s\" % self.auth.tenants.list()\n if not self._tenancy:\n self._tenancy = {}\n for tenant in self.auth.tenants.list():\n t = Tenant(tenant, self)\n self._tenancy[t[\"name\"]] = t\n return self._tenancy", "def export_groups(self):\n print('=== Exporting all group data...')\n\n for group in self.client.tenant.groups:\n print('- Exporting group:', group.name)\n\n json = {\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n 'customData': self.get_custom_data(group),\n 'directory': {\n 'id': self.get_id(group.directory),\n 'href': group.directory.href,\n 'name': group.directory.name,\n 'description': group.directory.description,\n 'status': group.directory.status,\n 'createdAt': group.directory.created_at.isoformat(),\n 'modifiedAt': group.directory.modified_at.isoformat(),\n },\n 'accounts': [],\n }\n\n for account in group.accounts:\n json['accounts'].append({\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': 
account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/groups/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def getCustomerAccountData(self):\n self.logger.debug(\"\")\n #Process each entry returned by getCustomersInfo through getAccountsInfo.\n customersInfoResponse = self.getCustomersInfo()\n if customersInfoResponse is None:\n self.logger.debug(\"did not get data from self.getCustomersInfo()\")\n raise RuntimeError()\n first = True\n cInfos = self.parseCustomerInfo(customersInfoResponse)\n self.logger.debug(\"%d cInfos\", len(cInfos))\n data = {}\n for cInfo in cInfos:\n if first:\n first = False\n else: # Adds a newline separator for text output.\n self.output.write(self.format({}))\n data['CustomerId'] = cInfo['Id']\n accountsInfoResponse = self.getAccountsInfo(cInfo['Id'], \"true\")\n if accountsInfoResponse is not None:\n data['accounts'] = self.parseAccountInfo(accountsInfoResponse)\n else:\n data['accounts'] = []\n self.logger.debug(\"yield %r\", data)\n yield data", "def list_accounts(self):\n pass", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get_tenants(self):", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def get_accounts(self):\n return self.accounts.all()", "def export_organizations(self):\n print('\\n=== Exporting all organization data...')\n\n for organization in self.client.organizations:\n print('- Exporting organizations:', organization.name)\n\n json = {\n 'id': self.get_id(organization),\n 'href': organization.href,\n 'name': organization.name,\n 'nameKey': organization.name_key,\n 'description': organization.description,\n 'status': organization.status,\n 'createdAt': organization.created_at.isoformat(),\n 'modifiedAt': organization.modified_at.isoformat(),\n 'customData': self.get_custom_data(organization),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n }\n\n default_account_store_mapping = organization.default_account_store_mapping\n default_group_store_mapping = organization.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': organization.default_account_store_mapping.href.split('/')[-1],\n 'href': organization.default_account_store_mapping.href,\n 'type': organization.default_account_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_account_store_mapping.account_store.name,\n 'list_index': organization.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': 
organization.default_group_store_mapping.href.split('/')[-1],\n 'href': organization.default_group_store_mapping.href,\n 'type': organization.default_group_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_group_store_mapping.account_store.name,\n 'list_index': organization.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in organization.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(organization.tenant)\n self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def run(self):\n self.export_users()", "def get_tenant_resources(self):\n resources = self.context[\"tenant\"].get(\"resources\", [])\n if not resources:\n msg = (\"No resources found for tenant: %s\"\n % self.context[\"tenant\"].get(\"name\"))\n raise exceptions.NotFoundException(message=msg)\n for res_id in resources:\n self._get_resource(res_id)", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. 
in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result", "def get_accounts(self):\n return self.accounts", "def display_accounts(cls):\n return cls.account_list", "def export_directories(self):\n print('=== Exporting all directory data...')\n\n for directory in self.client.directories:\n print('- Exporting directory:', directory.name)\n\n json = {\n 'id': self.get_id(directory),\n 'href': directory.href,\n 'name': directory.name,\n 'description': directory.description,\n 'status': directory.status,\n 'createdAt': directory.created_at.isoformat(),\n 'modifiedAt': directory.modified_at.isoformat(),\n 'customData': self.get_custom_data(directory),\n 'groups': [],\n }\n\n for group in directory.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n json['provider'] = {\n 'href': directory.provider.href,\n 'providerId': directory.provider.provider_id,\n 'agent': None,\n }\n\n try:\n json['provider']['createdAt'] = directory.provider.created_at.isoformat()\n json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat()\n except AttributeError:\n json['provider']['createdAt'] = None\n json['provider']['modifiedAt'] = None\n\n try:\n json['provider']['clientId'] = directory.provider.client_id\n except AttributeError:\n json['provider']['clientId'] = None\n\n try:\n json['provider']['clientSecret'] = directory.provider.client_secret\n except AttributeError:\n json['provider']['clientSecret'] = None\n\n try:\n json['provider']['redirectUri'] = directory.provider.redirect_uri\n except AttributeError:\n json['provider']['redirectUri'] = None\n\n try:\n json['provider']['agent'] = {\n 'id': self.get_id(directory.provider.agent),\n 'href': directory.provider.agent.href,\n 'status': directory.provider.agent.status,\n 'createdAt': directory.provider.agent.created_at.isoformat(),\n 'modifiedAt': directory.provider.agent.modified_at.isoformat(),\n 'config': {\n 'directoryHost': directory.provider.agent.directory_host,\n 'directoryPort': directory.provider.agent.directory_port,\n 'sslRequired': directory.provider.agent.ssl_required,\n 'agentUserDn': directory.provider.agent.agent_user_dn,\n 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password,\n 'baseDn': directory.provider.agent.base_dn,\n 'pollInterval': directory.provider.agent.poll_interval,\n 'referralMode': directory.provider.agent.referral_mode,\n 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues,\n 'accountConfig': directory.provider.agent.account_config,\n 'groupConfig': directory.provider.agent.group_config,\n },\n 'download': {\n\n },\n 
}\n except AttributeError:\n pass\n\n if directory.password_policy:\n json['passwordPolicy'] = {\n 'id': self.get_id(directory.password_policy),\n 'href': directory.password_policy.href,\n #'createdAt': directory.password_policy.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.modified_at.isoformat(),\n 'resetEmailStatus': directory.password_policy.reset_email_status,\n 'resetEmailTemplates': [],\n 'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status,\n 'resetSuccessEmailTemplates': [],\n 'resetTokenTtl': directory.password_policy.reset_token_ttl,\n 'strength': {\n 'href': directory.password_policy.strength.href,\n #'createdAt': directory.password_policy.strength.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(),\n 'maxLength': directory.password_policy.strength.max_length,\n 'minDiacritic': directory.password_policy.strength.min_diacritic,\n 'minLength': directory.password_policy.strength.min_length,\n 'minLowerCase': directory.password_policy.strength.min_lower_case,\n 'minNumeric': directory.password_policy.strength.min_numeric,\n 'minSymbol': directory.password_policy.strength.min_symbol,\n 'minUpperCase': directory.password_policy.strength.min_upper_case,\n },\n }\n\n try:\n for template in directory.password_policy.reset_email_templates:\n json['passwordPolicy']['resetEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'defaultModel': template.default_model,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n try:\n for template in directory.password_policy.reset_success_email_templates:\n json['passwordPolicy']['resetSuccessEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n tenant = self.get_id(directory.tenant)\n self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_accounts(self):\r\n return self._accounts", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def _export_users(admin_access_token):\n admin = User.query.filter_by(id_=ADMIN_USER_ID).one_or_none()\n if admin_access_token != admin.access_token:\n raise ValueError(\"Admin access token invalid.\")\n csv_file_obj = io.StringIO()\n 
csv_writer = csv.writer(csv_file_obj, dialect=\"unix\")\n for user in User.query.all():\n csv_writer.writerow(\n [user.id_, user.email, user.access_token, user.username, user.full_name]\n )\n return csv_file_obj", "def get_all_user_data(request):\n \n try:\n customers = Customer.objects.all()\n serializer = CustomerSerializer(customers, many=True)\n \n return Response(serializer.data)\n except Exception as e:\n return Response({\"Error\":str(e)})", "def accounts():", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def accounts(self):\n return self._accounts.values()" ]
[ "0.7993855", "0.64110136", "0.6349004", "0.6227858", "0.61415046", "0.5991233", "0.5866704", "0.5837029", "0.5750611", "0.56921303", "0.56606555", "0.56530935", "0.56111664", "0.5586277", "0.55795664", "0.5577892", "0.55623376", "0.5542531", "0.55090034", "0.5446627", "0.54300714", "0.5426655", "0.5416619", "0.53967553", "0.53909487", "0.538191", "0.53773296", "0.53734744", "0.53704804", "0.5369392" ]
0.8416211
0
Export all application data for this Stormpath account.
def export_applications(self): print('\n=== Exporting all application data...') for application in self.client.applications: print('- Exporting application:', application.name) json = { 'id': self.get_id(application), 'href': application.href, 'name': application.name, 'description': application.description, 'status': application.status, 'createdAt': application.created_at.isoformat(), 'modifiedAt': application.modified_at.isoformat(), 'customData': self.get_custom_data(application), 'default_account_store_mapping': None, 'default_group_store_mapping': None, 'account_store_mappings': [], #'verificationEmails': [], } default_account_store_mapping = application.default_account_store_mapping default_group_store_mapping = application.default_group_store_mapping if default_account_store_mapping: json['default_account_store_mapping'] = { 'id': application.default_account_store_mapping.href.split('/')[-1], 'href': application.default_account_store_mapping.href, 'type': application.default_account_store_mapping.account_store.__class__.__name__, 'name': application.default_account_store_mapping.account_store.name, 'list_index': application.default_account_store_mapping.list_index, } if default_group_store_mapping: json['default_group_store_mapping'] = { 'id': application.default_group_store_mapping.href.split('/')[-1], 'href': application.default_group_store_mapping.href, 'type': application.default_group_store_mapping.account_store.__class__.__name__, 'name': application.default_group_store_mapping.account_store.name, 'list_index': application.default_group_store_mapping.list_index, } for account_store_mapping in application.account_store_mappings: json['account_store_mappings'].append({ 'id': self.get_id(account_store_mapping), 'href': account_store_mapping.href, 'account_store': { 'type': account_store_mapping.account_store.__class__.__name__, 'id': self.get_id(account_store_mapping.account_store), 'href': account_store_mapping.account_store.href, 'name': account_store_mapping.account_store.name, 'description': account_store_mapping.account_store.description, 'status': account_store_mapping.account_store.status, }, 'list_index': account_store_mapping.list_index, 'is_default_account_store': account_store_mapping.is_default_account_store, 'is_default_group_store': account_store_mapping.is_default_group_store, }) tenant = self.get_id(application.tenant) self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json) print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_data(self):\n return self.export_all_data()", "def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')", "def run(self):\n self.export_users()", "def getSessionsData(self):\n export_data = self.get_api_results(\n \"/api/session/export?api_key={0}&format=json\")\n export_data = self.purge_misc_sessions(export_data)\n return export_data", "def export_to(short_name):\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n title = app_title(app, gettext(\"Export\"))\r\n loading_text = gettext(\"Exporting data..., this may take a while\")\r\n\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n def respond():\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n app=app,\r\n owner=owner)\r\n\r\n def gen_json(table):\r\n n = db.session.query(table)\\\r\n .filter_by(app_id=app.id).count()\r\n sep = \", \"\r\n yield \"[\"\r\n for i, tr in enumerate(db.session.query(table)\r\n .filter_by(app_id=app.id).yield_per(1), 1):\r\n item = json.dumps(tr.dictize())\r\n if (i == n):\r\n sep = \"\"\r\n yield item + sep\r\n yield \"]\"\r\n\r\n def format_csv_properly(row):\r\n keys = sorted(row.keys())\r\n values = []\r\n for k in keys:\r\n values.append(row[k])\r\n return 
values\r\n\r\n\r\n def handle_task(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def handle_task_run(writer, t):\r\n if (type(t.info) == dict):\r\n values = format_csv_properly(t.info)\r\n writer.writerow(values)\r\n else: # pragma: no cover\r\n writer.writerow([t.info])\r\n\r\n def get_csv(out, writer, table, handle_row):\r\n for tr in db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .yield_per(1):\r\n handle_row(writer, tr)\r\n yield out.getvalue()\r\n\r\n def respond_json(ty):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n try:\r\n table = tables[ty]\r\n except KeyError:\r\n return abort(404)\r\n return Response(gen_json(table), mimetype='application/json')\r\n\r\n def create_ckan_datastore(ckan, table, package_id):\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n new_resource = ckan.resource_create(name=table,\r\n package_id=package_id)\r\n ckan.datastore_create(name=table,\r\n resource_id=new_resource['result']['id'])\r\n ckan.datastore_upsert(name=table,\r\n records=gen_json(tables[table]),\r\n resource_id=new_resource['result']['id'])\r\n\r\n def respond_ckan(ty):\r\n # First check if there is a package (dataset) in CKAN\r\n tables = {\"task\": model.task.Task, \"task_run\": model.task_run.TaskRun}\r\n msg_1 = gettext(\"Data exported to \")\r\n msg = msg_1 + \"%s ...\" % current_app.config['CKAN_URL']\r\n ckan = Ckan(url=current_app.config['CKAN_URL'],\r\n api_key=current_user.ckan_api)\r\n app_url = url_for('.details', short_name=app.short_name, _external=True)\r\n\r\n try:\r\n package, e = ckan.package_exists(name=app.short_name)\r\n if e:\r\n raise e\r\n if package:\r\n # Update the package\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_update(app=app, user=owner, url=app_url,\r\n resources=package['resources'])\r\n\r\n ckan.package = package\r\n resource_found = False\r\n for r in package['resources']:\r\n if r['name'] == ty:\r\n ckan.datastore_delete(name=ty, resource_id=r['id'])\r\n ckan.datastore_create(name=ty, resource_id=r['id'])\r\n ckan.datastore_upsert(name=ty,\r\n records=gen_json(tables[ty]),\r\n resource_id=r['id'])\r\n resource_found = True\r\n break\r\n if not resource_found:\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n else:\r\n owner = User.query.get(app.owner_id)\r\n package = ckan.package_create(app=app, user=owner, url=app_url)\r\n create_ckan_datastore(ckan, ty, package['id'])\r\n #new_resource = ckan.resource_create(name=ty,\r\n # package_id=package['id'])\r\n #ckan.datastore_create(name=ty,\r\n # resource_id=new_resource['result']['id'])\r\n #ckan.datastore_upsert(name=ty,\r\n # records=gen_json(tables[ty]),\r\n # resource_id=new_resource['result']['id'])\r\n flash(msg, 'success')\r\n return respond()\r\n except requests.exceptions.ConnectionError:\r\n msg = \"CKAN server seems to be down, try again layer or contact the CKAN admins\"\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n except Exception as inst:\r\n if len(inst.args) == 3:\r\n t, msg, status_code = inst.args\r\n msg = (\"Error: %s with status code: %s\" % (t, status_code))\r\n else: # pragma: no cover\r\n msg = (\"Error: %s\" % inst.args[0])\r\n current_app.logger.error(msg)\r\n flash(msg, 'danger')\r\n finally:\r\n return respond()\r\n\r\n def respond_csv(ty):\r\n # Export Task(/Runs) to CSV\r\n types = {\r\n \"task\": (\r\n 
model.task.Task, handle_task,\r\n (lambda x: True),\r\n gettext(\r\n \"Oops, the application does not have tasks to \\\r\n export, if you are the owner add some tasks\")),\r\n \"task_run\": (\r\n model.task_run.TaskRun, handle_task_run,\r\n (lambda x: type(x.info) == dict),\r\n gettext(\r\n \"Oops, there are no Task Runs yet to export, invite \\\r\n some users to participate\"))}\r\n try:\r\n table, handle_row, test, msg = types[ty]\r\n except KeyError:\r\n return abort(404)\r\n\r\n out = StringIO()\r\n writer = UnicodeWriter(out)\r\n t = db.session.query(table)\\\r\n .filter_by(app_id=app.id)\\\r\n .first()\r\n if t is not None:\r\n if test(t):\r\n writer.writerow(sorted(t.info.keys()))\r\n\r\n return Response(get_csv(out, writer, table, handle_row),\r\n mimetype='text/csv')\r\n else:\r\n flash(msg, 'info')\r\n return respond()\r\n\r\n export_formats = [\"json\", \"csv\"]\r\n if current_user.is_authenticated():\r\n if current_user.ckan_api:\r\n export_formats.append('ckan')\r\n\r\n ty = request.args.get('type')\r\n fmt = request.args.get('format')\r\n if not (fmt and ty):\r\n if len(request.args) >= 1:\r\n abort(404)\r\n return render_template('/applications/export.html',\r\n title=title,\r\n loading_text=loading_text,\r\n ckan_name=current_app.config.get('CKAN_NAME'),\r\n app=app,\r\n owner=owner)\r\n if fmt not in export_formats:\r\n abort(415)\r\n return {\"json\": respond_json, \"csv\": respond_csv, 'ckan': respond_ckan}[fmt](ty)", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def save(self):\r\n self.df_app_data = self.df_app_data.to_csv(\"app_data.csv\", index=False)", "def export(self):\n f = open(self.database, 'w')\n for line in self.conn.iterdump():\n f.write(line)\n self.c.close()", "def appdata(appname):\n z = Zap(appname)\n z.appdata(stdout=True)", "def get_all_applicant(self) -> List[NoSQLUserApplication]:\n return self.user_application_manager.all()", "def export(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"channels\": [channel for channel in self.channels],\n \"packages\": self.packages.export(),\n \"logs\": [log for log in self.logs],\n \"actions\": [action for action in self.actions],\n \"debug\": [debug for debug in self.debug],\n }", "def export_everything(self):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n previoustext = self.tabs.window.statuslabel['text']\n res = tkinter.messagebox.askyesno(\n 'Export Everything',\n 'Exporting data on all AIS stations, this may take some time.')\n if res:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n self.tabs.window.statuslabel.config(\n text='Exporting all AIS station data to - {}'.format(\n outpath),\n fg='black', bg='gold')\n self.update_idletasks()\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n export.export_everything(\n self.tabs.window.aistracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n self.tabs.window.statuslabel.config(\n text=previoustext, bg='light grey')\n else:\n raise ExportAborted(\n 'Export of all AIS data cancelled by user.')\n else:\n raise ExportAborted('Export of all AIS data cancelled by user.')", "def dump_all(self):\n _monitored_data_path = if_exists(\"additional_data\")\n _monitored_current_data = os.path.join(_monitored_data_path, f\"{self.user}_monitoring.pickle\")\n 
data = {'monitored_users': self.monitored_users, 'last_ts': datetime.now().timestamp()}\n with open(_monitored_current_data, 'wb') as f:\n pickle.dump(data, f)", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "def get_all_apps(self):\n return list(self.apps.values())", "def get_all_applications():\n cursor.execute(\n f'SELECT * FROM public.applications where status = %s', (\"pending\",))\n rows = cursor.fetchall()\n application_dicts = []\n\n for item in rows:\n application = Application(id=item[0], party_name=item[1], office_name=item[2], user_id=item[3],\n date_created=item[4],status=item[5])\n application = application.json_dumps()\n application_dicts.append(application)\n return application_dicts", "def read_all():\n\n # Create the list of environments from our data\n environment = Environment.query.order_by(Environment.key).all()\n app.logger.debug(pformat(environment))\n # Serialize the data for the response\n environment_schema = EnvironmentSchema(many=True)\n data = environment_schema.dump(environment)\n return data", "def export_all(self, setup_id):\n sessions = Session.objects.filter(setup_id_id=setup_id, status='accepted').order_by('id')\n json_string = Facade.export_all_sessions(sessions)\n return json_string", "def generate_viewset_for_all_apps(self):\n for app in self.app_models_dict:\n self.file_write_flag = True\n self.write_init_file = True\n for model_name in self.app_models_dict[app]:\n for serializer_name in self.app_model_serializer_dict[model_name]:\n stream = self.generate_view_stream(app, model_name, serializer_name)\n self.write_to_file(stream, app, serializer_name, model_name)\n return self.app_viewset_dict", "def save_data(app):\n\n prepared_data = (\n app.block_chain.to_list(),\n app.open_txs.to_list(),\n app.network.to_list(),\n app.wallet.to_dict()\n )\n\n try:\n with open(\n file=r'./app/data/app-{}.dat'.format(app.port),\n mode='w',\n encoding='utf-8'\n ) as f:\n for data in prepared_data:\n f.write(json.dumps(data))\n f.write('\\n')\n \n ConsoleLogger.write_log(\n 'info',\n __name__,\n 'save_data',\n 'Data saving is done successfully.'\n )\n\n return True\n except IOError:\n ConsoleLogger.write_log(\n 'error',\n __name__,\n 'save_data',\n 'Data saving is failed.'\n )\n\n return False", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def sync_apps(self):\n pass", "def sync_dashboards(app=None):\n\tif not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):\n\t\treturn\n\tif app:\n\t\tapps = [app]\n\telse:\n\t\tapps = frappe.get_installed_apps()\n\n\tfor app_name in apps:\n\t\tprint(\"Updating Dashboard for {app}\".format(app=app_name))\n\t\tfor module_name in frappe.local.app_modules.get(app_name) or []:\n\t\t\tfrappe.flags.in_import = True\n\t\t\tmake_records_in_module(app_name, module_name)\n\t\t\tfrappe.flags.in_import = False", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, 
self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def dump_sessions(self, data):\n try:\n with open(\"sessions.json\", \"w\") as file:\n json.dump(data, file)\n except:\n print(\"Can not save active sessions list to disk. Check permissions.\")", "def save_appdata(appdata):\n try:\n shutil.copyfile(str(FNAME), str(FNAME) + '.bak')\n except FileNotFoundError:\n pass\n with FNAME.open('w') as _out:\n json.dump(appdata, _out)", "def export(self, location=None):\n self.location = self.set_location(location)\n\n # Export all Stormpath data.\n for export_type in self.EXPORTS:\n getattr(self, 'export_' + export_type)()", "def _get_app_data(self):\n lti = LTI(self.request, self.kwargs[\"uuid\"])\n lti.verify()\n\n app_data = None\n if lti.is_student:\n cache_key = \"app_data|{model:s}|{domain:s}|{context:s}|{resource!s}\".format(\n model=self.model.__name__,\n domain=lti.get_consumer_site().domain,\n context=lti.context_id,\n resource=lti.resource_id,\n )\n\n app_data = cache.get(cache_key)\n permissions = {\"can_access_dashboard\": False, \"can_update\": False}\n\n if not app_data:\n resource = get_or_create_resource(self.model, lti)\n permissions = {\n \"can_access_dashboard\": lti.is_instructor or lti.is_admin,\n \"can_update\": (lti.is_instructor or lti.is_admin)\n and resource.playlist.lti_id == lti.context_id,\n }\n app_data = {\n \"modelName\": self.model.RESOURCE_NAME,\n \"resource\": self.serializer_class(resource).data if resource else None,\n \"state\": \"success\",\n \"sentry_dsn\": settings.SENTRY_DSN,\n \"environment\": settings.ENVIRONMENT,\n \"release\": settings.RELEASE,\n \"static\": {\"svg\": {\"plyr\": static(\"svg/plyr.svg\")}},\n }\n if lti.is_student:\n cache.set(cache_key, app_data, settings.APP_DATA_CACHE_DURATION)\n\n if app_data[\"resource\"] is not None:\n try:\n locale = react_locale(lti.launch_presentation_locale)\n except ImproperlyConfigured:\n locale = \"en_US\"\n\n # Create a short-lived JWT token for the video\n jwt_token = AccessToken()\n jwt_token.payload.update(\n {\n \"session_id\": str(uuid.uuid4()),\n \"context_id\": lti.context_id,\n \"resource_id\": str(lti.resource_id),\n \"roles\": lti.roles,\n \"course\": lti.get_course_info(),\n \"locale\": locale,\n \"permissions\": permissions,\n \"maintenance\": settings.MAINTENANCE_MODE,\n }\n )\n try:\n jwt_token.payload[\"user_id\"] = lti.user_id\n except AttributeError:\n pass\n\n app_data[\"jwt\"] = str(jwt_token)\n\n return app_data", "def apps_information(self):\n with open(self.app_data_path, 'r') as app_csv_file:\n csv_reader = csv.reader(app_csv_file)\n apps = [self.AppInformation(app[0], app[1], app[2], app[3], app[4], app[5]) for app in csv_reader]\n return apps" ]
[ "0.66612107", "0.6570566", "0.62946093", "0.59756947", "0.59498", "0.59372663", "0.5811485", "0.5728861", "0.55750054", "0.55355513", "0.5454583", "0.5442122", "0.5434154", "0.54202384", "0.54049903", "0.5402838", "0.5373483", "0.53653264", "0.5299492", "0.52917093", "0.5284996", "0.52619165", "0.52590096", "0.52565086", "0.52306145", "0.5202779", "0.5194051", "0.5171682", "0.516432", "0.5159357" ]
0.7371662
0
Export all directory data for this Stormpath account.
def export_directories(self): print('=== Exporting all directory data...') for directory in self.client.directories: print('- Exporting directory:', directory.name) json = { 'id': self.get_id(directory), 'href': directory.href, 'name': directory.name, 'description': directory.description, 'status': directory.status, 'createdAt': directory.created_at.isoformat(), 'modifiedAt': directory.modified_at.isoformat(), 'customData': self.get_custom_data(directory), 'groups': [], } for group in directory.groups: json['groups'].append({ 'id': self.get_id(group), 'href': group.href, 'name': group.name, 'description': group.description, 'status': group.status, 'createdAt': group.created_at.isoformat(), 'modifiedAt': group.modified_at.isoformat(), }) json['provider'] = { 'href': directory.provider.href, 'providerId': directory.provider.provider_id, 'agent': None, } try: json['provider']['createdAt'] = directory.provider.created_at.isoformat() json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat() except AttributeError: json['provider']['createdAt'] = None json['provider']['modifiedAt'] = None try: json['provider']['clientId'] = directory.provider.client_id except AttributeError: json['provider']['clientId'] = None try: json['provider']['clientSecret'] = directory.provider.client_secret except AttributeError: json['provider']['clientSecret'] = None try: json['provider']['redirectUri'] = directory.provider.redirect_uri except AttributeError: json['provider']['redirectUri'] = None try: json['provider']['agent'] = { 'id': self.get_id(directory.provider.agent), 'href': directory.provider.agent.href, 'status': directory.provider.agent.status, 'createdAt': directory.provider.agent.created_at.isoformat(), 'modifiedAt': directory.provider.agent.modified_at.isoformat(), 'config': { 'directoryHost': directory.provider.agent.directory_host, 'directoryPort': directory.provider.agent.directory_port, 'sslRequired': directory.provider.agent.ssl_required, 'agentUserDn': directory.provider.agent.agent_user_dn, 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password, 'baseDn': directory.provider.agent.base_dn, 'pollInterval': directory.provider.agent.poll_interval, 'referralMode': directory.provider.agent.referral_mode, 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues, 'accountConfig': directory.provider.agent.account_config, 'groupConfig': directory.provider.agent.group_config, }, 'download': { }, } except AttributeError: pass if directory.password_policy: json['passwordPolicy'] = { 'id': self.get_id(directory.password_policy), 'href': directory.password_policy.href, #'createdAt': directory.password_policy.created_at.isoformat(), #'modifiedAt': directory.password_policy.modified_at.isoformat(), 'resetEmailStatus': directory.password_policy.reset_email_status, 'resetEmailTemplates': [], 'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status, 'resetSuccessEmailTemplates': [], 'resetTokenTtl': directory.password_policy.reset_token_ttl, 'strength': { 'href': directory.password_policy.strength.href, #'createdAt': directory.password_policy.strength.created_at.isoformat(), #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(), 'maxLength': directory.password_policy.strength.max_length, 'minDiacritic': directory.password_policy.strength.min_diacritic, 'minLength': directory.password_policy.strength.min_length, 'minLowerCase': directory.password_policy.strength.min_lower_case, 'minNumeric': 
directory.password_policy.strength.min_numeric, 'minSymbol': directory.password_policy.strength.min_symbol, 'minUpperCase': directory.password_policy.strength.min_upper_case, }, } try: for template in directory.password_policy.reset_email_templates: json['passwordPolicy']['resetEmailTemplates'].append({ 'id': self.get_id(template), 'href': template.href, 'createdAt': template.created_at.isoformat(), 'modifiedAt': template.modified_at.isoformat(), 'fromName': template.from_name, 'name': template.name, 'description': template.description, 'fromEmailAddress': template.from_email_address, 'textBody': template.text_body, 'htmlBody': template.html_body, 'defaultModel': template.default_model, 'mimeType': template.mime_type, 'subject': template.subject, }) except AttributeError: pass try: for template in directory.password_policy.reset_success_email_templates: json['passwordPolicy']['resetSuccessEmailTemplates'].append({ 'id': self.get_id(template), 'href': template.href, 'createdAt': template.created_at.isoformat(), 'modifiedAt': template.modified_at.isoformat(), 'fromName': template.from_name, 'name': template.name, 'description': template.description, 'fromEmailAddress': template.from_email_address, 'textBody': template.text_body, 'htmlBody': template.html_body, 'mimeType': template.mime_type, 'subject': template.subject, }) except AttributeError: pass tenant = self.get_id(directory.tenant) self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json) print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def export_data(self):\n return self.export_all_data()", "def export_dir(self):\n return self._directory(0) # DIRECTORY_ENTRY_EXPORT", "def save_all(cls, dirpath=\".\"):\n for n, v in cls.__data.items():\n pickle.dump(v, open(cls.dirpath + n + '.p', 'wb'))\n print \"Data saved to: %s\" % dirpath", "def exports(self):\n\n try:\n data_dir = self.export_dir()\n except ValueError, why:\n raise StopIteration(why)\n\n expdir = obj.Object('_IMAGE_EXPORT_DIRECTORY',\n offset = self.DllBase + data_dir.VirtualAddress,\n vm = self.obj_native_vm,\n parent = self)\n\n if expdir.valid(self._nt_header()):\n # Ordinal, Function RVA, and Name Object \n for o, f, n in expdir._exported_functions():\n yield o, f, n", "def run(self):\n self.export_users()", "def dump_data(self):\n while not path.isdir(self.directory):\n print(\n \"# The directory {} does not exist. Do you want to create it (1, default) or specify another? 
(2) [1/2]\".format(\n self.directory))\n select = input()\n if select == \"2\":\n self.directory = input(\"Enter new directory: \\n\")\n else:\n mkdir(self.directory)\n print(\"# Directory \" + self.directory + \" created\")\n\n self.fullpath = self.directory + \"/\" + self.fName\n\n self.data_instance.dump_data(self.fullpath)", "def export_groups(self):\n print('=== Exporting all group data...')\n\n for group in self.client.tenant.groups:\n print('- Exporting group:', group.name)\n\n json = {\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n 'customData': self.get_custom_data(group),\n 'directory': {\n 'id': self.get_id(group.directory),\n 'href': group.directory.href,\n 'name': group.directory.name,\n 'description': group.directory.description,\n 'status': group.directory.status,\n 'createdAt': group.directory.created_at.isoformat(),\n 'modifiedAt': group.directory.modified_at.isoformat(),\n },\n 'accounts': [],\n }\n\n for account in group.accounts:\n json['accounts'].append({\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/groups/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def export(self, context, recursive=False):\n\n data = self.fieldData(context)\n data.update(self.attributeData(context))\n\n if recursive and IFolderish.providedBy(context):\n children = []\n for obj in context.listFolderContents():\n children.append(self.export(obj, True))\n data['children'] = children\n\n return [data]", "def list_all():\n if os.path.exists(DATA_DIR):\n return os.listdir(DATA_DIR)\n return []", "def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst", "def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')", "def list(self):\n response = self._get()\n\n directories = []\n for dir_json in response[\"directories\"]:\n directories.append(Directory._create_from_json(dir_json, self._session, self._url_base))\n\n return directories", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def data_dir(self) -> Path:\n return 
self._data_dir", "def save(self, directory):\n pass # pragma: no cover", "def data_directories(self):\n\n return [simulation.outdir for simulation in self.simulations]", "def list(self):\n\t\tendpoint = \"/api/walrus/project/\" + self.client.project_string_id + \\\n\t\t\t \"/export/working_dir/list\"\n\n\t\tresponse = self.client.session.get(self.client.host + endpoint)\n\n\t\tself.client.handle_errors(response)\n\n\t\texport_list_json = response.json().get('export_list')\n\t\texport_list = []\n\n\t\tif export_list_json:\n\t\t\tfor export_json in export_list_json:\n\t\t\t\texport_list.append(self.new(export_json))\n\n\t\treturn export_list", "def backup_data():\n\ttry:\n\t\tos.chdir(backup_directory)\n\texcept:\n\t\tprint(\"Backup folder does not exist!\")\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tos.chdir('..')\n\tfor directory in directories:\n\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\tprint(\"Backup complete!\")", "def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)", "def report(self):\n from . import databases\n\n _current = self.current\n data = []\n\n def get_dir_size(dirpath):\n \"\"\"Modified from http://stackoverflow.com/questions/12480367/how-to-generate-directory-size-recursively-in-python-like-du-does.\n\n Does not follow symbolic links\"\"\"\n return sum(\n sum(os.path.getsize(root / name) for name in files)\n for root, dirs, files in os.walk(dirpath)\n )\n\n names = sorted([x.name for x in self])\n for obj in names:\n self.set_current(obj, update=False, writable=False)\n data.append((obj, len(databases), get_dir_size(projects.dir) / 1e9))\n self.set_current(_current)\n return data", "def list_dirs(self):\n return self.list_groups()", "def listdirs(self):\n return self.list_groups()", "def data_dir(self):\n return self._data_dir", "def data_dir(self):\n return self._data_dir", "def __dir__(self):\n result = super(Result, self).__dir__()\n if self._data_ is not None:\n result.extend(self._data.keys())\n return result", "def export(self, path):\n if path[-1] != '/':\n path += '/'\n\n rtn = {}\n\n # DP NOTE: not using self.read becuase of the different token needed\n client = self.connect(VAULT_TOKEN)\n results = client.read(path[:-1])\n if results is not None:\n rtn[path[:-1]] = results['data']\n\n results = client.list(path)\n for key in results['data']['keys']:\n key = path + key\n if key[-1] == '/':\n data = self.export(key)\n rtn.update(data)\n else:\n # DP NOTE: This will currently do a duplicate read if\n # data is stored at key and in paths under\n # key\n # To prevent this, check to see if (key + '/') is in keys\n data = self.read(key)\n rtn[key] = data['data']\n\n return rtn", "def user_data_dir():\n # TODO: hardcoded\n app_name = 'etheroll'\n data_dir = os.path.join('/sdcard', app_name)\n data_dir = os.path.expanduser(data_dir)\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n return data_dir", "def data_dir(self, dd=None):\n self._data_dir = dd", "def data_dir(self):\r\n return self._data_dir" ]
[ "0.6736293", "0.63576007", "0.62175053", "0.60875624", "0.58454275", "0.5837468", "0.57908094", "0.570937", "0.5686852", "0.56343424", "0.55987227", "0.55606085", "0.55584383", "0.5548896", "0.5548611", "0.55001694", "0.5499001", "0.547543", "0.54266113", "0.5383986", "0.5332167", "0.5324081", "0.5303782", "0.52952594", "0.52952594", "0.5293603", "0.5292271", "0.5274497", "0.5268278", "0.52652127" ]
0.73455876
0
Export all organization data for this Stormpath account.
def export_organizations(self): print('\n=== Exporting all organization data...') for organization in self.client.organizations: print('- Exporting organizations:', organization.name) json = { 'id': self.get_id(organization), 'href': organization.href, 'name': organization.name, 'nameKey': organization.name_key, 'description': organization.description, 'status': organization.status, 'createdAt': organization.created_at.isoformat(), 'modifiedAt': organization.modified_at.isoformat(), 'customData': self.get_custom_data(organization), 'default_account_store_mapping': None, 'default_group_store_mapping': None, 'account_store_mappings': [], } default_account_store_mapping = organization.default_account_store_mapping default_group_store_mapping = organization.default_group_store_mapping if default_account_store_mapping: json['default_account_store_mapping'] = { 'id': organization.default_account_store_mapping.href.split('/')[-1], 'href': organization.default_account_store_mapping.href, 'type': organization.default_account_store_mapping.account_store.__class__.__name__, 'name': organization.default_account_store_mapping.account_store.name, 'list_index': organization.default_account_store_mapping.list_index, } if default_group_store_mapping: json['default_group_store_mapping'] = { 'id': organization.default_group_store_mapping.href.split('/')[-1], 'href': organization.default_group_store_mapping.href, 'type': organization.default_group_store_mapping.account_store.__class__.__name__, 'name': organization.default_group_store_mapping.account_store.name, 'list_index': organization.default_group_store_mapping.list_index, } for account_store_mapping in organization.account_store_mappings: json['account_store_mappings'].append({ 'id': self.get_id(account_store_mapping), 'href': account_store_mapping.href, 'account_store': { 'type': account_store_mapping.account_store.__class__.__name__, 'id': self.get_id(account_store_mapping.account_store), 'href': account_store_mapping.account_store.href, 'name': account_store_mapping.account_store.name, 'description': account_store_mapping.account_store.description, 'status': account_store_mapping.account_store.status, }, 'list_index': account_store_mapping.list_index, 'is_default_account_store': account_store_mapping.is_default_account_store, 'is_default_group_store': account_store_mapping.is_default_group_store, }) tenant = self.get_id(organization.tenant) self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json) print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map", "def export_data(self):\n return self.export_all_data()", "def organizations(self):\n self.elements('organizations')", "def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')", "def export_groups(self):\n print('=== Exporting all group data...')\n\n for group in self.client.tenant.groups:\n print('- Exporting group:', group.name)\n\n json = {\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': 
group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n 'customData': self.get_custom_data(group),\n 'directory': {\n 'id': self.get_id(group.directory),\n 'href': group.directory.href,\n 'name': group.directory.name,\n 'description': group.directory.description,\n 'status': group.directory.status,\n 'createdAt': group.directory.created_at.isoformat(),\n 'modifiedAt': group.directory.modified_at.isoformat(),\n },\n 'accounts': [],\n }\n\n for account in group.accounts:\n json['accounts'].append({\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/groups/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': 
account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='[email protected]',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def ls():\n return dynamodb.ls(OrganizationModel)", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def test_get_all_organization(self):\n self.client.force_authenticate(user=self.inventory_manager)\n response = self.client.get(\"/organization/\")\n self.assertEqual(response.status_code,\n status.HTTP_403_FORBIDDEN)", "def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()", "def organizations(self):\r\n return organizations.Organizations(self)", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def 
get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def fetch_organization(organization):\n return fetch_json(organization_url, organization)", "def export_directories(self):\n print('=== Exporting all directory data...')\n\n for directory in self.client.directories:\n print('- Exporting directory:', directory.name)\n\n json = {\n 'id': self.get_id(directory),\n 'href': directory.href,\n 'name': directory.name,\n 'description': directory.description,\n 'status': directory.status,\n 'createdAt': directory.created_at.isoformat(),\n 'modifiedAt': directory.modified_at.isoformat(),\n 'customData': self.get_custom_data(directory),\n 'groups': [],\n }\n\n for group in directory.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n json['provider'] = {\n 'href': directory.provider.href,\n 'providerId': directory.provider.provider_id,\n 'agent': None,\n }\n\n try:\n json['provider']['createdAt'] = directory.provider.created_at.isoformat()\n json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat()\n except AttributeError:\n json['provider']['createdAt'] = None\n json['provider']['modifiedAt'] = None\n\n try:\n json['provider']['clientId'] = directory.provider.client_id\n except AttributeError:\n json['provider']['clientId'] = None\n\n try:\n json['provider']['clientSecret'] = directory.provider.client_secret\n except AttributeError:\n json['provider']['clientSecret'] = None\n\n try:\n json['provider']['redirectUri'] = directory.provider.redirect_uri\n except AttributeError:\n json['provider']['redirectUri'] = None\n\n try:\n json['provider']['agent'] = {\n 'id': self.get_id(directory.provider.agent),\n 'href': directory.provider.agent.href,\n 'status': directory.provider.agent.status,\n 'createdAt': directory.provider.agent.created_at.isoformat(),\n 'modifiedAt': directory.provider.agent.modified_at.isoformat(),\n 'config': {\n 'directoryHost': directory.provider.agent.directory_host,\n 'directoryPort': directory.provider.agent.directory_port,\n 'sslRequired': directory.provider.agent.ssl_required,\n 'agentUserDn': directory.provider.agent.agent_user_dn,\n 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password,\n 'baseDn': directory.provider.agent.base_dn,\n 'pollInterval': directory.provider.agent.poll_interval,\n 'referralMode': directory.provider.agent.referral_mode,\n 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues,\n 'accountConfig': directory.provider.agent.account_config,\n 'groupConfig': directory.provider.agent.group_config,\n },\n 'download': {\n\n },\n }\n except AttributeError:\n pass\n\n if directory.password_policy:\n json['passwordPolicy'] = {\n 'id': self.get_id(directory.password_policy),\n 'href': directory.password_policy.href,\n #'createdAt': directory.password_policy.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.modified_at.isoformat(),\n 'resetEmailStatus': directory.password_policy.reset_email_status,\n 'resetEmailTemplates': [],\n 'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status,\n 'resetSuccessEmailTemplates': [],\n 'resetTokenTtl': 
directory.password_policy.reset_token_ttl,\n 'strength': {\n 'href': directory.password_policy.strength.href,\n #'createdAt': directory.password_policy.strength.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(),\n 'maxLength': directory.password_policy.strength.max_length,\n 'minDiacritic': directory.password_policy.strength.min_diacritic,\n 'minLength': directory.password_policy.strength.min_length,\n 'minLowerCase': directory.password_policy.strength.min_lower_case,\n 'minNumeric': directory.password_policy.strength.min_numeric,\n 'minSymbol': directory.password_policy.strength.min_symbol,\n 'minUpperCase': directory.password_policy.strength.min_upper_case,\n },\n }\n\n try:\n for template in directory.password_policy.reset_email_templates:\n json['passwordPolicy']['resetEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'defaultModel': template.default_model,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n try:\n for template in directory.password_policy.reset_success_email_templates:\n json['passwordPolicy']['resetSuccessEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n tenant = self.get_id(directory.tenant)\n self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def run(self):\n self.export_users()", "def all_organizations(\n self,\n page: int | None = None,\n per_page: int | None = None,\n include_totals: bool = True,\n from_param: str | None = None,\n take: int | None = None,\n ):\n\n params = {\n \"page\": page,\n \"per_page\": per_page,\n \"include_totals\": str(include_totals).lower(),\n \"from\": from_param,\n \"take\": take,\n }\n\n return self.client.get(self._url(), params=params)", "def export_everything(self):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n previoustext = self.tabs.window.statuslabel['text']\n res = tkinter.messagebox.askyesno(\n 'Export Everything',\n 'Exporting data on all AIS stations, this may take some time.')\n if res:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n self.tabs.window.statuslabel.config(\n text='Exporting all AIS station data to - {}'.format(\n outpath),\n fg='black', bg='gold')\n self.update_idletasks()\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n export.export_everything(\n self.tabs.window.aistracker,\n 
self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n self.tabs.window.statuslabel.config(\n text=previoustext, bg='light grey')\n else:\n raise ExportAborted(\n 'Export of all AIS data cancelled by user.')\n else:\n raise ExportAborted('Export of all AIS data cancelled by user.')", "def test_get_all_for_other_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n status=403\n )", "def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]" ]
[ "0.7005029", "0.6404232", "0.6374312", "0.6110875", "0.59876496", "0.5962026", "0.5902772", "0.5896829", "0.5857048", "0.5806698", "0.5772332", "0.5749651", "0.5704955", "0.56713694", "0.5623573", "0.56124943", "0.56087846", "0.55982965", "0.5550646", "0.553527", "0.5504203", "0.54850984", "0.544906", "0.54374856", "0.5406261", "0.540584", "0.5387814", "0.53784776", "0.53774714", "0.53706366" ]
0.80846584
0
Export all group data for this Stormpath account.
def export_groups(self):
        print('=== Exporting all group data...')

        for group in self.client.tenant.groups:
            print('- Exporting group:', group.name)

            json = {
                'id': self.get_id(group),
                'href': group.href,
                'name': group.name,
                'description': group.description,
                'status': group.status,
                'createdAt': group.created_at.isoformat(),
                'modifiedAt': group.modified_at.isoformat(),
                'customData': self.get_custom_data(group),
                'directory': {
                    'id': self.get_id(group.directory),
                    'href': group.directory.href,
                    'name': group.directory.name,
                    'description': group.directory.description,
                    'status': group.directory.status,
                    'createdAt': group.directory.created_at.isoformat(),
                    'modifiedAt': group.directory.modified_at.isoformat(),
                },
                'accounts': [],
            }

            for account in group.accounts:
                json['accounts'].append({
                    'id': self.get_id(account),
                    'href': account.href,
                    'username': account.username,
                    'email': account.email,
                    'fullName': account.full_name,
                    'givenName': account.given_name,
                    'middleName': account.middle_name,
                    'surname': account.surname,
                    'status': account.status,
                    'createdAt': account.created_at.isoformat(),
                    'modifiedAt': account.modified_at.isoformat(),
                })

            tenant = self.get_id(self.client.tenant)
            self.write('%s/%s/groups/%s' % (self.location, tenant, json['id']), json)

        print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_accounts(self):\n print('=== Exporting all account data...')\n\n for account in self.client.tenant.accounts:\n print('- Exporting account:', account.email)\n\n json = {\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n 'customData': self.get_custom_data(account),\n 'groups': [],\n 'apiKeys': [],\n 'directory': {\n 'id': self.get_id(account.directory),\n 'href': account.directory.href,\n 'name': account.directory.name,\n 'description': account.directory.description,\n 'status': account.directory.status,\n 'createdAt': account.directory.created_at.isoformat(),\n 'modifiedAt': account.directory.modified_at.isoformat(),\n },\n }\n\n for api_key in account.api_keys:\n json['apiKeys'].append({\n 'href': api_key.href,\n 'id': api_key.id,\n 'secret': api_key.secret,\n #'createdAt': api_key.created_at.isoformat(),\n #'modifiedAt': api_key.modified_at.isoformat(),\n })\n\n for group in account.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def export_data(self):\n return self.export_all_data()", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def show_all_groups(self, account_name=None, account_id=None, path=None,\n group_name=None, group_id=None, search=False, print_table=True):\n pt = PrettyTable(['ACCOUNT:', 'GROUPNAME:', 'GROUP_ID:'])\n pt.hrules = 1\n pt.align = 'l'\n list = self.get_all_groups(account_name=account_name, account_id=account_id,\n path=path, group_name=group_name, group_id=group_id,\n search=search)\n for group in list:\n pt.add_row([group['account_name'], group['group_name'], group['group_id']])\n if print_table:\n self.log.info(\"\\n\" + str(pt) + \"\\n\")\n else:\n return pt", "def get_all_access_groups():\n\treturn {\"access_groups\": [ag.serialize for ag in AccessGroup.query.all()]}, 200", "def save(self):\n groups = defaultdict(list)\n for group, user, var in self.items:\n if var.get():\n groups[group].append(user)\n\n save_groups(self.filename, groups)", "def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))", "def all_groups(request):\r\n group = Group()\r\n return HttpResponse(json.dumps(group.parseFile()))", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def RetrieveAllGroups(**argd):\n flag, ret = CGateway.core.RetrieveAllGroup(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n hmBuilder = []\n for hm in ret:\n hmBuilder.append(hm.ToJsonDict())\n return CGateway._SuccessResponse({'return': hmBuilder})", "def customer_group_get_all():\n 
db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def export_directories(self):\n print('=== Exporting all directory data...')\n\n for directory in self.client.directories:\n print('- Exporting directory:', directory.name)\n\n json = {\n 'id': self.get_id(directory),\n 'href': directory.href,\n 'name': directory.name,\n 'description': directory.description,\n 'status': directory.status,\n 'createdAt': directory.created_at.isoformat(),\n 'modifiedAt': directory.modified_at.isoformat(),\n 'customData': self.get_custom_data(directory),\n 'groups': [],\n }\n\n for group in directory.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n json['provider'] = {\n 'href': directory.provider.href,\n 'providerId': directory.provider.provider_id,\n 'agent': None,\n }\n\n try:\n json['provider']['createdAt'] = directory.provider.created_at.isoformat()\n json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat()\n except AttributeError:\n json['provider']['createdAt'] = None\n json['provider']['modifiedAt'] = None\n\n try:\n json['provider']['clientId'] = directory.provider.client_id\n except AttributeError:\n json['provider']['clientId'] = None\n\n try:\n json['provider']['clientSecret'] = directory.provider.client_secret\n except AttributeError:\n json['provider']['clientSecret'] = None\n\n try:\n json['provider']['redirectUri'] = directory.provider.redirect_uri\n except AttributeError:\n json['provider']['redirectUri'] = None\n\n try:\n json['provider']['agent'] = {\n 'id': self.get_id(directory.provider.agent),\n 'href': directory.provider.agent.href,\n 'status': directory.provider.agent.status,\n 'createdAt': directory.provider.agent.created_at.isoformat(),\n 'modifiedAt': directory.provider.agent.modified_at.isoformat(),\n 'config': {\n 'directoryHost': directory.provider.agent.directory_host,\n 'directoryPort': directory.provider.agent.directory_port,\n 'sslRequired': directory.provider.agent.ssl_required,\n 'agentUserDn': directory.provider.agent.agent_user_dn,\n 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password,\n 'baseDn': directory.provider.agent.base_dn,\n 'pollInterval': directory.provider.agent.poll_interval,\n 'referralMode': directory.provider.agent.referral_mode,\n 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues,\n 'accountConfig': directory.provider.agent.account_config,\n 'groupConfig': directory.provider.agent.group_config,\n },\n 'download': {\n\n },\n }\n except AttributeError:\n pass\n\n if directory.password_policy:\n json['passwordPolicy'] = {\n 'id': self.get_id(directory.password_policy),\n 'href': directory.password_policy.href,\n #'createdAt': directory.password_policy.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.modified_at.isoformat(),\n 'resetEmailStatus': directory.password_policy.reset_email_status,\n 'resetEmailTemplates': [],\n 
'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status,\n 'resetSuccessEmailTemplates': [],\n 'resetTokenTtl': directory.password_policy.reset_token_ttl,\n 'strength': {\n 'href': directory.password_policy.strength.href,\n #'createdAt': directory.password_policy.strength.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(),\n 'maxLength': directory.password_policy.strength.max_length,\n 'minDiacritic': directory.password_policy.strength.min_diacritic,\n 'minLength': directory.password_policy.strength.min_length,\n 'minLowerCase': directory.password_policy.strength.min_lower_case,\n 'minNumeric': directory.password_policy.strength.min_numeric,\n 'minSymbol': directory.password_policy.strength.min_symbol,\n 'minUpperCase': directory.password_policy.strength.min_upper_case,\n },\n }\n\n try:\n for template in directory.password_policy.reset_email_templates:\n json['passwordPolicy']['resetEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'defaultModel': template.default_model,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n try:\n for template in directory.password_policy.reset_success_email_templates:\n json['passwordPolicy']['resetSuccessEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n tenant = self.get_id(directory.tenant)\n self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_all_groups(self):\n return self.groups + ['all']", "def export_organizations(self):\n print('\\n=== Exporting all organization data...')\n\n for organization in self.client.organizations:\n print('- Exporting organizations:', organization.name)\n\n json = {\n 'id': self.get_id(organization),\n 'href': organization.href,\n 'name': organization.name,\n 'nameKey': organization.name_key,\n 'description': organization.description,\n 'status': organization.status,\n 'createdAt': organization.created_at.isoformat(),\n 'modifiedAt': organization.modified_at.isoformat(),\n 'customData': self.get_custom_data(organization),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n }\n\n default_account_store_mapping = organization.default_account_store_mapping\n default_group_store_mapping = organization.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': organization.default_account_store_mapping.href.split('/')[-1],\n 'href': organization.default_account_store_mapping.href,\n 'type': organization.default_account_store_mapping.account_store.__class__.__name__,\n 'name': 
organization.default_account_store_mapping.account_store.name,\n 'list_index': organization.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': organization.default_group_store_mapping.href.split('/')[-1],\n 'href': organization.default_group_store_mapping.href,\n 'type': organization.default_group_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_group_store_mapping.account_store.name,\n 'list_index': organization.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in organization.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(organization.tenant)\n self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "async def getAll():\n return [cluster.export() for cluster in clusters.get_all()]", "def groups(self):\n return self.get_data(\"groups\")", "def store_grouped_data(data,path):\n i = 0\n for name, group in data:\n l = len(group)\n print name, \", \", l\n if l > 999:\n group.to_csv(path + \"//clean.events\"+ str(i), index=False)\n i += 1", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(\n f'{ind}visgroup\\n'\n f'{ind}{{\\n'\n f'{ind}\\t\"name\" \"{self.name}\"\\n'\n f'{ind}\\t\"visgroupid\" \"{self.id}\"\\n'\n f'{ind}\\t\"color\" \"{self.color}\"\\n'\n )\n for child in self.child_groups:\n child.export(buffer, ind + '\\t')\n buffer.write(ind + '}\\n')", "def run(self):\n self.export_users()", "def save_groups(filename, groups):\n with open(filename, 'wb') as f:\n for group, members in groups.iteritems():\n f.write('%s: %s\\n' % (group, ' '.join(members)))", "def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):\n return self.client.api.export(self.id, chunk_size)", "def test_export(self):\n member_one = self.create_user()\n member_two = self.create_user()\n owner = self.create_superuser(\n first_name='Group', last_name='Reporter Exporter')\n\n group = self.create_group(created_by=owner, featured=True)\n group.owners.add(owner)\n\n member_one.add_to_group(group.pk)\n member_two.add_to_group(group.pk)\n\n self.create_thread(sender=member_one, group=group)\n\n response = self.client.get(\n '{path}?export&search_name={group_name}'.format(\n path=reverse('groups_report'), group_name=group.group.name))\n self.assertEqual(\n response['Content-Disposition'],\n 'attachment; filename=groups.csv'\n )\n self.assertEqual(\n response['Content-Type'],\n 'text/csv'\n )\n\n # We can use python's CSV parsing functionality by creating a CSV file\n # object and passing it to DictReader.\n reader = csv.DictReader(io.StringIO(unicode(response.content)))\n\n # We can get the first row in the CSV (which should just be our 
group)\n # by using python's next() functionality.\n report = next(reader)\n\n self.assertEqual(report['Admins'], '1')\n self.assertEqual(report['Category'], 'Default')\n self.assertEqual(report['Created By'], 'Group R.')\n self.assertEqual(report['Featured'], 'True')\n self.assertEqual(report['Member list published'], 'True')\n self.assertEqual(report['Members'], '2')\n self.assertEqual(report['Messages'], '1')\n self.assertEqual(report['Name'], group.group.name)\n self.assertEqual(report['Posters'], '1')\n self.assertEqual(report['Private'], 'False')\n self.assertEqual(report['Threads'], '1')", "def export(self):\r\n mdict = self.matchdict\r\n username = mdict.get('username')\r\n\r\n if self.request.user is not None:\r\n current_user = self.request.user.username\r\n else:\r\n current_user = None\r\n\r\n bmark_list = BmarkMgr.user_dump(username)\r\n BmarkLog.export(username, current_user)\r\n\r\n self.request.response_content_type = 'text/html'\r\n\r\n headers = [('Content-Disposition',\r\n 'attachment; filename=\"bookie_export.html\"')]\r\n setattr(self.request, 'response_headerlist', headers)\r\n\r\n return {\r\n 'bmark_list': bmark_list,\r\n }", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()", "def groups_json(self):\n these_groups = {}\n if self.groups:\n for group in self.groups:\n these_groups[group.group_name] = group.json\n return these_groups", "def flush(self):\n for key in self.grouping_info.keys():\n if self._should_flush(key):\n self._write_current_buffer_for_group_key(key)", "def _retrieve(self):\n all_groups_settings = []\n iam_groups_settings = []\n\n model_manager = self.service_config.model_manager\n scoped_session, data_access = model_manager.get(self.model_name)\n with scoped_session as session:\n for settings in data_access.scanner_fetch_groups_settings(session,\n True):\n email = settings[0].split('group/')[1]\n iam_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n for settings in data_access.scanner_fetch_groups_settings(session,\n False):\n email = settings[0].split('group/')[1]\n all_groups_settings.append(groups_settings.GroupsSettings\n .from_json(email, settings[1]))\n\n return all_groups_settings, iam_groups_settings" ]
[ "0.6567801", "0.64058274", "0.6267897", "0.61082447", "0.60897505", "0.6071893", "0.59926325", "0.5926644", "0.58263636", "0.58187973", "0.5750844", "0.5742057", "0.57241446", "0.5646611", "0.56150687", "0.56112945", "0.55956185", "0.55901736", "0.556596", "0.55508196", "0.55428785", "0.55263275", "0.5520288", "0.5504266", "0.54941136", "0.5483367", "0.5474818", "0.5452014", "0.5447753", "0.5428506" ]
0.81396973
0
Export all account data for this Stormpath account.
def export_accounts(self):
        print('=== Exporting all account data...')

        for account in self.client.tenant.accounts:
            print('- Exporting account:', account.email)

            json = {
                'id': self.get_id(account),
                'href': account.href,
                'username': account.username,
                'email': account.email,
                'fullName': account.full_name,
                'givenName': account.given_name,
                'middleName': account.middle_name,
                'surname': account.surname,
                'status': account.status,
                'createdAt': account.created_at.isoformat(),
                'modifiedAt': account.modified_at.isoformat(),
                'customData': self.get_custom_data(account),
                'groups': [],
                'apiKeys': [],
                'directory': {
                    'id': self.get_id(account.directory),
                    'href': account.directory.href,
                    'name': account.directory.name,
                    'description': account.directory.description,
                    'status': account.directory.status,
                    'createdAt': account.directory.created_at.isoformat(),
                    'modifiedAt': account.directory.modified_at.isoformat(),
                },
            }

            for api_key in account.api_keys:
                json['apiKeys'].append({
                    'href': api_key.href,
                    'id': api_key.id,
                    'secret': api_key.secret,
                    #'createdAt': api_key.created_at.isoformat(),
                    #'modifiedAt': api_key.modified_at.isoformat(),
                })

            for group in account.groups:
                json['groups'].append({
                    'id': self.get_id(group),
                    'href': group.href,
                    'name': group.name,
                    'description': group.description,
                    'status': group.status,
                    'createdAt': group.created_at.isoformat(),
                    'modifiedAt': group.modified_at.isoformat(),
                })

            tenant = self.get_id(self.client.tenant)
            self.write('%s/%s/accounts/%s' % (self.location, tenant, json['id']), json)

        print('=== Done!\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_data(self):\n return self.export_all_data()", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def accounts():", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def list_accounts(self):\n pass", "def accounts(self):\r\n return acc.Accounts(self)", "def getCustomerAccountData(self):\n self.logger.debug(\"\")\n #Process each entry returned by getCustomersInfo through getAccountsInfo.\n customersInfoResponse = self.getCustomersInfo()\n if customersInfoResponse is None:\n self.logger.debug(\"did not get data from self.getCustomersInfo()\")\n raise RuntimeError()\n first = True\n cInfos = self.parseCustomerInfo(customersInfoResponse)\n self.logger.debug(\"%d cInfos\", len(cInfos))\n data = {}\n for cInfo in cInfos:\n if first:\n first = False\n else: # Adds a newline separator for text output.\n self.output.write(self.format({}))\n data['CustomerId'] = cInfo['Id']\n accountsInfoResponse = self.getAccountsInfo(cInfo['Id'], \"true\")\n if accountsInfoResponse is not None:\n data['accounts'] = self.parseAccountInfo(accountsInfoResponse)\n else:\n data['accounts'] = []\n self.logger.debug(\"yield %r\", data)\n yield data", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def get_accounts(self):\n return self.accounts.all()", "def accounts(self):\n return self._accounts.values()", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def accounts(self):\r\n return accounts.Accounts(self)", "def export_tenants(self):\n print('\\n=== Exporting all tenant data...')\n\n tenant = dict(self.client.tenant)\n\n print('- Exporting tenant:', tenant['name'])\n\n json = {\n 'id': self.get_id(tenant),\n 'href': tenant['href'],\n 'name': tenant['name'],\n 'key': tenant['key'],\n 'createdAt': tenant['created_at'].isoformat(),\n 'modifiedAt': tenant['modified_at'].isoformat(),\n 'customData': self.get_custom_data(tenant),\n }\n\n #for application in tenant.applications:\n\n self.write('%s/%s/meta' % (self.location, json['id']), json)\n\n print('=== Done!\\n')", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def export_records() -> List[Dict[str, Any]]:\n return_val = []\n with session_scope() as session:\n filter_after = datetime.today() - timedelta(12 * 30)\n\n records = (\n session.query(Users, func.sum(Orders.cost).label(\"total_account_value\"))\n .join(Orders)\n .filter(\n extract(\"year\", Orders.date) >= filter_after.year,\n extract(\"month\", Orders.date) >= filter_after.month,\n extract(\"day\", Orders.date) >= filter_after.day,\n )\n .group_by(Users.account)\n .all()\n )\n\n for user_account, total_account_value in records:\n user_account = {\n \"account\": user_account.account,\n 
\"active\": user_account.active,\n \"is_demo\": user_account.is_demo,\n \"total_account_value\": total_account_value,\n }\n return_val.append(user_account)\n return return_val", "def get(self):\n accounts = database.get_all(Accounts)\n all_accounts = []\n for account in accounts:\n all_transactions = []\n for transaction in account.transactions:\n all_transactions.append(transaction.id)\n new_account = {\n \"id\": account.id,\n \"name\": account.name,\n \"iban\": account.iban,\n \"balance\": float(account.balance),\n \"currency\": account.currency,\n \"transactions ids\": all_transactions\n }\n\n all_accounts.append(new_account)\n return json.dumps(all_accounts), 200", "def jsonify_all(cls):\n return jsonify(accounts=[account.as_dict() for account in cls.query.all()])", "def run(self):\n self.export_users()", "def accounts():\n pass", "def display_accounts(cls):\n return cls.account_list", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def get_accounts(self):\n return self.accounts", "def accounts(self):\r\n return resources.Accounts(self)", "def all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SELECTSQL = \"SELECT * FROM accounts;\"\n cursor.execute(SELECTSQL)\n result = []\n for dictrow in cursor.fetchall():\n result.append(cls(**dictrow))\n return result", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def get_accounts(self):\r\n return self._accounts", "def get_account_details(self):\n pass", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def accounts(self): # pragma: no coverage\r\n raise NotImplementedError()" ]
[ "0.66904116", "0.66201806", "0.65058035", "0.6460658", "0.6426826", "0.63867044", "0.63663775", "0.62696487", "0.62393665", "0.62054133", "0.6188278", "0.6173564", "0.61213315", "0.6097949", "0.6078857", "0.6077221", "0.6065543", "0.6044305", "0.60366124", "0.6018987", "0.5995661", "0.5993739", "0.59787", "0.5974675", "0.5974285", "0.59728754", "0.5966025", "0.59257907", "0.59228045", "0.5855417" ]
0.7974274
0
Log error, then raise if it is set.
def log_error(self, error: Exception) -> None:
        logging.error(error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_error(self, err_msg):\n if self._on_error_action == \"raise\":\n raise InvalidDatasetError(err_msg)\n else:\n logger.warning(err_msg)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)", "def error(self, *args):\n self.mylog.error(*args)", "def error(self, msg, *args):\n if self.lvl<=logging.ERROR: return self._log(msg, *args)", "def logError(self, *args):\n return _libsbml.SBMLErrorLog_logError(self, *args)", "def set_error(self, error):\n if self.log_file_exist(self.file_path_name):\n logging.error(error)\n else:\n print \"The log \"+ self.name_log + \"does not exist in the directory\"", "def raise_(err):\n raise err", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def logerror(self, msg):\n self.logger.error(msg)", "def error(self, msg, *args, **kwargs):\n self._log(self.err, msg, *args, **kwargs)", "def log_error(err):\n print(err)", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)", "def _check_exc(self):\n if self._exc is not None:\n raise self._exc", "def exception(self, msg, *args, **kwargs):\n ex = sys.exc_info()[1]\n\n if hasattr(ex, '_monocle'):\n args = args + (format_tb(ex),)\n self.logger.error('%s\\n%%s' % msg, *args, **kwargs)\n else:\n super(Adapter, self).exception(msg, *args, **kwargs)", "def error_traceback():\n Logger.log('ERROR', traceback.format_exc())", "def error(self, msg):\r\n self.logger.error(msg)", "def error(self, *args):\n\n if self.is_on(_Log.ERROR):\n self._write(self._err, *args)", "def log_error(self, msg):\n self.logger.error(msg)", "def error(self, message):\n for_verbosity = 0\n if self.verbosity_level >= for_verbosity:\n self.logger.error(message, exc_info=True)", "def setErrorLog(self, *args):\n return _libsbml.XMLInputStream_setErrorLog(self, *args)", "def error(self, msg, *args, **kwargs):\n self._logger.error(msg, *args, **kwargs)", "def _err(self, *args):\n logger.error(*args)\n exit(1)", "def log_error(e):\r\n print(e)", "def log_error(e):\r\n print(e)", "def error(self, msg):\n self.__logger.error(msg)", "def logError(e):\r\n print(e)", "def check_error(err):\n if err != None:\n logging.error(err)\n sys.exit(-1)" ]
[ "0.7021225", "0.6831562", "0.6772998", "0.67632544", "0.6753129", "0.67513996", "0.6689103", "0.66439587", "0.6643325", "0.66173506", "0.6590305", "0.65843606", "0.6580504", "0.6578764", "0.6578764", "0.6573797", "0.6555744", "0.6552961", "0.654769", "0.65416425", "0.6523136", "0.6522392", "0.6521013", "0.65142274", "0.6513491", "0.64711684", "0.64711684", "0.6470685", "0.646245", "0.64518034" ]
0.68669385
1
Get replacement file when original missing.
def get_replacement_file(self, path) -> Optional[bytes]:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return", "def get_pristine(self):\n for path in self.get_all_files():\n if path.endswith('.orig.tar.gz'):\n return path\n return None", "def get_original_path(self) -> Optional[str]:\n return self.original_path", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def get_existing_filename_or_die(self, key) -> str:\n filename = self.get_or_default(key, None)\n if filename is None:\n print(\"Error, '\" + key + \"' is required.\")\n sys.exit(1)\n elif not os.path.isfile(filename):\n print(\"'\" + str(filename) + \"' is not a file.\")\n sys.exit(1)\n else:\n return filename", "def getReplacement(tag):\n global repd\n \n # read files to load global\n # dictionary if hasn't been read before\n \n if len(repd.keys()) == 0:\n readDirectory()\n \n # tag shouldn't have <>, but be safe\n \n if tag.startswith(\"<\"):\n end = tag.find(\">\")\n tag = tag[1:end]\n \n \n if not tag in repd:\n tag = tag+'s'\n if not tag in repd:\n return \"NO-REPLACEMENT\"\n \n return random.choice(repd[tag])", "def overwrite_original_file(self):\n return self.__overwrite_original_file", "def _verify_original_file_path(self):\n if not isinstance(self._original_file_path, bytes):\n self._original_file_path = Path(self._original_file_path).expanduser()\n if not self._original_file_path.exists():\n raise FileNotFoundError(FILE_NOT_FOUND_ERROR.format(self._original_file_path))\n\n self._original_file_name = self._original_file_path.name", "def _get_source_file(self):\n file_name = get_file_join_name(self._input_path, self._source_file_target)\n if not file_name:\n file_name = get_file_join_name(self._input_path, self._source_file_target_old)\n if not file_name:\n data_path = os.path.join(self._input_path, \"data\")\n file_name = get_file_join_name(data_path, self._source_file_target)\n if not file_name:\n file_name = get_file_join_name(data_path, self._source_file_target_old)\n return file_name", "def get_temporary_file(original_file, no_modifications=False):\n if no_modifications:\n handle = open(original_file, 'wb')\n return handle\n\n handle = open(get_temporary_file_name(original_file), 'wb')\n return handle", "def get_non_existing_file(file_name, max_attempts = 1000):\n if not os.path.exists(file_name):\n return file_name\n attempt = 0\n while True:\n candidate_file_name = \"%s.%d\" % (file_name, attempt)\n if not os.path.exists(candidate_file_name):\n return candidate_file_name\n attempt += 1\n if attempt >= max_attempts:\n msg = \"Cannot find non existing file from pattern %s\"\n raise ValueError(msg % file_name)", "def getFile():\n return P4_FILE_SEARCH.sub( P4_FILE_REPLACE_STRING, \"$File$\")", "def test_get_original_file_name_without_duplication_marker(self):\n test_file_name = \"uploaded_file_name\"\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def get_reffile(self, refs, detector):\n for key in refs:\n if detector in key:\n return refs[key]\n self.logger.error(\"WARNING: no file found for detector {} in {}\"\n .format(detector, refs))", "def overwrite_url(self):\n if self.has_url_overwrite:\n return self.path\n return None", "def checkExisting(self, dst):\n if dst.exists():\n msg = 'Refusing to clobber existing file \"%s\"' % (\n dst.path,)\n logging.msg(msg)\n raise errors.NoClobber(msg)", "def _get_unique_lookup_filepath(self, 
patch_idx, cfg_save_dir, prefix, ext):\n fname = prefix + \"_p_\" + str(patch_idx) + \".m_\" + str(self.uid) + ext\n fpath = os.path.join(cfg_save_dir, fname)\n return fpath", "def get_existing_filename(existing_files: List[str]) -> str:\n\n # Ask user which file only if there are multiple files\n\n if len(existing_files) == 1:\n return existing_files[0]\n\n questions = [\n {\n 'type': 'list',\n 'name': 'target_filename',\n 'message': 'Which file do you want to load ?',\n 'choices': existing_files\n }\n ]\n return prompt(questions, style=custom_style_2)[\"target_filename\"]", "def guess(filename):\n for marker in [\".stem\",\"stem.\",\".seed\",\"seed.\"]:\n if filename.find(marker)>-1: \n return (filename.replace(marker,\"\"))\n\n if \"/\" in filename:\n index = filename.rfind(\"/\")\n return ( filename[:index+1]+\"generated_\"+filename[index+1:])\n else:\n return ( \"generated_\"+filename )", "def _map_source(source):\n for pattern, replacement in \\\n settings.REFINERY_FILE_SOURCE_MAP.iteritems():\n translated_source = re.sub(pattern, replacement, source)\n if translated_source != source:\n return translated_source\n return source", "def test_get_original_file_name_match_regex(self):\n test_file_name = \"uploaded_file_name_%s_abcd123\" % settings.FILE_DUPLICATION_MARKER\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def expected_output(self):\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if not path.exists(expected_output_file):\n return None\n else:\n with open(expected_output_file, \"r\", encoding=\"utf8\") as f:\n return f.read()", "def fallback_file(cls, filename):\n file_list = (\n filename,\n path.join(path.expanduser('~'), '.config', 'hiromi.json'),\n path.join(path.expanduser('~'), '.hiromi')\n )\n for a_file in file_list:\n if path.exists(a_file):\n return a_file\n print(\n \"Please given a legal config file, or make a config file at\"\n \"~/.hiromi or ~/.config/hiromi.json\"\n )\n raise ConfigNotFoundException()", "def _get_source_path(self, docmeta: DocMetadata) -> Optional[str]:\n identifier = docmeta.arxiv_identifier\n version = docmeta.version\n file_noex = identifier.filename\n if not docmeta.is_latest:\n parent_path = self._get_parent_path(identifier, version)\n file_noex = f'{file_noex}v{version}'\n else:\n parent_path = self._get_parent_path(identifier)\n\n for extension in VALID_SOURCE_EXTENSIONS:\n possible_path = os.path.join(\n parent_path,\n f'{file_noex}{extension[0]}')\n if os.path.isfile(possible_path):\n return possible_path\n return None", "def get_missing_image(self):\n # This class should have a 'name' property so it mimics the Django file\n # field.\n return MissingProductImage()", "def _get_path_or_dummy(self, fuse_path):\n cache_path = self.converter.to_cache_path(fuse_path)\n dummy_cache_path = self.converter.add_dummy_ending(cache_path)\n if os.path.exists(cache_path):\n return cache_path\n elif os.path.exists(dummy_cache_path):\n return dummy_cache_path\n return None", "def cache_file_if_exists(self, file_path):\r\n dst = r\"C:/Users/ginger frame\"\r\n\r\n if os.path.exists(file_path):\r\n src = os.path.realpath(file_path) \r\n shutil.copy2(src, './temp')\r\n print(os.path.basename(file_path))\r\n return os.path.basename(file_path)\r\n else:\r\n print('error')\r\n sys.exit('Could not write configuration data to device, \"' + file_path + '\" does not exist.')", "def get_default(path):\n with open(path, 'r') as 
path_file:\n return path_file.read().replace('\\n', '')", "def get_original_file_name(cleanup_event):\n original_name = None\n if not cleanup_event:\n return\n try:\n original_name = cleanup_event.event_outcome_detail.split(\";\")[0]\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n cleanup_event.event_outcome_detail,\n )\n return original_name", "def reffile(self):\n return os.path.join(self.__folder, self.__name + '.ref')" ]
[ "0.64597625", "0.6178859", "0.61031616", "0.5966035", "0.5832142", "0.582151", "0.58184856", "0.5694266", "0.56546396", "0.5616646", "0.55166143", "0.5515076", "0.5506135", "0.5455651", "0.5425946", "0.5390687", "0.5366639", "0.53574777", "0.53303367", "0.53286433", "0.5327186", "0.5316791", "0.52979267", "0.52865356", "0.5276027", "0.5264857", "0.5253427", "0.5252854", "0.524383", "0.5242466" ]
0.7418505
0
Get next CID for related content.
def get_next_cid(self) -> str:
        self.position += 1
        return "img{}".format(self.position)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_id(self):\n self.id_counter += 1\n return self.id_counter - 1", "def next_collapsed_id(self):\n to_return = self.collapsed_id_counter\n self.collapsed_id_counter += 1\n return to_return", "def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1", "def __next_shape_id(self):\n cNvPrs = self.__spTree.xpath('//p:cNvPr', namespaces=_nsmap)\n ids = [int(cNvPr.get('id')) for cNvPr in cNvPrs]\n ids.sort()\n # first gap in sequence wins, or falls off the end as max(ids)+1\n next_id = 1\n for id in ids:\n if id > next_id:\n break\n next_id += 1\n return next_id", "def next_id(self):\n next_id = self._nextid\n self._nextid += 1\n return next_id", "def GetNext(self):\n if self.ids:\n return self.ids.pop()\n self.next_idx += 1\n return self.next_idx", "def getClusterVmNextId(self):\n data = self.connect('get','cluster/nextid',None)\n return data", "def _GetNextId(self):\r\n ret = self.next_id\r\n self.next_id += 1\r\n return str(self.next_id)", "def next_identity(self) -> PublicationId:\n ...", "def next_id(self):\n return self.max_id + 1", "def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid", "def _next_id(self):\n # modulo to keep within int32 (signed)\n self.correlation_id = (self.correlation_id + 1) % 2**31\n return self.correlation_id", "def next(self):\n return self.cycle.next()", "def next_jid(self):\n return self._next_jid", "def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur", "def getNext(self):\n\t\t\treturn self.next", "def get_cid(self):\n results = self.database.findall(text(\"select cid from cid_minter\"))\n if results:\n return results[0]\n else:\n err_msg = \"Database error: No CID was found in the cid_minter table.\"\n logging.error(err_msg)\n raise ValueError(err_msg)", "def next(self) -> int:\n value = self.inorder[self.index]\n self.index = self.index + 1\n return value", "def next_residue(residue) :\n #Proteins go N terminal --> C terminal\n #The next reside is bonded to the C of this atom...\n for a in residue.peptide.C.bondedTo():\n if a.parent.parent != residue:\n return a.parent.parent\n return None", "def get_next(self):\n return self.next", "def get_next(self):\n return self.next", "def _get_next_event_id():\n VenueCrawler._event_id += 1\n return VenueCrawler._event_id", "def _get_next_venue_id():\n VenueCrawler._venue_id += 1\n return VenueCrawler._venue_id", "def getNext(self):\n return self.__next", "def next(self):\n if self.cursor.nref:\n self.cursor = self.cursor.nref\n return self.cursor\n return None", "def get_next_identifier(self) -> int:\n if self.items:\n return self.items[-1].identifier + 1\n else:\n return 1", "def next_identity(self) -> OrganisationId:\n ...", "def new_id(self):\n self.next += 1\n return self.next", "def _get_next_cust_id():\n # print('Customer roster: ' + str(customers))\n key_list = []\n for customer_key in customers:\n stripped_prefix = customer_key[1:]\n # print('Adding key: ' + str(stripped_prefix))\n key_list.append(stripped_prefix)\n key_list.sort()\n last_id = int(key_list[-1])\n return 'C' + str(last_id + 1)", "def next(self):\n with self.atomicfile.locked():\n curr = self.atomicfile.read_atomic().decode(\"utf8\")\n curr = self.initial if not curr else int(curr)\n self.atomicfile.write_atomic(str(curr + 1).encode(\"utf8\"))\n return curr" ]
[ "0.61992425", "0.61363083", "0.6128795", "0.5966513", "0.59172714", "0.5896527", "0.58824", "0.58359903", "0.58097434", "0.5796506", "0.5771497", "0.5724505", "0.5684626", "0.56635547", "0.56191045", "0.55953336", "0.5580772", "0.55750984", "0.5575075", "0.55578667", "0.55578667", "0.5534142", "0.55105186", "0.5510427", "0.55081743", "0.54932594", "0.5492381", "0.5483659", "0.54792666", "0.5441547" ]
0.7154687
0
Collect images from html code. Return html with image src=cid and a list of tuples with (maintype, subtype, cid, imagebytes).
def collect_images(self, html_body: str, encoding: str = "UTF-8") -> Tuple[str, List[Tuple[str, str, str, bytes]]]:
        images = []
        reader = etree.HTMLParser(recover=True, encoding=encoding)
        root = etree.fromstring(html_body, reader)
        self.init_cid()
        same_content = {}  # type: Dict[bytes, str]
        # Search elements <img src="..."> and <input type="image" src="...">
        for image in root.xpath("//img | //input[@type='image']"):
            image_src = image.attrib["src"]
            try:
                image_content = self.load_file(image_src)
            except ImageNotFound as err:
                self.log_error(err)
                self.conditionally_raise(err)
                continue
            content_hash = hashlib.md5(image_content).digest()
            if content_hash in same_content:
                cid = same_content[content_hash]
            else:
                cid = self.get_next_cid()
                same_content[content_hash] = cid
                maintype, subtype = self._get_mime_type(image_src)
                images.append((maintype, subtype, cid, image_content))
            image.attrib["src"] = "cid:{}".format(cid)
        html_content = etree.tostring(root, encoding=encoding, pretty_print=self.pretty_print)
        return html_content.decode(encoding), images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))", "def extract_images(content):\n\n return re.findall('src=\"([^\"]+)\"', content)", "def get_captcha_image(self, page_html) -> str:\n try:\n items = page_html.select('div[class=\"ddText\"]')\n result_items = re.findall(r'\\\"data:image.*\\\"', str(items[0]))\n result_items = str(result_items).replace(\"\\\"\", \"\")\n except Exception as e:\n raise e\n else:\n return result_items", "def _get_images(self, fuzzable_request):\n res = []\n\n try:\n response = self._uri_opener.GET(fuzzable_request.get_uri(),\n cache=False)\n except:\n om.out.debug('Failed to retrieve the page for finding captchas.')\n else:\n # Do not use parser_cache here, it's not good since CAPTCHA implementations\n # *might* change the image name for each request of the HTML\n #dp = parser_cache.dpc.get_document_parser_for( response )\n try:\n document_parser = DocumentParser.DocumentParser(response)\n except BaseFrameworkException:\n return []\n \n image_path_list = document_parser.get_references_of_tag('img')\n\n GET = self._uri_opener.GET\n sha1 = hashlib.sha1\n \n result_iter = self.worker_pool.imap_unordered(GET, image_path_list)\n \n for image_response in result_iter:\n if image_response.is_image():\n img_src = image_response.get_uri()\n img_hash = sha1(image_response.get_body()).hexdigest()\n res.append((img_src, img_hash, response))\n\n return res", "def embed_images(self, html):\n if not self.SUPPORT_EMBED_IMAGES:\n raise RuntimeError('%r does not support embed_images' % type(self))\n\n return self.RE_IMG.sub(self._embed_image, html)", "def getimgs():", "def html_img_tags(self):\n return self.findall_markdown_cells(r'<img[^>]*>')", "def processHtml(htmldir, filename):\n logger = logging.getLogger(\"processHtml\")\n logger.info(\"processHtml(%s, %s)\" % (htmldir, filename))\n\n # read in the entire html file\n handle = file(filename, 'r')\n text = handle.read()\n handle.close()\n\n ##### determine all the requested images\n # that aren't set to IMG_NOT_FOUND\n candidates = re.findall(r\"img(.+)/>\", text, flags=re.MULTILINE)\n if len(candidates) <= 0:\n return []\n options = []\n for candidate in candidates:\n start = candidate.find(\"src=\")\n if start <= 0:\n continue\n candidate = candidate[start+5:]\n end = candidate.find('\"')\n if end <= start:\n end = candidate.find(\"'\")\n if end <= start:\n continue\n options.append(candidate[:end])\n # that are set to IMG_NOT_FOUND\n if IMG_NOT_FOUND in text:\n logger.info(\"IMAGE_NOT_FOUND in '%s'\" % filename)\n candidates = []\n index = 0\n while index >= 0:\n index = text.find(IMG_NOT_FOUND, index)\n end = text.find(\"</figure>\", index)\n if end < index or index < 0:\n break\n figs = re.findall(r'Missing image:\\s+(.+)</figcaption>',\n text[index:end])\n candidates.extend(figs)\n index += len(IMG_NOT_FOUND)\n logger.info(\"CANDIDATES: %s\" % str(candidates))\n options.extend(candidates)\n\n # add them to the list of missing images if not found\n results = []\n for candidate in 
options:\n candidate = os.path.join(htmldir, candidate)\n logger.info(\"looking for '%s'\" % candidate)\n if not os.path.exists(candidate):\n logger.info(\"candidate = '%s' not found\" % candidate)\n results.append(candidate)\n\n # return everything that isn't found\n return results", "def scrape(self):\n reg = re.compile(self.regex)\n images = self.soup.findAll('img')\n results = []\n for img in images:\n try:\n url = dict(img.attrs)['src']\n url = self._make_url_path(url)\n if reg.match(url):\n results.append(url)\n\n except:\n pass\n\n print 'Img tag scraping OK'\n return results", "def extract_image(page_html, family_url, folder):\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n # print(image_page_url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n\n image_link = urljoin(image_page_url, image_src)\n\n print(image_link, image_name)\n # Download image\n fetch(image_link, image_name, folder)", "def get_images(self, folder_id, raw=False):\n response = self._request(\"%s/%s/images\" % (self.folders_url,\n folder_id), headers=self.accept_header)[1]\n if raw:\n return response\n\n # the image list uses microformats which are not on the feedparser\n # whitelist, so we'll need some custom parsing\n\n sanitize_html = fp.SANITIZE_HTML\n fp.SANITIZE_HTML = 0\n parsed = fp.parse(response)\n BeautifulSoup = fp.BeautifulSoup.BeautifulSoup\n for image in parsed.entries:\n # TODO: think of a better way to automate this\n content = image.content[0].value\n image.content = fp.FeedParserDict()\n soup = BeautifulSoup(content)\n for prop in ['filename', 'imageurl', 'height', 'width',\n 'description']:\n image.content[prop] = getattr(soup, prop).text\n\n fp.SANITIZE_HTML = sanitize_html\n return parsed", "def get_images(self, ctx, page):\n is_imgur = 'source' in page.meta and page.meta['source'] == 'imgur'\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n images = []\n if is_imgur:\n pp.pprint(page.meta)\n # bind to template via json\n images = self.get_imgur_album_images(page)\n self.albums[album['slug']] = images\n else:\n # get paths of all of the images in the album\n srcs = []\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n imgs = glob.glob(\n GALLERY_DIR + album['slug'] + '/*.' 
+ file_type\n )\n\n for img in imgs:\n img_rel_path = (\n REL_GALLERY_DIR +\n album['slug'] + '/' + img.split('/')[-1]\n )\n srcs.append(img_rel_path)\n\n # split full srcs and thumb srcs from srcs into two lists\n images = []\n thumb_srcs = filter(\n lambda src: src.split('/')[-1].startswith(THUMB_PREFIX),\n srcs\n )\n for thumb_src in thumb_srcs:\n src = thumb_src.replace(THUMB_PREFIX, '')\n thumb_width, thumb_height = self.calc_img_hw(thumb_src)\n width, height = self.calc_img_hw(src)\n images.append({\n 'thumb_src': thumb_src,\n 'thumb_width': thumb_width,\n 'thumb_height': thumb_height,\n\n 'src': src,\n 'width': width,\n 'height': height,\n })\n self.albums[album['slug']] = images", "def find_img_urls(mainURL):\n \n imglist = []\n \n class IMGParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n if tag == 'img':\n imglist.append(dict(attrs)[\"src\"])\n \n URL = urlopen(mainURL)\n html = URL.read()\n \n parser = IMGParser()\n parser.feed(html)\n parser.close()\n \n return imglist", "async def images(self, ctx, *, query: str=None):\n # Handle empty query\n if query is None:\n return await ctx.error('Please provide a query!')\n\n # Using these specific headers and \"lnms\" as source, will provide divs with \"rg_meta\" classes,\n # The modern image search page being JS rendered, data in these divs are jsons with raw image URLs\n # Old image search pages, only have thumbnails and a direct link to websites\n params = {'q': quote_plus(query), 'source': 'lmns', 'tbm': 'isch'}\n async with self.aiohttp_session.get(self.url, params=params, headers=self.image_headers) as r:\n html = await r.text()\n\n # Healthy\n soup = BeautifulSoup(html, 'lxml')\n\n # Go over 4 items, json.loads the item text, and grab \"ou\" probably stands for \"original url\"\n images = []\n for i, item in enumerate(soup.select('div.rg_meta')[:4]):\n js = json.loads(item.text)\n images.append((f\"{i+1}. 
{js['st']} - {js['s']}\", js[\"ou\"]))\n newl = '\\n'\n await ctx.message.edit(content=f\"```py\\n{newl.join([x[0] for x in images])}\"\n f\"\\n# Choose the appropriate number or type 0 to leave\\n```\")\n\n def check(m):\n return m.author == ctx.author and m.content.isdigit() and m.channel == ctx.channel\n message = await self.bot.wait_for('message', check=check)\n if message.content == \"0\":\n await message.delete()\n return await ctx.message.delete()\n choice = int(message.content) - 1\n await message.delete()\n await ctx.message.edit(content=images[choice][1])", "def get_images(self, article: BeautifulSoup):\n images = []\n content = article.select_one(self.parsing_template.content)\n\n if content:\n body_images = content.select(self.parsing_template.image_element)\n else:\n body_images = None\n\n if body_images:\n for element in body_images:\n\n img = element.find('img')\n if not img:\n continue\n url = img.get(self.parsing_template.image_attribute) # TODO format url correctly\n\n try:\n text = self.get_text(element, self.parsing_template.image_text)\n except IndexError:\n text = ''\n\n try:\n photographer = self.get_text(element, self.parsing_template.image_photographer)\n except IndexError:\n photographer = ''\n\n # Image text and photographer is not separated.\n # Tries to separate out the photographer\n if self.parsing_template.photographer_delimiter:\n if text and not photographer:\n text, photographer = self.parse_photographer(text, text)\n if photographer:\n text, photographer = self.parse_photographer(text, photographer)\n\n if url:\n if photographer:\n # Removes unwanted text in the photographer\n for replace in self.parsing_template.photograph_ignore_text:\n photographer = photographer.replace(replace, '')\n photographer = photographer.replace('/', ',')\n\n if len(text) > 255:\n text = text[:254]\n\n # Separate each photograph\n photographers = []\n for photograph in photographer.split(','):\n photographer_name_split = list(filter(lambda x: x or x != ' ', photograph.split(' ')))\n if photographer_name_split:\n if len(photographer_name_split) == 1:\n lastName = photographer_name_split[0].strip(' ').strip('.')\n firstName = ''\n else:\n firstName = photographer_name_split[0].strip(' ')\n lastName = photographer_name_split[1].strip(' ').strip('.')\n photographers.append(Photographer(firstName=firstName, lastName=lastName))\n\n images.append((ArticleImage(url=url, text=text), photographers))\n\n return images", "def fix_images_encode(self, url, soup, file_descriptor):\n\t\t# Open output file, read lines, and begin parsing to replace all incomplete img src URLs\n\t\tprint(\"[+] Proceeding with updating IMG tag src attributes using: {}\".format(url))\n\t\tprint(\"[+] The src attrbitues that will be modified:\")\n\t\ttry:\n\t\t\t# Print img src URLs that will be modified and provide info\n\t\t\t# Find all <img> with src attribute and create a full URL to download and embed image(s)\n\t\t\tfor img in soup.findAll('img'):\n\t\t\t\tprint(\"* {}\".format(img))\n\t\t\t\timgurl = urllib.parse.urljoin(url, img['src'])\n\t\t\t\timage = urllib.request.urlopen(imgurl)\n\t\t\t\t# Encode in Base64 and embed\n\t\t\t\timg_64 = base64.b64encode(image.read())\n\t\t\t\timg['src'] = \"data:image/png;base64,{}\".format(img_64.decode('ascii'))\n\n\t\t\tprint(\"[+] IMG parsing was successful!\")\n\t\texcept Exception as err:\n\t\t\t# Exception may occur if file doesn't exist or can't be read/written to\n\t\t\tprint(\"[!] IMG parsing failed. 
Some images may not have URLs, ex: src = cid:[email protected].\")\n\t\t\tprint(\"L.. Details: {!s}\\n\".format(err))\n\n\t\treturn soup", "async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images", "def get_image_qm(html_src, todir):\n #print url\n\n img_url, title = img_details(html_src)\n \n r = requests.get(img_url)\n with open(todir+title+'.jpg','wb') as f:\n f.write(r.content)", "def get_url_from_images(html_images):\n urls = []\n for image in html_images:\n try:\n url = image['data-src']\n if not url.find(\"https://\"):\n urls.append(url)\n except:\n try:\n url = image['src']\n if not url.find(\"https://\"):\n urls.append(image['src'])\n except Exception as e:\n print(f'No found image sources.')\n print(e)\n return urls", "def get_images_relative_urls_from_page(page_content: str) -> list:\n\n soup = BeautifulSoup(page_content, 'lxml')\n\n return [img_tag.get('src') for img_tag in soup.findAll('img')]", "def img_alt_src(html_source):\n # =============================================================================\n # Beautiful soup\n # =============================================================================\n \n bs = BeautifulSoup(html_source, 'html.parser')\n \n #Getting all Alt text from HTML \n alt_txt = [] \n img_url = []\n for img in bs.find_all('img', alt=True):\n try:\n print(img['alt'])\n print(img['src'])\n a = img['alt']\n s = img['src']\n alt_txt.append(a)\n img_url.append(s)\n except:\n print('pass')\n \n \n cleaning(alt_txt, img_url)", "def get_img_url_from_html(self, html):\n q = pq(html)\n img_url = q(\"#img\").attr('src')\n return img_url", "def soup_process_email(input):\r\n soup = BeautifulSoup(input,'html.parser')\r\n return_list=[]\r\n # print (soup.prettify())\r\n for img in soup.find_all('img'):\r\n if '_display' in img.get('src'):\r\n trimmed_link=img.get('src').replace('_display','')\r\n # avoid duplicates\r\n if trimmed_link not in return_list and 'video_large' not in trimmed_link:\r\n return_list.append(trimmed_link)\r\n if 'video_large_display' in img.get('src'): \r\n #process the video and append it to the list of urls to process\r\n return_list.append( (soup_process_video(img.parent.get('href')))) \r\n return (return_list)", "def img_urls(self, media, type = \"low_resolution\"):\n\n imgs = {}\n\n for item in media:\n if item[\"type\"] != \"image\":\n continue\n\n imgs[item[\"id\"]] = item[\"images\"][type][\"url\"]\n\n return imgs", "def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)", "def parse_inline_attachments(self, post_html):\n if 'inline-attachment' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += 
self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_inline_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def getBody(HTMLstring, png_list):\n\n # Next, we generate all the rows but the last one...\n while len(png_list) > 4:\n HTMLstring += '<div class=\"row\">'\n for i in range(4):\n HTMLstring += ('''<div class=\"col-xs-3 imgbox\">\n <img class=\"img-responsive\" src=\"''' \n + png_list[i] + '''\" /><h5 class=\"center\">''' + png_list[i]\n + \"</h5></div>\")\n HTMLstring += \"</div>\"\n png_list = png_list[4:]\n \n # We obtain the last row by popping what remains.\n HTMLstring += '<div class=\"row\">'\n while len(png_list) > 0:\n png_file = png_list.pop(0)\n HTMLstring +=('''<div class=\"col-xs-3 imgbox\">\n <img class=\"img-responsive\" src=\"''' \n + png_file + '''\" /><h5 class=\"center\">''' + png_file\n + \"</h5></div>\")\n HTMLstring += \"</div>\"\n return HTMLstring", "def get_img_ref_from_attrs(attrs):\n\n for attr in attrs:\n if attr[0]== 'src':\n if isImage(attr[1]):\n list_of_img_refs.append(attr[1])\n\n if attr[0] == 'href':\n if isImage(attr[1]):\n list_of_img_refs.append(attr[1])", "def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images", "def replfunc(self, match):\n url = match.group(1)\n imgformat = url.split('.')[-1]\n if url.startswith('http'):\n data = urlopen(url).read()\n elif url.startswith('data'):\n img = '<img src=\"' + url + '\" ' + match.group(2) + ' />'\n return img\n else:\n with open(url, 'rb') as f:\n data = f.read()\n\n self.log.info(\"embedding url: %s, format: %s\" % (url, imgformat))\n b64_data = base64.b64encode(data).decode(\"utf-8\")\n if imgformat == \"svg\":\n img = '<img src=\"data:image/svg+xml;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n elif imgformat == \"pdf\":\n img = '<img src=\"data:application/pdf;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n else:\n img = '<img src=\"data:image/' + imgformat + \\\n ';base64,' + b64_data + '\" ' + match.group(2) + ' />'\n return img" ]
[ "0.6578264", "0.6405035", "0.636044", "0.6353167", "0.625685", "0.61547244", "0.61008567", "0.60811025", "0.6048114", "0.5902244", "0.5867286", "0.5780136", "0.5753646", "0.57383347", "0.57188696", "0.56709164", "0.5641198", "0.5579539", "0.55609024", "0.55588067", "0.5557961", "0.55546075", "0.55428463", "0.54822284", "0.5472429", "0.5441379", "0.5432452", "0.540794", "0.54047173", "0.5396674" ]
0.76466244
0
Collect attachment contents from paths or urls.
def collect_attachments(self, paths_or_urls: Iterable[str]) -> List[Tuple[str, str, str, bytes]]: attachments = [] same_content = [] # type: List[bytes] for src in paths_or_urls: try: content = self.load_file(src) except ImageNotFound as err: self.log_error(err) self.conditionally_raise(err) continue content_hash = hashlib.md5(content).digest() if content_hash in same_content: continue same_content.append(content_hash) maintype, subtype = self._get_mime_type(src) filename = os.path.basename(src) attachments.append((maintype, subtype, filename, content)) return attachments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments", "def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }", "def download_attachment(self, msg):\n path = None\n for part in msg.walk():\n if part.get_content_type() == 'application/pdf':\n\n time_prefix = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n filename = time_prefix+\"-\"+part.get_filename()\n path = os.path.join(self._DOWNLOAD_FOLDER, filename)\n\n if not os.path.isfile(path):\n with open(path, 'wb') as fb:\n fb.write(part.get_payload(decode=True))\n\n self._processed = True\n return path, self.get_company(msg['From'], msg['To'])", "def collect(self, paths):\r\n raise NotImplementedError()", "def attachments(self, val: list):\n self._attachments = []\n if val is not None:\n for item in val:\n if isinstance(item, Attachment):\n self._attachments.append(item)", "def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 'filename': decoded_data['filename']\n # 'attachments': attachments\n }", "def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)", "def attachments(self):\n return self.properties.get('attachments',\n AttachmentCollection(self.context, ResourcePath(\"attachments\", self.resource_path)))", "def _extract_inline_attachments(doc, files):\n for attr, f in files.items():\n if f.b64:\n data = f.file.replace('\\n', '')\n else:\n data = base64.encodestring(f.file.read()).replace('\\n','')\n f.file.close()\n del f.file\n del f.b64\n del f.inline\n del f.doc_id\n doc.setdefault('_attachments',{})[f.id] = {'content_type': f.mimetype,'data': data}", "def get_attachments(service, user_id, msg_id, save_path):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n if 'parts' not in message['payload']:\n if 
message['payload']['body']['size'] > 0:\n print(\"Downloading single-part attachment...\")\n file_data = base64.urlsafe_b64decode(message['payload']['body']['data'].encode('UTF-8'))\n path = ''.join([save_path, sanitize_string(message['snippet'][0:70])])\n write_file_to_location(file_data, path)\n elif 'parts' in message['payload']:\n for part in message['payload']['parts']:\n print(\"Downloading multi-part attachment...\")\n if part['filename']:\n data = get_data_from_part(service, user_id, msg_id, part)\n file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))\n path = ''.join([save_path, part['filename']])\n write_file_to_location(file_data, path)\n # Nothing to download\n else:\n return None\n\n except errors.HttpError as error:\n print(f\"An error occurred: {error}\")\n\n return msg_id", "def attachments(self):\n return self._attachments", "def parse_attachbox_attachments(self, post_html):\n if 'attachbox' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_attachbox_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def parse_inline_attachments(self, post_html):\n if 'inline-attachment' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_inline_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def attachments(self):\n return [Attachment(part) for part in self._parts]", "def extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # \"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)", "def getAttachment(mail, directory=detach_dir):#Download attachment to 
directory & return filename\n filename = []\n for part in mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n\n filename = part.get_filename()\n att_path = os.path.join(directory, filename)\n\n if not os.path.isfile(att_path) :\n fp = open(att_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n return filename", "def Get_Attachments(service, userId, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def downloadAttachments(self, page,localPath):\n return self.pm_getSpaceManager().downloadAttachments(self._unbox(page), localPath)", "def attachments(self):\n if \"attachments\" in self._prop_dict:\n return AttachmentsCollectionPage(self._prop_dict[\"attachments\"])\n else:\n return None", "def download_submission_attachment(self, url):\n\n r = requests.get(url)\n return r.content", "def get_attachment(self, attachment_name):\n attachment = self.get_attachment_meta(attachment_name)\n with attachment.open() as content:\n return content.read()", "def attachments(self):\r\n return Attachments(self)", "def parse_attachments(root, nodes, cfg):\n\n if ignore_attachments(cfg):\n return\n\n attachments = root.find('Attachments').findall('Attachment')\n\n logger.info('Parsing Attachments.')\n for attachment in attachments:\n attachment_type = attachment.find('attachmentType').text\n location = attachment.find('location').text\n object_id = attachment.find('objectID').text\n\n if is_url(attachment_type):\n nodes[object_id]['URL'] = location\n elif is_path(attachment_type):\n nodes[object_id]['path'] = location", "def collect_documents(self):\n documents = []\n ignored = []\n for path in self.paths:\n try:\n current_document = MAE_Document(path)\n except UnsupportedMIMETypeError as e:\n ignored.append(str(e))\n else:\n documents.append(current_document)\n if ignored:\n print \"Some files were ignored:\"\n for file in ignored:\n print \"\\t%s\" % file\n return documents", "def get_contents(self):\n return force_bytes(self.join_str).join(asset.get_contents() for asset in self._assets)", "def collect_local(self, path, req_tag=True):\n for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:\n if not os.path.isfile(f):\n continue\n self.collect_single(f, req_tag)", "def _get_files(self, paths: List[str]) -> List[Tuple[str, bytes]]:\n pool = multiprocessing.dummy.Pool(self._processes)\n return pool.map(self._get_file, paths) # type: ignore", "def 
attachments(self, attachments):\n\n self._attachments = attachments", "def get_attachment(dataset, question_value, main_key=\"_attachments\"):\n if question_value is not None:\n for attachment in dataset.get(main_key, []):\n if attachment.get(\"filename\", \"\").endswith(question_value):\n return attachment\n return None", "def test_get_file_attachment(self, incident_id, artifact_id, task_id, attachment_id, expected_results_1, expected_results_2):\n\n results = get_file_attachment(mocked_res_client(), incident_id, artifact_id, task_id, attachment_id)\n\n data_content = results[\"content\"]\n file_name = results[\"filename\"]\n assert expected_results_1 == file_name\n assert expected_results_2 == data_content" ]
[ "0.6221126", "0.6146354", "0.60774887", "0.5915002", "0.5801748", "0.5782203", "0.57530725", "0.57097447", "0.56753266", "0.5673091", "0.56211513", "0.5555098", "0.5532392", "0.5503337", "0.54877687", "0.5478201", "0.5437538", "0.5432417", "0.5394351", "0.53382355", "0.5331605", "0.5303731", "0.529349", "0.5282425", "0.52067", "0.5203007", "0.5196416", "0.5186149", "0.5155082", "0.5139793" ]
0.7265279
0
Get C statistics numpy record list, or return None if the file does not exist.
def load_csv_cached(filename='../apps/naive_c_stats.csv', cache={}): if filename in cache: return cache[filename] if not os.path.exists(filename): ans = None else: ans = numpy.recfromcsv(filename) cache[filename] = ans return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def getdata(filename, rw=False, verbose=False):\n sh, dt, header = getheader(filename)\n if verbose:\n print(('Reading %s...\\n%s' % (filename, header)))\n mode = ['c', 'r+']\n return np.memmap(filename, mode=mode[rw], shape=sh, dtype=dt, order='F',\n offset=512)", "def read_statistics(self):\n self.psdata=[]\n self.powerspectra=[]\n self.ds=[]\n self.dsigmasq=[]\n self.dsigma=[]\n self.bsdata=[]\n self.eqbispectra=[]\n self.fNLeq=[]\n\n for sub in range(self.Nsubs):\n self.psdata.append(np.load(self.datadir+self.filebase+\"_\"+str(sub)+\".npy\"))\n self.powerspectra.append(np.trim_zeros(self.psdata[-1][0][1:]))\n self.bsdata.append(np.load(self.datadir+self.fbbispec+\"_\"+str(sub)+\".npy\"))\n self.eqbispectra.append(self.bsdata[-1][0][1:len(self.powerspectra[-1])])\n\n self.ds.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[0])\n self.dsigmasq.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[1])\n self.dsigma = np.array([np.sqrt(dsq) for dsq in self.dsigmasq])\n\n self.klist=np.arange(1, len(self.powerspectra[-1]))*(2.*np.pi/self.Lsub)\n # subtract the mean ds\n self.ds = self.ds - np.mean(self.ds)\n self.fNLeq=np.mean(self.eqbispectra, axis=0)\n self.fNLeqsubs=np.mean(self.eqbispectra, axis=1)\n self.fNLeqds=[]\n for i in range(len(self.eqbispectra)):\n self.fNLeqds.append(np.array([self.ds[i]*self.eqbispectra[i][j] for j in range(45)]))", "def read_cs_stats(cs_stats_file=None):\n\n if not cs_stats_file:\n file_path_name = os.path.join('data', 'piqc_db', 'CS_STATS_DB.txt')\n cs_stats_file = resource_filename(__name__, file_path_name)\n\n cs_stats = defaultdict(_dd)\n\n with open(cs_stats_file, 'r') as fid:\n reader = csv.reader(\n fid, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n for row in reader:\n corr = Correlation(row[0], (row[1], ), row[2])\n cs_stats[corr]['mode'] = row[3]\n cs_stats[corr]['avg'] = row[4]\n cs_stats[corr]['std'] = row[5]\n cs_stats[corr]['min95'] = row[6]\n cs_stats[corr]['max95'] = row[7]\n return cs_stats", "def get_events(self):\n events_path = self.config.get('filename_rasterization_events')\n if events_path is not None:\n summary = pd.read_csv(events_path)\n return summary\n else:\n return None", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def DataFromFileCache(self,FilePath):\n # dont want to throw an error if the high res doesnt have a separation\n return BinaryHDF5Io.ReadWaveIntoWaveGroup(FilePath,ErrorOnNoSep=False)", "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create 
datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def read(filename):\n records = Parser.__load_csv(filename)\n return np.array(records)", "def fstat(self):\n return np.squeeze(self._fstat)", "def _readcvcfolder(self):\n # Initialize\n scanrecinfo = ScanRecInfo()\n samptimeset = []\n freqset = []\n try:\n scanrecinfo.read_scanrec(self.filefolder)\n except Exception:\n warnings.warn(\"Could not read session header.\"\n +\" Will try filefolder name...\")\n try:\n obsfolderinfo = self._parse_cvcfolder(self.filefolder)\n except ValueError as er:\n print(er)\n scanrecinfo.scanrecparms = None\n else:\n spw = obsfolderinfo['rcumode']\n nqz = modeparms.rcumode2nyquistzone(spw)\n sbs = modeparms.seqarg2list(obsfolderinfo['subband'])\n freqspec_hi = modeparms.sb2freq(sbs[-1], nqz)\n scanrecinfo.set_scanrecparms(obsfolderinfo['datatype'],\n str(freqspec_hi),\n obsfolderinfo['duration_tot'],\n obsfolderinfo['pointing'],\n obsfolderinfo['integration'])\n scanrecinfo.scanrecparms['rcumode'] = spw\n scanrecinfo.set_stnid(obsfolderinfo['stnid'])\n scanrecinfo.calibrationfile = None\n print(\"Read in filefolder meta.\")\n # Select only data files in folder (avoid CalTable*.dat files)\n ls = os.listdir(self.filefolder)\n filenames = [filename for filename in ls if filename.endswith('.dat')\n and not filename.startswith('CalTable')]\n filenames.sort() # This enforces chronological order\n for cvcfile in filenames:\n cvcdim_t = (os.path.getsize(os.path.join(self.filefolder, cvcfile))\n // self.__get_cvc_dtype().itemsize)\n # Try to get obsfile header\n try:\n (bfilename, _dat) = cvcfile.split('.')\n ymd, hms, ldattype = bfilename.split('_', 2)\n if '_' in ldattype:\n ldattype, _rest = ldattype.split('_',1)\n hfilename = ymd+'_'+hms+'_'+ldattype+'.h'\n hfilepath = os.path.join(self.filefolder, hfilename)\n obsinfo = LDatInfo.read_ldat_header(hfilepath)\n scanrecinfo.add_obs(obsinfo)\n except:\n warnings.warn(\n \"Couldn't find a header file for {}\".format(cvcfile))\n _datatype, t_begin = self._parse_cvcfile(os.path.join(self.filefolder, cvcfile))\n\n # Compute time of each autocovariance matrix sample per subband\n integration = scanrecinfo.get_integration()\n obscvm_datetimes = [None] * cvcdim_t\n for t_idx in range(cvcdim_t):\n t_delta = datetime.timedelta(\n seconds=t_idx * integration\n )\n obscvm_datetimes[t_idx] = t_begin + t_delta\n samptimeset.append(obscvm_datetimes)\n\n # Compute frequency of corresponding time sample\n rcumode = scanrecinfo.get_rcumode()\n nz = modeparms.rcumode2nyquistzone(rcumode)\n if scanrecinfo.get_datatype() == 'acc':\n freqs = modeparms.rcumode2sbfreqs(rcumode)\n else:\n sb = obsinfo.sb\n freq = modeparms.sb2freq(sb, nz)\n freqs = [freq] * cvcdim_t\n freqset.append(freqs)\n return scanrecinfo, filenames, 
samptimeset, freqset", "def loadFeatureStats():\n print 'Loading feature statistics...'\n featurestats = np.loadtxt(os.path.abspath(args.featurestats), dtype=np.float32)\n print 'Done.'\n return featurestats", "def csvread(file):\r\n thisfile = open(file)\r\n thisreader = csv.reader(thisfile)\r\n filelist = np.array(list(thisreader))\r\n return filelist", "def get_data(stage=0):\n return get_files(stage)[1]", "def return_file_read(_):\n return [\"scorevideo LOG\", \"File: log.mat\"]", "def get_data(self, n=-1): \n try:\n return Profile(os.path.join(self.name, self.files[n]))\n except IndexError:\n return None", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def get_statistics():\n logger.info(\"Started request\")\n if os.path.exists(app_config['datastore']['filename']):\n with open(app_config['datastore']['filename']) as f:\n data = json.loads(f.read())\n\n logging.debug(\"Request data: {}\".format(data))\n logging.info(\"Request completed\")\n\n return data, 200\n else:\n logger.error(\"File not found\")\n return 404", "def read_data(file):\n with rasterio.open(file) as f:\n data = f.read(1)\n profile = f.profile\n return data, profile", "def load_dataset_stats(config):\n filename = None\n if config.data.dataset == 'CIFAR10':\n filename = 'assets/stats/cifar10_stats.npz'\n elif config.data.dataset == 'CELEBA':\n filename = 'assets/stats/celeba_stats.npz'\n elif config.data.dataset == 'LSUN':\n filename = f'assets/stats/lsun_{config.data.category}_{config.data.image_size}_stats.npz'\n else:\n raise ValueError(f'Dataset {config.data.dataset} stats not found.')\n\n with tf.io.gfile.GFile(filename, 'rb') as fin:\n stats = np.load(fin)\n return stats", "def getData(self, filename):\n\n fd = pyfits.open(filename)\n if fd[0].data is None:\n hdu = fd[1]\n else:\n hdu = fd[0]\n header = hdu.header\n if self.type == DOUBLE_PREC:\n data = hdu.data.astype(np.float64)\n else:\n data = hdu.data.astype(np.float32)\n fd.close()\n\n return (data, header)", "def get_data(found_file, created_file):\n\n try:\n fh_f = fits.open(found_file)\n print('Found file has: ', fh_f.info())\n except Exception:\n print(' FATAL ERROR: Unable to open found file ', found_file)\n\n try:\n fh_c = fits.open(created_file)\n print('Created file has: ', fh_c.info())\n except Exception:\n print(' FATAL ERROR: Unable to open created file ', created_file)\n\n try:\n data_f = fh_f['SCI'].data\n except Exception:\n print(' FATAL ERROR: data for found data was expected in SCI extension')\n\n try:\n data_c = fh_c['SCI'].data\n except Exception:\n try:\n data_c = fh_c[0].data\n except Exception:\n print(' FATAL ERROR: created data expected in either SCI or 0 extensions')\n\n return fh_f, fh_c, data_f, data_c", "def get_mcmc_samples(self, filename='mcmc.dat'):\n import numpy as np\n import os\n from .nest2pos import resample_mcmc_chain\n from numpy.lib.recfunctions import stack_arrays\n\n mcmc_samples = []\n for file in os.listdir(self.NS.output_folder):\n if 'mcmc_chain' in file:\n mcmc_samples.append(resample_mcmc_chain(np.genfromtxt(os.path.join(self.NS.output_folder,file), names = True)))\n os.system('rm {0}'.format(os.path.join(self.NS.output_folder,file)))\n\n if not mcmc_samples:\n self.logger.critical('ERROR, no MCMC samples found!')\n return None\n\n # now stack all the mcmc chains\n mcmc_samples = stack_arrays([p for p in mcmc_samples])\n if filename:\n np.savetxt(os.path.join(\n self.NS.output_folder, filename),\n self.mcmc_samples.ravel(),\n header=' '.join(self.mcmc_samples.dtype.names),\n 
newline='\\n',delimiter=' ')\n return mcmc_samples", "def smartmeter_data():\n path = '/datc/opschaler/smartmeter_data'\n file_paths = np.array(glob.glob(path + \"/*.csv\"))\n\n print('Detected %s smartmeter_data files.' % len(file_paths))\n dwelling_ids = np.array(list((map(lambda x: x[-15:-4], file_paths))))\n\n return file_paths, dwelling_ids", "def get_data():\r\n spatial_expmat = np.load('/home/anniegao/spatial_magan/data/spatial_pca_with_coords.npz')['arr_0']\r\n spatial_expmat[:,100:] *= 5\r\n rna_expmat = np.load('/home/anniegao/spatial_magan/data/rna_pca_sampled.npz')['arr_0']\r\n spatial_pca_components = np.load('/home/anniegao/spatial_magan/data/spatial_pca_100components.npz')['arr_0']\r\n rna_pca_components = np.load('/home/anniegao/spatial_magan/data/rna_pca_100components.npz')['arr_0']\r\n spatial_cluster_labels = np.load('/home/anniegao/spatial_magan/data/spatial_cluster_3_labels_phate.npz')['arr_0']\r\n rna_cluster_labels = np.load('/home/anniegao/spatial_magan/data/rna_cluster_5_labels_sampled.npz')['arr_0']\r\n return spatial_expmat, rna_expmat, spatial_pca_components, rna_pca_components, spatial_cluster_labels, rna_cluster_labels", "def load_cache(self, filename):\n output_df = cudf.read_hdf(filename, key=self.uid)\n return output_df", "def get_datfile(filename):\n if ARGV.get(DEBUG_OPT):\n err_print('Getting datfile from \"{}\"'.format(filename))\n\n try:\n with open(filename, 'rb') as pickle_file:\n try:\n (cache, readlist) = pickle.load(pickle_file)\n pickle_file.close()\n except (EOFError, ValueError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n except (FileNotFoundError, PermissionError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n return (cache, readlist)", "def _parse(file_name) -> Tuple[Optional[List[List[float]]], Optional[IOError]]:\n try:\n with open(pkg_resources.resource_filename(__data_pkg__, file_name)) as file_handler:\n next(file_handler)\n return [[float(x) for x in line.split(\" \") if len(x) > 0] for line in file_handler], None\n except IOError as err:\n return None, err", "def read(filename: str)-> List [CrimeStatistics]:\n #return [] #stub\n # Template from htDAP\n \n #loc contains all results read so far\n loc = [] #type List[CrimeStatistics]\n \n with open(filename) as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n \n \n for row in reader:\n university = row[0].replace(\"4\", \"\")\n campus = parse_campus(row[1])\n enrollment = parse_int(row[2].replace(\",\", \"\"))\n violent_crimes = parse_int(row[3])\n property_crimes = parse_int(row[8])\n arson = parse_int(row[12])\n \n if valid(enrollment):\n cs = CrimeStatistics(university,\n campus,\n enrollment,\n violent_crimes,\n property_crimes,\n arson)\n \n loc.append(cs)\n return loc" ]
[ "0.55165815", "0.54774755", "0.54651594", "0.53451926", "0.5323847", "0.524924", "0.51764786", "0.514426", "0.51365834", "0.5109591", "0.5106836", "0.50748354", "0.50632674", "0.50623465", "0.5045517", "0.5033683", "0.5028954", "0.5026244", "0.502574", "0.5021514", "0.5018565", "0.4996742", "0.49755153", "0.49689394", "0.49625832", "0.49614945", "0.49550417", "0.49542442", "0.4951024", "0.4948098" ]
0.5491351
1
Get the lines of main program logic, excluding various less important information such as imports/comments/tests, and globals (typically used for tests).
def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None): if s is None: s = open(filename, 'rt').read() L = s.split('\n') # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython if not is_c: for i in range(len(L)): if L[i].strip().startswith("'") and L[i].strip().endswith("'"): L[i] = '' i = 0 while i < len(L): found = False for triple_quote in ['"""', "'''"]: if L[i].strip().startswith(triple_quote): L[i] = L[i].strip()[3:] for j in range(i, len(L)): if triple_quote in L[j]: found = True L[j] = '' if found: break i = j+1 if not found: i += 1 else: begin_comment = '/*' end_comment = '*/' i = 0 while i < len(L): found = False if begin_comment in L[i]: rest = L[i][L[i].index(begin_comment)+len(begin_comment):] L[i] = L[i][:L[i].index(begin_comment)] if end_comment in rest: found = True i += 1 else: for j in range(i+1, len(L)): if end_comment in L[j]: found = True L[j] = L[j][L[j].index(end_comment)+len(end_comment):] else: L[j] = '' if found: break i = j + 1 if not found: i += 1 # util.print_header('Lines before exclude_tests:' + filename, '\n'.join(L)) # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython if exclude_tests: # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper if not is_c: methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split() else: methods = ['int main', 'void main'] i = 0 while i < len(L): L_i_strip = L[i].strip() if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or any(L_i_strip.startswith('cdef ' + method) for method in methods))) or (is_c and (any(L_i_strip.startswith(method) for method in methods)))): L[i] = '' for j in range(i+1, len(L)): L_j_strip = L[j].strip() c_ok = True if is_c: c_ok = L_j_strip != '{' and L_j_strip != '}' if not L[j].startswith(' ') and not L[j].startswith('\t') and not len(L[j].strip()) == 0 and c_ok: break else: L[j] = '' i = j elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c: L[i] = '' i += 1 else: i += 1 # util.print_header('Lines before exclude_imports:' + filename, '\n'.join(L)) if exclude_imports: if not is_c: L = [x for x in L if not x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')] else: L = [x for x in L if not x.lstrip().startswith('#include')] # util.print_header('Lines before exclude_comments:' + filename, '\n'.join(L)) if exclude_comments: if not is_c: L = [x for x in L if not x.lstrip().startswith('#') and not x.strip() == 'pass'] else: L = [x for x in L if not x.lstrip().startswith('//')] # util.print_header('Lines before exclude_globals:' + filename, '\n'.join(L)) if exclude_globals and not is_c: L = [x for x in L if (x.startswith(' ') or x.startswith('\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))] # util.print_header('Lines before exclude_blank:' + filename, '\n'.join(L)) if is_c: # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported lines in Halide paper L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')] if exclude_blank: L = [x for x in L if not len(x.strip()) == 0] if verbose: 
util.print_header('Final lines for:' + filename, '\n'.join(L)) return len(L)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lines_without_stdlib(self):\n prev_line = None\n current_module_path = inspect.getabsfile(inspect.currentframe())\n for module_path, lineno, runtime in self.lines:\n module_abspath = os.path.abspath(module_path)\n if not prev_line:\n prev_line = [module_abspath, lineno, runtime]\n else:\n if (not check_standard_dir(module_path) and\n module_abspath != current_module_path):\n yield prev_line\n prev_line = [module_abspath, lineno, runtime]\n else:\n prev_line[2] += runtime\n yield prev_line", "def stripped_lines(lines, ignore_comments, ignore_docstrings, ignore_imports):\n if ignore_imports:\n tree = astroid.parse(\"\".join(lines))\n node_is_import_by_lineno = (\n (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom)))\n for node in tree.body\n )\n line_begins_import = {\n lineno: all(is_import for _, is_import in node_is_import_group)\n for lineno, node_is_import_group in groupby(\n node_is_import_by_lineno, key=lambda x: x[0]\n )\n }\n current_line_is_import = False\n\n strippedlines = []\n docstring = None\n for lineno, line in enumerate(lines, start=1):\n line = line.strip()\n if ignore_docstrings:\n if not docstring and any(\n line.startswith(i) for i in ['\"\"\"', \"'''\", 'r\"\"\"', \"r'''\"]\n ):\n docstring = line[:3]\n line = line[3:]\n if docstring:\n if line.endswith(docstring):\n docstring = None\n line = \"\"\n if ignore_imports:\n current_line_is_import = line_begins_import.get(\n lineno, current_line_is_import\n )\n if current_line_is_import:\n line = \"\"\n if ignore_comments:\n line = line.split(\"#\", 1)[0].strip()\n strippedlines.append(line)\n return strippedlines", "def getsourcelines(object):\r\n lines, lnum = findsource(object)\r\n\r\n if ismodule(object): return lines, 0\r\n else: return getblock(lines[lnum:]), lnum + 1", "def get_cpython_lines():\n cpython_url = 'https://github.com/python/cpython/blob/master/Python/pythonrun.c'\n soup = BeautifulSoup(requests.get(cpython_url).text, 'html.parser')\n cpython_soup = soup.find(\"div\",{\"itemprop\":\"text\", \"class\":\"Box-body p-0 blob-wrapper data type-c\"})\n tds = cpython_soup.find_all('td', {\"class\":\"blob-code blob-code-inner js-file-line\"})\n cpython_code_lines = []\n for td in tds:\n cpython_code_lines.append(\n ''.join([thing.text+\" \" for thing in td.find_all('span') if len(thing.text)>1]))\n return(cpython_code_lines)", "def getsourcelines(object):\n lines, lnum = findsource(object)\n\n if inspect.ismodule(object): return lines, 0\n else: return inspect.getblock(lines[lnum:]), lnum + 1", "def wrapped_getlines(filename, globals):\n lines = orig(filename, globals)\n source = self.format_source(\"\".join(lines))\n\n if sys.version_info < (3,):\n source = self.try_to_encode(source)\n\n return source.splitlines(True)", "def getDebugLines(self):\n return self._get_table_info() + self._get_avatar_info() + self._get_player_info()", "def get_source_lines(self, filename, lineno, context=0):\n if not filename or not lineno:\n return ''\n\n return ''.join([' ' + linecache.getline(filename, line) for line in range(lineno - context, lineno + context + 1)])", "def main():\n # get_history_using_HTTP()\n # merge_files()\n # remove_lines()\n remove_duplicated_lines()", "def find_programme_block(lines):\n top_index = [idx for idx, line in enumerate(lines[:MAX_HEADER_HEIGHT]) \\\n if PROGRAMME_RE.match(line)]\n bottom_index = [idx for idx, line in enumerate(lines[-MAX_FOOTER_HEIGHT:]) \\\n if FOOTER_RE.match(line)]\n if len(top_index) and len(bottom_index):\n return [i for i in 
lines[top_index[0]+1:-(MAX_FOOTER_HEIGHT-bottom_index[0])] \\\n if i.strip()]\n return None", "def main_code():\n pass", "def get_extra_attestation(self):\n main = sys.modules['__main__']\n main_source = inspect.getsource(main)\n d = hashlib.sha1()\n d.update(main_source)\n return base64.b64encode(d.digest())", "def _get_multiline(self):\n lines = []\n line = self._get_line()\n lines.append(line)\n if line[3:4] == \"-\":\n code = line[:3]\n while 1:\n nextline = self._get_line()\n lines.append(nextline)\n if nextline[:3] == code and nextline[3:4] != \"-\":\n break\n return lines", "def preprocess_main():", "def lines_of_code(project: Project) -> int:\n ret = sh.cloc(\"--quiet\", \"--include-lang=Python\", \"--yaml\", str(project.root))\n ret_obj = list(yaml.safe_load_all(str(ret)))\n return ret_obj[0][\"Python\"][\"code\"]", "def crunch(self):\n while True:\n lst = self.want_line(r'\\s*\\.file\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.globl\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.ident\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.section\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.type\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.size\\s+(.*)')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(bss)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(data)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n lst = self.want_line(r'\\s*\\.(text)\\s+')\n if lst:\n self.erase(lst[0])\n continue\n break\n if osarch_is_amd64():\n self.crunch_amd64(lst)\n elif osarch_is_ia32():\n self.crunch_ia32(lst)\n self.__tag = None", "def dead_code():\n with safe_cd(SRC):\n exclusions = \"--exclude *settings.py,migrations/,*models.py,*_fake.py,*tests.py,*ui/admin.py\"\n if IS_TRAVIS:\n command = (\n \"{0} vulture {1} {2}\".format(PYTHON, PROJECT_NAME, exclusions)\n .strip()\n .split()\n )\n else:\n command = (\n \"{0} vulture {1} {2}\".format(PIPENV, PROJECT_NAME, exclusions)\n .strip()\n .split()\n )\n\n output_file_name = \"dead_code.txt\"\n with open(output_file_name, \"w\") as outfile:\n env = config_pythonpath()\n subprocess.call(command, stdout=outfile, env=env)\n\n cutoff = 120\n num_lines = sum(1 for line in open(output_file_name) if line)\n if num_lines > cutoff:\n print(\n \"Too many lines of dead code : {0}, max {1}\".format(num_lines, cutoff)\n )\n exit(-1)", "def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')", "def readlines(self):\n return [\"\"] + self.get(\"1.0\", END).split(\"\\n\")[:-1]", "def remove_curl_debug_lines(text: str) -> str:\n lines = text.split(\"\\n\")\n lines = [line for line in lines if not line.startswith(\"**\")]\n return \"\\n\".join(lines)", "def find_lines(self):\n return []", "def remove_warnings(self, program):\n lines = program.split(\"\\n\")\n clean = []\n for line in lines:\n if line.startswith(\"Dafny program verifier finished\"):\n pass\n elif re.search(\"Warning: .*No terms found\", line):\n pass\n elif re.search(\"Warning: the type of the other operand\", line):\n pass\n else:\n clean.append(line)\n return \"\\n\".join(clean)", "def cmdline_main(config: Config) -> int | None:\n if config.option.generate_missing:\n return show_missing_code(config)\n return None # Make 
mypy happy", "def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def pre_mutation(context):\n line = context.current_source_line.strip()\n if context.current_line_index != 0:\n prev_line = context.source_by_line_number[context.current_line_index - 1].strip()\n else:\n prev_line = \"\"\n\n if line.startswith(\"logger.\") or prev_line.startswith(\"logger.\"):\n context.skip = True\n if line.startswith(\"logger = structlog\"):\n context.skip = True\n if line.startswith(\"cls.__doc__\"):\n context.skip = True\n\n # This file is copied verbatim and is not tested\n if context.filename.endswith(\"crypt.py\"):\n context.skip = True", "def get_entry_points():\n ret = []\n\n # global roots\n ret.extend(get_globals())\n # dynamic global roots\n ret.extend(get_dyn_globals())\n # stacks and local roots\n ret.extend(walk_ocaml_stacks())\n\n # global C roots\n ret.extend(get_global_roots(\"caml_global_roots\"))\n ret.extend(get_global_roots(\"caml_global_roots_young\"))\n ret.extend(get_global_roots(\"caml_global_roots_old\"))\n\n # finalised values\n ret.extend(get_final_roots())\n\n # scan_roots_hook\n traverse_scan_roots_hook()\n return ret", "def detect_rust(src):\n lines = []\n in_code_block = False\n start_of_code_block = 0\n\n for i, line in enumerate(src.splitlines()):\n if '```rust' in line:\n start_of_code_block = i\n in_code_block = True\n elif '```' in line and in_code_block:\n lines.append((start_of_code_block + 1, i - 1))\n in_code_block = False\n\n return lines", "def remove_debug_line_numbers(contents):\n lines = contents.splitlines()\n # split each line on \":\"\n lines = [l.split(\":\", 3) for l in lines]\n # join each line back together while ignoring the\n # 3rd column which is the line number\n lines = [len(l) > 3 and \":\".join(l[3:]) or l for l in lines]\n return \"\\n\".join(lines)", "def program_data(progf):\r\n if os.path.exists(progf):\r\n prog = \"\"\r\n for line in open(progf, \"r\", encoding=\"utf-8\"):\r\n line = line.split(\"#\")[0]\r\n prog += line\r\n prog = prog.split()\r\n return prog" ]
[ "0.67089903", "0.58544457", "0.5778849", "0.5775647", "0.57722783", "0.56521636", "0.563037", "0.5536816", "0.551413", "0.5475512", "0.54629", "0.54422885", "0.54418725", "0.5426173", "0.5407686", "0.53450173", "0.53443503", "0.5323852", "0.5322268", "0.5320133", "0.5281814", "0.5270845", "0.52692586", "0.5251834", "0.52389246", "0.5235897", "0.5234345", "0.51921415", "0.5181613", "0.5168568" ]
0.6400236
1
Estimate distance given estimated sensor locations.
def compute_distance_with_sensor_and_obj_loc(sensor_loc, obj_loc): estimated_distance = scipy.spatial.distance.cdist(obj_loc, sensor_loc, metric='euclidean') return estimated_distance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if 
x]\n\n return selection, selectionnames", "def estimate_dists(self) -> np.array:\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )", "def distances(self):", "def estimated_distance(self, log=False):\n\t\t\n\t\tx0 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 0.0)\n\t\tx1 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 1.0)\n\n\t\t# Derivative at x=0 and x=1\n\t\tu = x0\n\t\tv = x1\n\t\t# Initial height at x=0\n\t\ty = abs(self.velocity)\n\n\t\tif log:\n\t\t\tprint(f'u: {u}, v: {v}, y: {y}\\nEstimated distance: {get_positive_area(u, v, y)}\\n')\n\t\t\n\t\treturn get_positive_area(u, v, y)", "def calc_distance(self, observation):\n actual_obs = observation[0]\n scrn_player = actual_obs.observation.feature_screen.player_relative\n scrn_select = actual_obs.observation.feature_screen.selected\n scrn_density = actual_obs.observation.feature_screen.unit_density\n\n state_added = scrn_select + scrn_density\n\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n\n # first step\n if np.sum(scrn_select) == 0:\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n # marine behind beacon\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n else:\n # normal navigation\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 3), axis=0).round()\n\n beacon_center = np.mean(self.xy_locs(scrn_player == 3), axis=0).round()\n #\n # print(state_added)\n # print(\"---- Marine {} | {} Beacon ----\".format(marine_center, beacon_center))\n # time.sleep(0.2)\n distance = math.hypot(beacon_center[0] - marine_center[0],\n beacon_center[1] - marine_center[1])\n\n return beacon_center, marine_center, distance", "def distance_to_origin(self):\n\n self.D = edist(self.isomap,\n np.zeros([1, self.isomap.shape[1]])).flatten()", "def test_EstimateDistances(self):\n d = EstimateDistances(self.al, JC69())\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n # excercise writing to file\n d.writeToFile('junk.txt')\n try:\n os.remove('junk.txt')\n except OSError:\n pass # probably parallel", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def get_distances(self):\n return DistanceSensors(*self.bot_client.send_command(_Command.GetDistances))", "def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = 
geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate", "def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)", "def distances(reference_location, locations):\n differences = np.array(locations) - np.array(reference_location)\n distances_ = np.sqrt(np.sum(differences**2, axis=1))\n return distances_", "def _update_distance_(self):\n pass", "def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)", "def distance_train(self):\n\n for self.epoch in range(self.args.epochs):\n # switch to train mode\n self.set_train()\n data_loading_time = 0\n gpu_time = 0\n before_op_time = time.time()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n data_loading_time += (time.time() - before_op_time)\n before_op_time = time.time()\n # -- PUSH INPUTS DICT TO DEVICE --\n self.inputs_to_device(inputs)\n\n # -- DISTANCE ESTIMATION --\n outputs, features = self.predict_distances(inputs)\n\n # -- POSE ESTIMATION --\n outputs.update(self.predict_poses(inputs, features))\n\n # -- PHOTOMETRIC LOSSES --\n losses, outputs = self.photometric_losses(inputs, outputs)\n\n # -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --\n self.optimizer.zero_grad()\n losses[\"distance_loss\"].mean().backward()\n self.optimizer.step()\n\n duration = time.time() - before_op_time\n gpu_time += duration\n\n if batch_idx % self.args.log_frequency == 0:\n self.log_time(batch_idx, duration, losses[\"distance_loss\"].mean().cpu().data,\n data_loading_time, gpu_time)\n self.distance_statistics(\"train\", inputs, outputs, losses)\n data_loading_time = 0\n gpu_time = 0\n\n self.step += 1\n before_op_time = time.time()\n\n self.lr_scheduler.step()\n\n if (self.epoch + 1) % self.args.save_frequency == 0:\n self.save_model()\n\n print(\"Training complete!\")", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def __calculate_estimation(self):\r\n estimation = 0.0\r\n for index_cluster in range(0, len(self.__clusters)):\r\n cluster = self.__clusters[index_cluster]\r\n index_medoid = self.__current[index_cluster]\r\n for index_point in cluster:\r\n estimation += euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[index_medoid])\r\n\r\n return estimation", "def distance(known_loc,found_loc,N_vars,):\n undersqrt=np.zeros(N_vars)\n for i in (np.arange(N_vars)):\n undersqrt[i] =(known_loc[i]-found_loc[i])**2\n dist = np.sqrt(sum(undersqrt))\n\n return dist", "def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)", "def 
__calculate_estimation(self):\n estimation = 0.0\n for index_cluster in range(0, len(self.__clusters)):\n cluster = self.__clusters[index_cluster]\n index_medoid = self.__current[index_cluster]\n for index_point in cluster:\n estimation += euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[index_medoid],\n )\n\n return estimation", "def test_stations_by_distance():\n station_list = build_station_list()\n #test for stations closest to cambridge city coordinates\n station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))\n output = [(station.name, distance) for (station, distance) in station_list_sort]\n for n in range(1, len(station_list)):\n #make sure that the distance of the previous station to the point is less than the next one in the list\n assert output[n-1][1] <= output[n][1]", "def distances(self):\n self._sort_measurements()\n return self._distances", "def augmentDistance(self):\n\n for key,value in self._models.iteritems():\n src=[float(i) for i in value['src'].replace('#',' ').split()]\n tgt=[float(i) for i in value['tgt'].replace('#',' ').split()]\n\n dist = haversine((np.mean(src[0:2]),np.mean(src[2:])),\n (np.mean(tgt[0:2]),np.mean(tgt[2:])),\n miles=True)\n self._models[key]['distance'] = dist\n\n return", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def calculateDistances(df):\n return", "def cal_dist(origs,dests):\n\tradius = 6371.009 # km\n\tif origs.ndim:\n\t\tlat1, lon1 = origs\n\t\tlat2, lon2 = dests\n\telse:\n\t\tlat1 = origs[0,:]\n\t\tlon1 = origs[1,:]\n\t\tlat2 = dests[0,:]\n\t\tlon2 = dests[1,:]\n\tdlat = (lat2-lat1) / 180. * np.pi\n\tdlon = (lon2-lon1) / 180. * np.pi\n\ta = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(lat1 / 180. * np.pi) \\\n\t\t* np.cos(lat2 / 180. * np.pi) * np.sin(dlon/2) * np.sin(dlon/2)\n\tc = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\n\treturn radius * c", "def estimate(self, xDest, yDest):\n dx = xDest - self.xPos\n dy = yDest - self.yPos\n # Euclidian Distance\n d= np.sqrt(dx**2 + dy**2)\n \n # Octile Distance\n #d = max(dx,dy)+.4*min(dx,dy)\n \n return(d)" ]
[ "0.62089807", "0.597473", "0.59654075", "0.5948641", "0.589891", "0.58918214", "0.58761024", "0.5871087", "0.5824022", "0.5823167", "0.5822607", "0.5809026", "0.57345897", "0.57335913", "0.5690579", "0.56877965", "0.5680121", "0.56717455", "0.56000197", "0.5594889", "0.55837417", "0.5577344", "0.55768746", "0.5574946", "0.556474", "0.5551849", "0.5535584", "0.552888", "0.5528466", "0.54993737" ]
0.6066834
1
Find and read the observing log file.
def load_obslog(pattern, fmt='obslog', verbose=True): # find observing log in the current workin gdirectory logname_lst = [fname for fname in os.listdir(os.curdir) if re.match(pattern, fname)] if len(logname_lst)==0: print('No observation log found') return None elif len(logname_lst)==1: select_logname = logname_lst[0] elif len(logname_lst)>1: nlog = len(logname_lst) # maximum length of log filename maxlen = max([len(logname) for logname in logname_lst]) # maximum length of log number maxdgt = len(str(nlog)) fmt_string = (' - [{{:{:d}d}}] {{:{:d}s}} ' 'Last modified in {{:s}}').format(maxdgt, maxlen) # build a list of (filename, modified time) nametime_lst = [(logname, os.path.getmtime(logname)) for logname in logname_lst] # sort with last modified time nametime_lst = sorted(nametime_lst, key=lambda v:v[1]) # print lognames one by one for i, (logname, mtime) in enumerate(nametime_lst): t = time.localtime(mtime) time_str = '{0:02d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format( *t) print(fmt_string.format(i, logname, time_str)) # repeat the loop until user give a valid logname ID while(True): string = input('Select an observing log: ') if string.isdigit() and int(string) < nlog: select_logname = nametime_lst[int(string)][0] break elif len(string.strip())==0: print('Warning: no logfile selected') else: print('Warning: {} is not a valid log ID'.format(string)) else: pass if verbose: message = 'Load obslog file: "{}"'.format(select_logname) print(message) logtable = read_obslog(select_logname, fmt=fmt) return logtable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_linelog():", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def load(logFile):\n pass #TODO", "def get_log():\n set_ctime()\n f = open(log_path, 'r')\n o = get_offset()\n f.seek(int(o))\n return f", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def get_added_logs(self):\n with open(self.path, \"r\") as log_file:\n log_file.seek(self.position)\n contents = log_file.read()\n self.position = log_file.tell()\n return contents", "def readtxt(obslog):\n\n logger = log.getLogger('obslog.readtxt')\n\n if not os.path.exists(obslog):\n logger.error('Cannot access %s', obslog)\n raise SystemExit\n\n logger.info('Reading %s', obslog)\n\n with open(obslog) as f: # Since we will have to go through the data twice, read the whole file at once.\n data = f.readlines()\n\n header = ['Observation ID', 'Data Labels', 'File Numbers', 'Dataset UT', 'Target Name', 'Filters', 'Slit',\n 'Grating/Wavelength', 'Camera/Prism', 'ExpTime/LNR/Coadds', 'ACQ']\n\n pattern = dict() # Enforce formatting rules to avoid parsing comments as data:\n pattern['Observation ID'] = re.compile(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}-[0-9]+$')\n pattern['Data Labels'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['File Numbers'] = re.compile(r'[0-9]+-*[0-9]*') # 1, 2-3, 45-67, 890-1234\n pattern['Dataset UT'] = re.compile(r'^[0-9]{2}:[0-9]{2}:[0-9]{2}$') # 09:58:15\n pattern['Target Name'] = re.compile(r'[a-zA-Z0-9_-]+') # Match any string\n pattern['Filters'] = re.compile(r'[A-Z0-9\\-]+') # H, XD, H2, X, J, H\n pattern['Slit'] = re.compile(r'[a-zA-Z0-9]+') # 0.675, ACQ, LgPin\n pattern['Grating/Wavelength'] = re.compile(r'[0-9]{2,3}/[0-9]\\.[0-9]{2}') # 32/1.65, 111/1.68\n pattern['Camera/Prism'] = re.compile(r'[A-Z]{2}/[A-Z]{3}') # LB/MIR, SB/SXD\n pattern['ExpTime/LNR/Coadds'] = re.compile(r'[0-9]+\\.[0-9]/[0-9]+/[0-9]+') # 0.2/1/25, 300.0/32/1\n pattern['ACQ'] = re.compile(r'^Y*$') # Y or ''\n\n indx = {}\n for line in data:\n if 'Electronic Observing Log' in line:\n date = line.split()[-1][7:]\n logger.debug('Log date: %s', date)\n if line[0:14] == 'Observation ID': # This defines the start of the header row\n for h in header:\n indx[h] = line.find(h) # Find where each column starts\n break # No need to go farther\n\n width = {} # Find the width of each row\n for i in range(len(header) - 1): # This requires that 'header' be an ordered array (not a dictionary)\n width[header[i]] = indx[header[i + 1]] - indx[header[i]]\n width[header[i+1]] = 1 # The ACQ field is either 'Y' or blank\n\n val = {}\n match = {}\n info = {}\n for line in data:\n logger.debug('\\n%s', line)\n files = []\n for h in header:\n val[h] = line[indx[h]: indx[h] + width[h]].strip()\n match[h] = re.match(pattern[h], val[h])\n logger.debug('%s: \"%s\" %s' % (h, val[h], match[h]))\n\n # Maybe throw a warning if only match 1 fails; indicating a likely bad pattern specification?\n\n if None in match.values():\n logger.debug('Failed to match 
all patterns -> This is a comment')\n continue\n\n if '-' in val['File Numbers']:\n start, stop = val['File Numbers'].split('-')\n for i in range(int(start), int(stop)+1):\n files.append(i)\n else:\n files.append(int(val['File Numbers']))\n\n for filenum in files:\n f = 'N%sS%04d.fits' % (date, filenum)\n logger.debug('File: %s', f)\n info[f] = {}\n for h in [header[0]] + header[3:]: # Skip 'Data Labels' and \"File Numbers'\n info[f][h] = val[h]\n\n logger.debug('info: %s', info)\n return info", "def listening(self):\n # starting point (CheckPoint)\n try:\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n while True:\n \n curr_size = path.getsize(self.path)\n modified_time = path.getmtime(self.path)\n \n time.sleep(.2)\n # Latest.log Either got Archived by Minecraft or a new Instance of Minecraft Opened\n if self.fileSize > curr_size:\n print('\\033[31mDetected Change in Size')\n print('\\033[32mDid You reopen Minecraft?')\n self.fileSize = curr_size\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n # MODIFIED??? must be minecraft dumping chat onto lastest.log\n if self.last_time_modified != modified_time:\n \n self.last_time_modified = modified_time\n chat = open(self.path, 'r').read()\n newChatLines = re.split('\\n', chat)[last_index:] # Reads Lines From the last checkpoint\n \n \n \n curr_index = -1\n\n for line in newChatLines:\n\n curr_index += 1\n # if line is not a \\n or \\r tag then our Line checkpoint is the current line\n if line:\n last_index += 1\n \n # Ignores ERRORS / WARNINGS focuses on chat logs\n if '[Client thread/INFO]: [CHAT]' in line:\n\n self.newLineEvent(line)\n # TODO LOGING\n except (FileExistsError, FileNotFoundError, PermissionError, NotADirectoryError) as e:\n err_helper.showError('0x1', e, crash=True)", "def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. 
get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]", "def read_file(log_file):\n\t\tfile = open(log_file, 'r')\n\t\tresult = []\n\t\twhile 1:\n\t\t\tcontent = file.readline()\n\t\t\tif not content:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tdata = content.split(\"\\003\")\n\t\t\t\tif len(data) == 13:\n\t\t\t\t\ttrack = {\n\t\t\t\t\t\t\t 'device_no' : long(data[0][3:]), 'sim' : data[1], 'type':int(data[2]), 'gps_time' : MongoTrack.time_trans(data[3]),\n\t\t\t\t\t\t\t 'valid' : data[4], 'loc':{'long' : float(data[5]), 'lat' : float(data[6]) }, 'altitude' : float(data[7]),\n\t\t\t\t\t\t\t 'speed' : float(data[8]), 'course' : float(data[9]), 'km' : float(data[10]), 'para' : float(data[11]),\n\t\t\t\t\t\t\t 'rtime' : MongoTrack.time_trans(data[12].strip())\n\t\t\t\t\t\t\t}\n\t\t\t\t\tresult.append(track)\n\t\tfile.close()\n\t\treturn result", "def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()", "def os_open_comm_log( self, ):\r\n AppGlobal.os_open_txt_file( self.parameters.comm_logging_fn )", "def read_game_logs(file_path):\n\n if os.path.isfile(file_path):\n with open(file_path, \"r\") as read_file:\n log = json.load(read_file)\n # event_type = set([e[\"event\"] for e in log ])\n # the event types: command, text_message, set_attribute, join\n # print(\"event types\", event_type)\n\n # sort all messages chronologically\n log.sort(key=lambda x: x[\"date_modified\"])\n\n start = None\n end = None\n real_end = None # WHen The came master says COngrats or you die, because rest of the messages looks like bugs...\n episode_list = []\n length = len(log)\n game_finished = False\n # Episode are being searched between 2 starts commands\n # only the one where the command done has been issued is kept\n for i, l in enumerate(log):\n if \"command\" in l.keys():\n if l[\"command\"] == \"start\":\n if start == None:\n start = i\n elif end == None:\n end = i\n if l[\"command\"] == \"done\":\n game_finished = True\n\n if l[\"user\"][\"id\"] == 1 and l[\"event\"] == \"text_message\" and type(l[\"message\"]) is str and (\n l[\"message\"].startswith(\"Congrats\") or l[\"message\"].startswith(\n \"The rescue robot has not reached you\")):\n real_end = i + 1 # +1 because we want to include this message in the log slice...\n if start is not None and end is not None:\n if game_finished:\n episode_list.append(log[start:real_end])\n start = end\n end = None\n real_end = None\n game_finished = False\n\n if i + 1 == length:\n if start is not None and end is None and game_finished:\n episode_list.append(log[start:real_end])\n\n score_list = {}\n for i, e in enumerate(episode_list):\n # the number of answers the avatar utters gives us the number of question asked\n # num_questions = sum(\n # [1 for m in e if m[\"user\"][\"name\"] == \"Avatar\" and m[\"event\"] == \"text_message\"])\n\n # Just sum every messages ending with a question mark issueed by the user...\n num_questions = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].endswith(\"?\")])\n\n # user id 1 is alway the game master, we are looping here on the messages of the 
\"real\" player\n # when we tell the avatar to change location, we don't get an answer, this is why the substraction gives the number of orders\n # this does not include the order \"done\"\n # num_orders = sum(\n # [1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n # \"event\"] == \"text_message\"]) - num_questions\n\n # Just sum every order of type \"go west\". Describe orders are not counted.\n num_orders = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and (\n \"east\" in m[\"message\"].lower() or \"north\" in m[\"message\"].lower() or \"west\" in m[\n \"message\"].lower() or \"south\" in m[\"message\"].lower() or \"back\" in m[\"message\"].lower())])\n\n game_won = sum([1 for m in e if m[\"user\"][\"id\"] == 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].startswith(\"Congrats\")]) > 0\n\n # Work-Around - the final reward giving +1.0 on success and -1.0 on loss happens after the messages\n # Saying \"congratulations\" or \"you die horribly\" just repeating the message when the game starts.\n # We had to exclude that message to segment finished games but this is why we have to add these rewards here manually...\n\n final_reward = -1.0\n if game_won:\n final_reward = 1.0\n score_list[i] = {\"score\": sum([m[\"message\"][\"observation\"][\"reward\"] for m in e if\n \"message\" in m.keys() and type(m[\"message\"]) is dict])+final_reward,\n \"num_questions\": num_questions, \"num_orders\": num_orders, \"game_session\": e,\n \"game_won\": game_won}\n\n return score_list\n\n else:\n raise Exception(f\"{file_path} is not a correct file path.\")", "def read_agent_logfile(self):\n server = self.get_agent()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")", "def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")", "def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))", "def readin(file, time_offset='+0000'):\n\tp = re.compile(r'^.*log\\.?(\\d)*$')\n\tif p.match(file):\n\t\treturn _readin_syslog(file, time_offset)\n\telse:\n\t\tp2 = re.compile(r'^.*\\.slogviz\\.json$')\n\t\tif p2.match(file):\n\t\t\treturn _readin_JSON(file)\n\t\telse:\n\t\t\tp3 = re.compile(r'^.*History$')\n\t\t\tif p3.match(file):\n\t\t\t\treturn _readin_chrome_history(file)\n\t\t\telse:\n\t\t\t\tp4 = re.compile(r'^.*places.*\\.sqlite$')\n\t\t\t\tif p4.match(file):\n\t\t\t\t\treturn _readin_moz_places(file)\n\t\t\t\telse:\n\t\t\t\t\tp5 = re.compile(r'^.*\\.evtx$')\n\t\t\t\t\tif p5.match(file):\n\t\t\t\t\t\treturn _readin_evtx(file)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn None", "def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a 
dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)", "def find_old_log(weight_path):\n pardir = os.path.dirname(weight_path)\n event_paths = glob.glob(os.path.join(pardir, \"event*\"))\n if len(event_paths) == 0:\n return None\n else:\n return event_paths[0]", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! Initialise folder first or reset your configuration!\")", "def test_device_logs_in_file_only(self, simulate_main, caplog, qtbot):\n QtTest.QTest.qWaitForWindowShown(simulate_main.form)\n qtbot.wait(1000)\n\n log_text = applog.get_text_from_log()\n assert \"SimulatedPM100 setup\" in log_text\n assert \"SimulatedPM100 setup\" not in caplog.text()", "def watch(log_file):\n log_file.seek(0, os.SEEK_END)\n while True:\n line = LogParser.read_line(log_file)\n if not line:\n time.sleep(1)\n continue\n yield line", "def watch(filename):\n with open(filename) as log:\n # Move to the end of the file\n file_size = os.stat(filename)[6]\n log.seek(file_size)\n while True:\n last_location = log.tell()\n line = log.readline()\n if not line:\n time.sleep(0.1)\n log.seek(last_location)\n else:\n yield line", "def read_log_file(log_file_pathname):\n try:\n with open(log_file_pathname, 'r') as file:\n return file.read()\n except Exception:\n print(\"Please check the file permission or format!\")", "def getLogs():", "def getLogs():", "def _read_log(self, **kwargs):\n\n log_file = find_log_file()\n\n if not log_file:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to determine log filename. \"\n \"Please verify that the plugin is writing to a log file.\"\n )\n\n try:\n return read_log_file(log_file=log_file, **kwargs)\n except IOError as e:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to read log file at {0}. \"\n \"Root cause I/O error {1}: {2}\".format(log_file, e.errno, e.strerror)\n )", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def read_log(prefix):\n l = []\n with open('%s.log' % prefix) as F:\n for line in F:\n if 'profile written' not in line:\n continue\n else:\n l.append(line.split()[0])\n return l" ]
[ "0.60699195", "0.5945723", "0.5875873", "0.5854158", "0.57434773", "0.57226753", "0.57217354", "0.57209086", "0.5714799", "0.5700735", "0.56245184", "0.5595432", "0.55869824", "0.557967", "0.5578444", "0.5529408", "0.5524706", "0.55045867", "0.54335624", "0.5433416", "0.538907", "0.5387977", "0.5341276", "0.53221834", "0.53130645", "0.5285628", "0.5285628", "0.52814204", "0.52653", "0.5261729" ]
0.64161575
0
Save frames to a single row or as a gif.
def save_frames(frames, out_dir, as_row=True, as_gif=False): os.makedirs(out_dir, exist_ok=True) if frames.dtype == torch.uint8: # save_image needs float value in [0, 1] frames = frames.float() frames = frames / 255. if as_gif: gif_dir = 'gif_images' os.makedirs(os.path.join(out_dir, gif_dir), exist_ok=True) for i, frames_i in enumerate(frames): if as_row: out_file = os.path.join(out_dir, f'img_{i:04d}.png') save_image(frames_i.clone(), out_file, nrow=frames_i.shape[0]) if as_gif: for j, frame in enumerate(frames_i): out_file = os.path.join(out_dir, gif_dir, f'img_{i:04d}_{j:04d}.png') save_image(frame.unsqueeze(0), out_file) out_file = os.path.join(out_dir, f'img_{i:04d}.gif') make_gif(os.path.join(out_dir, gif_dir), out_file, pattern=f'img_{i:04d}_*', fps=10) print(f'Saved images to {out_dir}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_gif(frames):\n print(\"Saving gif images!\")\n for i in range(len(frames)):\n im_out_path = \"gif/gif_emilie_will_\" + str(i) + \".png\"\n plt.imsave(im_out_path, frames[i])", "def saveFrames(filepath, frames):\n\n for i, frame in enumerate(frames):\n image = Image.fromarray(frame)\n image.save(filepath + str(i).zfill(8) + '.png')", "def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)", "def saveanimation(frames,address=\"./movie.gif\"):\n imageio.mimsave(address, frames)", "def saveFrame(filepath, frame):\n if not filepath.lower().endswith('.png'):\n filepath += '.png'\n image = Image.fromarray(frame)\n image.save(filepath)", "def write_frames_to_gif(fname: str, frames, duration: int, progress=False):\n\n channels = frames[0].shape[0]\n # Convert to PIL\n pil_images = []\n if progress:\n frames = track(frames, \"Converting Frames\")\n for f in frames:\n pim = Image.fromarray((f*255).astype(np.uint8))\n if channels == 4:\n pim.info['transparency'] = 255\n #pim = pim.convert(\"P\")\n pil_images.append(pim)\n\n # Write GIF, show status spinner with rich\n if progress:\n console = Console()\n with console.status(f\"Writing GIF to {fname}\") as status:\n # loop=0 means the gif just repeats forever, which is what I think everyone probably expects\n pil_images[0].save(fname, save_all=True, append_images=pil_images[1:], loop=0, duration=duration)\n else:\n pil_images[0].save(fname, save_all=True, append_images=pil_images[1:], loop=0, duration=duration)", "def saveGIFBatch(directory, path, name=''):\n # for each frame in batch\n images = []\n for filename in directory:\n print(filename)\n images.append(imageio.imread(filename))\n\n name_gif = path + '/' + name + '.gif'\n imageio.mimsave(name_gif, images)", "def compose_in_gif(images, output_file, delay):\n images[0].save(\n output_file, \n format='GIF', append_images=images[1:], \n save_all=True, duration=delay, loop=0,\n )", "def save_GIF(ht, name=\"trajectory\"):\n # Generation of images\n counter = 0\n images = []\n for e in range(0, len(ht), 3):\n p = ht[e][0]\n s = ht[e][1]\n save_caronthehill_image(p, s, \"image\\\\state\" + str(counter) + \".png\")\n images.append(imageio.imread(\"image\\\\state\" + str(counter) + \".png\"))\n counter += 1\n imageio.mimsave(\"{}.gif\".format(name), images)", "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def write_frames(self, images):\n for img in images:\n self.write_frame(img)", "def recordAnim(self):\n if self.currentMode == 'export':\n if os.path.isfile(self.tempGIFDir):\n try:\n os.chmod(self.tempGIFDir, 0777)\n os.remove(self.tempGIFDir)\n\n except Exception, result:\n logger.warning(result)\n\n modelPanelList = cmds.getPanel(type='modelPanel')\n for eachModelPanel in modelPanelList:\n cmds.modelEditor(eachModelPanel, e=1, alo=0)\n cmds.modelEditor(eachModelPanel, e=1, pm=1)\n\n startFrame = cmds.playbackOptions(min=1, q=1)\n endFrame = cmds.playbackOptions(max=1, q=1)\n\n tempImageList = list()\n for i in range(int(startFrame), int(endFrame+1)):\n tempImage = cmds.playblast(st=i, et=i, fmt='image', cc=1, v=0, orn=0, fp=1, p=100, c='png',\n wh=[512, 512], cf='%s/tempImg_%s.png' % (self.tempDir, i))\n tempImageList.append(tempImage)\n\n # make GIF from tempImageList\n frames = list()\n for tempImage in tempImageList:\n im = Image.open(tempImage)\n frames.append(im)\n\n frames[0].save(self.tempGIFDir, save_all=True, append_images=frames[1:], duration=50, 
loop=0)\n\n # remove temp images\n for i in tempImageList:\n if os.path.isfile(i):\n try:\n os.chmod(i, 0777)\n os.remove(i)\n except Exception, result:\n logger.warning(result)\n\n self.recordBtn.loadGIF2Button(path=self.tempGIFDir)", "def save_frame(self, save_path_filename):\n raise NotImplementedError", "def display_frames_as_gif(frames, video_name):\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n #plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi = 72)\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)\n# display(display_animation(anim, default_mode='loop'))\n anim.save(result_floder + '/' + video_name, writer=writer)", "def tasks_frame(self, task_id, frame_ids, outdir='', **kwargs):\n for frame_id in frame_ids:\n url = self.api.tasks_id_frame_id(task_id, frame_id)\n response = self.session.get(url)\n response.raise_for_status()\n im = Image.open(BytesIO(response.content))\n outfile = 'task_{}_frame_{:06d}.jpg'.format(task_id, frame_id)\n im.save(os.path.join(outdir, outfile))", "def save(self, fp, format=None, **params):\r\n if isinstance(fp, basstring):\r\n if fp.lower().endswith(\".gif\"):\r\n if numpy2gif_installed:\r\n if self.is_animated:\r\n numpy2gif.write_gif(self.frames, fp, fps=100//self.exts[0][['delay_time']])\r\n else:\r\n numpy2gif.write_gif(self._instance, fp)\r\n else:\r\n NotImplementedError(\"numpy2gif is not installed so cannot save gif images, install it with: pip install numpy2gif\")\r\n else:\r\n cv2.imwrite(fp, self._instance)\r\n return None\r\n if isinstance(fp, fil_object):\r\n fl = open(format, 'w')\r\n fl.write(fp.read())\r\n fl.close()\r\n return None\r\n return None", "def generate_gif(frames, reward, path, number=None, evaluation=False):\n for i, frame in enumerate(frames):\n frames[i] = resize(frame, (420, 320, 3),\n order=0, preserve_range=True).astype(np.uint8)\n if evaluation:\n path += '/atari-step-{}-reward-{}.gif'.format(number, reward)\n else:\n path += '/atari-play-reward-{}.gif'.format(reward)\n imageio.mimsave(path, frames, duration=1/30)", "def create_gif(trajectory):\r\n gif = []\r\n for i in range(len(trajectory)):\r\n p, s = trajectory[i][0]\r\n filename = 'images/car{}.jpeg'.format(i)\r\n save_caronthehill_image(p, s, filename)\r\n img = imageio.imread(filename)\r\n height, width, layers = img.shape\r\n gif.append(img)\r\n \r\n \r\n imageio.mimsave(\"visualization.gif\", gif, 'GIF')", "def _writeGifToFile(fp, images, durations, loops):\n \n # Obtain palette for all images and count each occurance\n palettes, occur = [], []\n for im in images: \n palettes.append( getheader(im)[1] )\n for palette in palettes: \n occur.append( palettes.count( palette ) )\n \n # Select most-used palette as the global one (or first in case no max)\n globalPalette = palettes[ occur.index(max(occur)) ]\n \n # Init\n frames = 0\n firstFrame = True\n \n \n for im, palette in zip(images, palettes):\n \n if firstFrame:\n # Write header\n \n # Gather info\n header = getheaderAnim(im)\n appext = getAppExt(loops)\n \n # Write\n fp.write(header)\n fp.write(globalPalette)\n fp.write(appext)\n \n # Next frame is not the first\n firstFrame = False\n \n if True:\n # Write palette and image data\n \n # Gather info\n data = getdata(im) \n imdes, data = data[0], data[1:] \n graphext = getGraphicsControlExt(durations[frames])\n # Make image descriptor 
suitable for using 256 local color palette\n lid = getImageDescriptor(im) \n \n # Write local header\n if palette != globalPalette:\n # Use local color palette\n fp.write(graphext)\n fp.write(lid) # write suitable image descriptor\n fp.write(palette) # write local color table\n fp.write('\\x08') # LZW minimum size code\n else:\n # Use global color palette\n fp.write(graphext)\n fp.write(imdes) # write suitable image descriptor\n \n # Write image data\n for d in data:\n fp.write(d)\n \n # Prepare for next round\n frames = frames + 1\n \n fp.write(\";\") # end gif\n return frames", "def save_video(video, save_path_template):\n try:\n from PIL import Image # pylint: disable=g-import-not-at-top\n except ImportError as e:\n tf.logging.warning(\n \"Showing and saving an image requires PIL library to be \"\n \"installed: %s\", e)\n raise NotImplementedError(\"Image display and save not implemented.\")\n\n for i, frame in enumerate(video):\n save_path = save_path_template.format(i)\n with tf.gfile.Open(save_path, \"wb\") as sp:\n Image.fromarray(np.uint8(frame)).save(sp)", "def save_sequence(seq_dir, seq_data, frm_idx_lst=None, to_bgr=False):\n\n if to_bgr:\n seq_data = seq_data[..., ::-1] # rgb2bgr\n\n # use default frm_idx_lst is not specified\n tot_frm = len(seq_data)\n if frm_idx_lst is None:\n frm_idx_lst = ['{:04d}.png'.format(i) for i in range(tot_frm)]\n\n # save for each frame\n os.makedirs(seq_dir, exist_ok=True)\n for i in range(tot_frm):\n cv2.imwrite(osp.join(seq_dir, frm_idx_lst[i]), seq_data[i])", "def saveFramesToVideo(frames, videoPath): \n fourcc = cv2.VideoWriter_fourcc('a','v','c','1')\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n [height,width]=frames[0].shape[0:2]\n writer = cv2.VideoWriter(videoPath, fourcc, 30, (width, height), 1)\n for i in range(frames.shape[0]):\n frameBGR = yiq2bgrUint(frames[i])\n writer.write(frameBGR)\n writer.release()", "def save_frame(frame):\n try:\n img = Image.fromarray(frame.array, 'RGB')\n out_path = settings['app']['web_path']\n if not os.path.isabs(out_path):\n out_path = os.path.join(basepath, out_path)\n filename = os.path.join(out_path, 'static', 'latest.jpg')\n tmp_filename = '{}.part'.format(filename)\n img.save(tmp_filename, 'jpeg')\n os.rename(tmp_filename, filename)\n except Exception, error:\n print('Error saving frame: {}'.format(error))", "def _write_frame(self : \"animation\",\n frame : \"np.ndarray\"\n ):\n self._writer.append_data(frame)\n self._frame_number += 1\n self._prevFrame = frame", "def save(self, fp: str):\n cv2.imwrite(fp, self.frame)", "def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)", "def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! 
Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2", "def save_screen(screen):\n if not video_mode: # Don't record video\n return False\n # Make global variables writeable\n global current_frame\n global path_checked\n frames_directory = os.path.dirname(\n os.path.dirname(\n os.path.realpath(__file__))) + \"\\\\frames\\\\\"\n if not path_checked:\n check_folder(frames_directory)\n pygame.image.save(\n screen,\n frames_directory + \"ants-frame{}.jpeg\".format(\n str(current_frame).zfill(4)))\n current_frame += 1 # Move count to next frame", "def gif(filename, array, fps=10, scale=1.0):\n # ensure that the file has the .gif extension\n filename = filename + '.gif'\n\n # copy into the color dimension if the images are black and white\n if array.ndim == 3:\n array = array[..., np.newaxis] * np.ones(3)\n\n # make the moviepy clip\n clip = ImageSequenceClip(list(array), fps=fps).resize(scale)\n clip.write_gif(filename, fps=fps)\n return True", "def write_frame(self, data):\n try:\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.jpg'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name\n except:\n return \"\"" ]
[ "0.73017335", "0.72355133", "0.6915706", "0.68751323", "0.6866969", "0.6702963", "0.64925975", "0.6485176", "0.6412501", "0.6385749", "0.6311999", "0.6305541", "0.6288399", "0.6262558", "0.6237853", "0.62274206", "0.61932164", "0.6116848", "0.60730493", "0.5990745", "0.59880346", "0.59761715", "0.5966294", "0.5964347", "0.59549135", "0.59447104", "0.5889584", "0.58597654", "0.58444655", "0.5818427" ]
0.7977077
0
mcxPyBot constructor initialises mcxDatabase connection and adds command handlers.
def __init__(self, channel, nickname, password, server, port = 6667, dbcon = False): # IRC connection SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname) # register event handler for all events self.ircobj.add_global_handler('all_events', getattr(self, 'on_event'), -10) self.channel = channel """The channel in which the bot will serve.""" self.__password = password """The password used to identify with nick services.""" self.__IpToUser = {} """A dict which stores IP (dcc) to user (mcx.user.id) relations.""" self.__quitmsgs = [] """A list of available quit messages used by the bot when quitting from a server.""" self.__initQuitMsgPool() self.__commandHandlers = {} """Dict saving all command handlers.""" # mcxDatbase self.__database = dbcon """A reference to a mcxDatabase object.""" if isinstance(self.__database, mcxDatabase): self.__databaseAvailable = self.__database.connected() """Flag whether mcxDatabase is reachable.""" # if database connection could be established if self.__databaseAvailable: EVENT_MCX_DATABASE_LOST.clear() EVENT_MCX_DATABASE_RECOVERED.set() else: self.__databaseAvailable = False # register all available command types self.__setupCommandHandlerTypes() # add new command handlers here # query commands self.__addCommandHandler('die', 'query') # channel commands self.__addCommandHandler('greet', 'channel') self.__addCommandHandler('pingDataBase', 'channel', True) self.__addCommandHandler('getMySQLVersion', 'channel', True) self.__addCommandHandler('getTestUserByBotKey', 'channel', True) # user commands self.__addCommandHandler('auth', 'not_authed_dcc', True) # registered user commands self.__addCommandHandler('getLatestMessage', 'authed_dcc', True) # admin commands # not implemented yet
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, user, password, database='mesomat', host='localhost'): \n \n \n self.config = {\n 'user' : user,\n 'password' : password,\n 'host' : host,\n 'database' : database,\n 'raise_on_warnings' : True,\n 'auth_plugin' : 'mysql_native_password'\n }\n \n self.INSERT_SAMPLE_COLUMN_COMMAND = ()\n \n \n self.connected = False\n self.cursor = None\n self.cnx = None", "def __init__(self):\n self.logger = logging.getLogger('modules.xbmc')\n\n XbmcServers.createTable(ifNotExists=True)\n htpc.MODULES.append({\n 'name': 'XBMC',\n 'id': 'xbmc',\n 'fields': [\n {'type':'bool',\n 'label':'Enable',\n 'name':'xbmc_enable'},\n {'type':'text',\n 'label':'Menu name',\n 'name':'xbmc_name'},\n {'type':'bool',\n 'label':'Enable PVR',\n 'name':'xbmc_enable_pvr'},\n {'type':'bool',\n 'label':'Hide watched',\n 'name':'xbmc_hide_watched'}\n ]})\n htpc.MODULES.append({\n 'name': 'XBMC Servers',\n 'id': 'xbmc_update_server',\n 'action': htpc.WEBDIR + 'xbmc/server',\n 'test': htpc.WEBDIR + 'xbmc/ping',\n 'fields': [\n {'type':'select',\n 'label':'Server',\n 'name':'xbmc_server_id',\n 'options':[\n {'name':'New', 'value':0}\n ]},\n {'type':'text',\n 'label':'Name',\n 'name':'xbmc_server_name'},\n {'type':'text',\n 'label':'IP / Host',\n 'name':'xbmc_server_host'},\n {'type':'text',\n 'label':'Port',\n 'name':'xbmc_server_port'},\n {'type':'text',\n 'label':'Username',\n 'name':'xbmc_server_username'},\n {'type':'password',\n 'label':'Password',\n 'name':'xbmc_server_password'},\n {'type':'text',\n 'label':'Mac addr.',\n 'name':'xbmc_server_mac'}\n ]})\n # Set current server to the first one in database\n try:\n self.current = XbmcServers.select(limit=1).getOne().name\n except SQLObjectNotFound:\n self.logger.debug(\"No XBMC-Server found in database.\")\n self.current = None", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def __init__(self):\n self._connection = get_db_connection()", "def __init__(self, app_database):\n try:\n self.database_configuration = app_database\n self.conn = None\n self.cursor = None\n except Exception as error:\n print(f\"DBCM::__init__::{error}\")", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self):\n try:\n self._chat_db = sqlite3.connect(CHAT_DB_PATH)\n except OperationalError:\n print(\"Cannot access chat database.\\nGo to Settings->Security and Privacy->Privacy->Full Disk Access.\\n\"\n \"Give access to the application you are running and restart the program.\")\n sys.exit(1)\n\n self._contacts = Contacts(self._chat_db).get_contacts_df()\n\n try:\n self._message_db = sqlite3.connect(WEEKLY_MESSAGES_DB_PATH)\n except OperationalError:\n print(\"Could not connect to the database server.\")\n sys.exit(1)", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def __init__(self, dbconn, client, config, repeated_messages=4):\r\n # Debug\r\n self.debug = False\r\n\r\n # All necessary data\r\n self.client = client\r\n self.conn = dbconn\r\n self.config = config\r\n\r\n # Functions\r\n self.command_on_message_list = {}\r\n self.auto_on_message_list = {}\r\n self.on_member_join_list = {}\r\n self.on_member_update_list = {}\r\n\r\n self.channels = list(self.client.get_all_channels())\r\n self.repeat_n = repeated_messages\r\n self.repeated_messages_dict = {(channel.id):[] for channel in self.channels}\r\n\r\n # All Server Role IDs\r\n guild = client.get_guild(config[\"server_id\"]) # UCSB Server ID\r\n roles = {}\r\n for r in guild.roles:\r\n roles.update({r.name: r.id})\r\n self.roles = roles\r\n\r\n # Actions in Command on 
Message\r\n for action in command_on_message.__subclasses__():\r\n self.command_on_message_list[action.__name__] = action(bot = self)\r\n \r\n # Actions in Auto on Message\r\n for action in auto_on_message.__subclasses__():\r\n self.auto_on_message_list[action.__name__] = action(bot = self)\r\n\r\n # Actions on Member Join\r\n for action in on_member_join.__subclasses__():\r\n self.on_member_join_list[action.__name__] = action(bot = self)\r\n \r\n # Actions on Member Update\r\n for action in on_member_update.__subclasses__():\r\n self.on_member_update_list[action.__name__] = action(bot = self)", "def __init__(self, db_name: str = 'concord.db') -> None:\n\n self.controller_database: ControllerDatabase = ControllerDatabase(db_name)", "def __init__(self) -> None:\n self.Database = Database()", "def setup(cls):\n super().setup()\n cls.db = DBCommunication()", "def __init__(self, server, channel, nickname, password, port, command, auto_connect = False):\n self.server = server\n self.channel = channel\n self.nickname = nickname\n self.password = password\n self.port = port\n self.command = command\n self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # List of functions to call when processing a post\n self.functions = []\n if auto_connect:\n self.connect()", "def __init__(self):\n self._setup()\n # Encryption/decryption cipher handler\n self.__cipher = self.__get_cipher()\n # Setup the engine for the sqlite database\n self._engine = create_engine(self.db_uri)\n # Configure the SQLAlchemy metadata\n self._metadata = MetaData()\n self._metadata.bind = self._engine\n self._load_db()\n # Configure the auto-mapping base model\n self._base = automap_base(metadata=self._metadata)\n self._base.prepare()\n # Setup a session generator for database connections\n self._session = sessionmaker(bind=self._engine)", "def __init__(self, commands=None):\n self.commands = {}\n self.context = None", "def __init__(self, db=None):\n if db is None:\n self.db = \"file:foxrollbot_db?mode=memory&cache=shared\"\n else:\n self.db = db\n\n # This attribute is used to maintain a single connection to the\n # database, so that in-memory databases aren't just lost after every\n # connection is finished.\n self._main_connection = sqlite3.connect(self.db, uri=True)\n\n self._load_statements()\n self._init_db()", "def __init__(self, con_uri=None, db_name=\"douyin\"):\n super().__init__()\n self.con_uri = con_uri or 'localhost'\n self.client = AsyncIOMotorClient(self.con_uri)\n self.db = self.client[db_name]", "def __init__(self):\n self.dbconnect = dbConnection.connection", "def __init__(self):\n self.database = Database()\n self.load_config()", "def __init__(self, cmd_handler: Callable[[IRCClient], CommandHandler], *args, **kwargs):\n IRCClient.__init__(self, *args, **kwargs)\n self.command_handler: CommandHandler = cmd_handler(self)", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def _init(self, connection_strings):\n # Init connections\n self._connections = []\n for s in connection_strings:\n d = Dict() # don't do Dict(foo=x) bc PyScript only supports that for dict\n self._connections.append(d)\n d.fullname = s\n d.type = s.split('.')[-1]\n d.objects = []\n \n # Pending events for this handler\n self._scheduled_update = False\n self._pending = [] # pending events\n \n # Connect\n for index in range(len(self._connections)):\n self._connect_to_event(index)", "def initialize():\n\t\tDBHelper.con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')", "def __init__(self):\n\n 
self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))", "def __init__(self, db_handle):\n self.cur = db_handle.database().get_cursor()\n self.question_handler = db_handle.question()\n self.answer_handler = db_handle.answer()\n self.user_handler = db_handle.user()", "def __init__(self, config_file, ssl=False, plugin_config = {}):\n self.config_file = config_file\n self.botconfig = self.load_config(config_file)\n auth = self.botconfig.find('auth')\n logging.info(\"Logging in as %s\" % auth.attrib['jid'])\n sleekxmpp.ClientXMPP.__init__(self, auth.attrib['jid'], auth.attrib['pass'], auth.get('ssl', True), plugin_config)\n storageXml = self.botconfig.find('storage')\n if storageXml is not None:\n self.store = store(storageXml.attrib['file'])\n else:\n logging.warning(\"No storage element found in config file - proceeding with no persistent storage, plugin behaviour may be undefined.\")\n self.rooms = {}\n self.add_event_handler(\"session_start\", self.handle_session_start, threaded=True)\n self.register_xmpp_plugins()\n CommandBot.__init__(self)\n PlugBot.__init__(self, default_package = 'sleekbot.plugins')\n self.register_adhocs()", "def __init__(self, config, user=None, cred=None):\n\n self.client = pyorient.OrientDB(config.host, config.port)\n self.client.connect(config.user, config.cred)\n\n self.config = config\n\n if config.initial_drop:\n self._last_db = self._last_user = self._last_cred = None\n self.drop()\n\n db_name = config.db_name\n if db_name:\n self.open(db_name, config.storage, user, cred)\n\n self.registry = {}\n # Maps property dict from database to added class's property dict\n self.props_from_db = {}\n\n self.scripts = config.scripts or pyorient.Scripts()", "def __init__(self) -> None:\n settings = get_project_settings()\n self.db = pymysql.connect(\n host=settings['MYSQL_SERVER'],\n port=settings['MYSQL_PORT'],\n user=settings['MYSQL_USERNAME'],\n password=settings['MYSQL_PASSWORD'],\n db=settings['MYSQL_DB']\n ) \n self.cursor = self.db.cursor()", "def __init__(self, **manager_commands):\n self.package = manager_commands" ]
[ "0.61448985", "0.5992468", "0.59882843", "0.5959786", "0.5893138", "0.5862879", "0.58266366", "0.57569146", "0.575443", "0.57380295", "0.5736009", "0.56736994", "0.5635677", "0.56265783", "0.5626573", "0.5624763", "0.5621554", "0.5619871", "0.56172127", "0.5610549", "0.56074196", "0.5595911", "0.5581509", "0.5579204", "0.55788714", "0.5517988", "0.55016637", "0.54966915", "0.5495959", "0.54877007" ]
0.69860715
0
initialize some quit message and save them into a list by filling self.__quitmsgs
def __initQuitMsgPool(self): self.__quitmsgs.append("Infektion festgestellt... leite Quarantaenemassnahmen ein... trenne aktive Verbindung")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_quit(self, raw_msg, source, **kwargs):", "def __init__(self):\n\n\t\tself.count = 0\n\t\tself.messages = []", "def __init__(self, msg):\n super(QuitMessageException, self).__init__(msg)", "def getRandomQuitMsg(self):\n return self.__quitmsgs[randint(0, len(self.__quitmsgs)-1)]", "def __init__(self):\n self.messageSet = set()\n self.messageQueue = deque()", "def init_comms(self):\n mess = b'SHO' + self.end_mess_bytes\n self.sock.sendall(mess)", "def __init__(self):\n self.recent_messages= {}", "def __init__(self):\n self._msg_dict = {}", "def func_quit(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'quit':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n self.finish()\n return True", "def get_msg_quit(self, username):\n return self.user_table[username]['msg_quit']", "def __init__(self):\n self.msg_dict = dict() # msg: last_print_time_stamp", "def get_and_delete_messages (self):\n return []", "def get_and_delete_messages (self):\n return []", "def sendQuitFlag():\n simuConfig[\"FLAG.QUIT\"] = True", "def _process_win_msgs(self):\n message = wintypes.MSG()\n while True:\n res = win32functions.PeekMessageW(pointer(message), 0, 0, 0, win32con.PM_REMOVE)\n if not res:\n break\n if message.message == win32con.WM_QUIT:\n self.stop()\n sys.exit(0)\n else:\n win32functions.TranslateMessage(byref(message))\n win32functions.DispatchMessageW(byref(message))", "def __init__(self):\n # MessageHandlers - index is MessageType and data is a list of\n # tuples (priority, MessageHandler object) that is sorted by\n # priorities.\n #XXX: rockstar - WTF?! Why is there a list comprehension being used\n # and still only returning an empty list?\n # pylint: disable-msg=W0612\n self.message_handlers = [\n [] for i in range(self.NUMBER_OF_MESSAGE_TYPES)\n ]\n self.lock = threading.Lock()\n self.logger = Logger().getLogger('backend.core.MessageBus')", "def _is_quit(self, message):\n\t\treturn message.lower() in (\"q\", \"quit\")", "def messages(self, messages):\r\n\r\n self._messages = messages", "def sub_callbackmsg(self, msg):\n\n print (msg.message)\n self.received_msg = self.received_msg + [msg.message]\n print (self.received_msg)", "def list_messages(self):", "def msg(self, msg):\r\n if self.__isInit != True:\r\n return", "def call_exit(self, _) -> None:\n self.save_class()\n for _ in range(self.PATH.count(\"/\") + 1):\n self.queue.insert(0, \"quit\")", "def __init__(self, msg, final=False):\n self.__msg = msg\n self.final = final", "def __init__(self, omssubtype, outlinetitle=\"\", tabletitle=\"\", caption=\"\", rowdim=\"\", coldim=\"\", columnlabels=[],\n procname=\"Messages\"):\n \n attributesFromDict(locals())\n self.rowlabels = []\n self.columnvalues = []\n self.rowcount = 0", "def test_do_quit(self):\n for string in self.random_strings:\n self.assertTrue(self.CommandParser.do_quit(string))", "def clearQuitFlag():\n simuConfig[\"FLAG.QUIT\"] = False", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def on_quit (self):", "def __init__(self):\n self.que = []\n self.tem_que = []", "def get_msg_quit(self, username):\n return \"Bye bye\"" ]
[ "0.5805985", "0.569918", "0.5597306", "0.55945224", "0.5488201", "0.5455527", "0.5361667", "0.5361548", "0.5293691", "0.52259815", "0.5221136", "0.52198446", "0.52198446", "0.52172184", "0.5200485", "0.51755583", "0.5162636", "0.51529586", "0.5147324", "0.5142985", "0.51370794", "0.51262856", "0.51241374", "0.50948757", "0.5091269", "0.50880957", "0.50869125", "0.50726414", "0.50699496", "0.5069207" ]
0.79404175
0
get any random quit message that was initialized by __initQuitMsgPool()
def getRandomQuitMsg(self): return self.__quitmsgs[randint(0, len(self.__quitmsgs)-1)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initQuitMsgPool(self):\n self.__quitmsgs.append(\"Infektion festgestellt... leite Quarantaenemassnahmen ein... trenne aktive Verbindung\")", "def get_msg_quit(self, username):\n return self.user_table[username]['msg_quit']", "def get_msg_quit(self, username):\n return \"Bye bye\"", "def getQuitFlag(): \n return simuConfig[\"FLAG.QUIT\"]", "def getQuitFlag():\n return simuConfig[\"FLAG.QUIT\"]", "def genMsg(self):\n return os.urandom(self.messageSize)", "def get_ms_exit_game():\n return random.choice( data.MT_EXIT)", "def get_message():\n msg = random.choice(messages)\n logger.info(\"{}\".format(str(msg)))\n return str(msg)", "def on_quit(self, raw_msg, source, **kwargs):", "def quit():\n return 'quit'", "def get_random_emoji():\n return (random.choice(get_emoji_list())).encode('utf-8').decode('utf-8')", "def quit_cmd(self):\n print_debug(\"Executing QUIT\")\n command = \"QUIT\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n self.close_socket(self.s) # Close socket since we're done.\n return msg_rec", "def exit_message(self):\n return self._exit_message", "def default_reply():\n return random.choice(DEFAULT_RESPONSES)", "def __init__(self, msg):\n super(QuitMessageException, self).__init__(msg)", "def suicide_reply():\n return random.choice(SUICIDE_RESPONSE)", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def _wake_random_journal_thread():\n try:\n thread_id = random.choice(list(WAKE_UP_EVENTS))\n except IndexError:\n return\n WAKE_UP_EVENTS[thread_id].set()", "def menu_quit():\n return \"Quit\"", "def message_id(size):\n return os.urandom(size)", "def msg(self, msg):\r\n if self.__isInit != True:\r\n return", "def prime_pick():\n\n rnd = generate_random(0, len(PRIMES) - 1)\n return PRIMES[rnd]", "def pick(self, mess, args):\n return random.choice(args)", "def exitWithMsg(msg):\n\tprint(msg + \" -- quitting\")\n\tsys.exit(0)", "def _is_quit(self, message):\n\t\treturn message.lower() in (\"q\", \"quit\")", "def quit(phenny, input):\n # Can only be done in privmsg by the owner\n if input.sender.startswith('#'): return\n if input.owner: \n phenny.write(['QUIT'])\n __import__('sys').exit(0)", "def get_random_id(message_id):\r\n r = random.Random(message_id)\r\n return r.getrandbits(31) * r.choice([-1, 1])", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__exit_msg__)" ]
[ "0.7058567", "0.637686", "0.6002327", "0.5931621", "0.58883643", "0.5877688", "0.5856501", "0.5820552", "0.579374", "0.5449078", "0.54188406", "0.538813", "0.5298811", "0.5292298", "0.5277682", "0.52572966", "0.51838183", "0.51838183", "0.51838183", "0.51563007", "0.51104975", "0.508718", "0.50158733", "0.4990803", "0.49806616", "0.49721283", "0.4968789", "0.49619165", "0.49488884", "0.4942027" ]
0.82633847
0
commands executed after connected to the server triggered if the chosen nickname on construction is already in use
def on_nicknameinuse(self, c, e): c.nick(c.get_nickname() + "_")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_nicknameinuse(self, raw_msg, busy_nickname, **kwargs):", "def on_nicknameinuse(self, conn, event) -> None:\n self._nickname += '_'\n conn.nick(self._nickname)", "def on_nick(self, raw_msg, source, old_nickname, new_nickname, **kwargs):", "def on_welcome(self, raw_msg, server, port, nickname, **kwargs):", "def on_command(server, user, command, args):", "def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)", "async def on_ready():\n print(\"{} has joined the server\".format(client.user.name))", "async def on(self, ctx, *, nickname=\"\"):\n nickname = nickname.strip()\n mention_here = True\n mention_everyone = True\n if nickname == \"\":\n nickname = \"Dank Bot |Music on voice!\"\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Hey, music is playing on voice channel come! @here\")\n await self.bot.delete_message(ctx.message)\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I miss the `Change Nickname` or `Manage Messages` permission\")", "def on_welcome(self, c, e):\n c.privmsg('NICKSERV', 'GHOST %s %s' % (self._nickname, self.__password))\n c.nick(self._nickname)\n c.privmsg('NICKSERV', 'IDENTIFY %s' % self.__password)\n c.join(self.channel)", "def on_your_host(self, connection, event):\r\n print(event)\r\n print(event.arguments)\r\n\r\n if(len(event.arguments) != 0):\r\n message = event.arguments[0]\r\n else:\r\n message = str(event.arguments)\r\n\r\n res = self.cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s AND `serverID` = %s;\"\"\", (self.userID, connection.serverID))\r\n if res != 0:\r\n result = self.cursor.fetchall()\r\n serverID_res = int(result[0][0])\r\n print(\"serverID = {}\".format(serverID_res))\r\n\r\n if serverID_res == int(connection.serverID): # pokud se získané ID z databáze rovná tomu, které v sobě\r\n # uchovává connection, redundantní check, ale just4safety\r\n res = self.cursor.execute(\"\"\"INSERT INTO `IRC_other_messages` (IRC_servers_serverID,\r\n fromHostmask,\r\n messageBody,\r\n commandType,\r\n timeReceived)\r\n values (%s, %s, %s, %s, %s)\"\"\", (serverID_res, event.source, message, event.type.upper(),\r\n datetime.datetime.utcnow()))\r\n\r\n\r\n self.db.commit()", "def nameservers(self, irc, msg, args):\n irc.reply('Our Nameservers are NS1.CHEMICALSERVERS.COM and NS2.CHEMICALSERVERS.COM')", "def on_nick_change(bot, trigger):\n\told_nick = trigger.nick\n\tnew_nick = Identifier(trigger.args[0])\n\tfor channel in bot.privileges:\n\t\tif new_nick in bot.privileges[channel]:\n\t\t\tlog(bot, channel, '*** {} is now known as {}', old_nick, new_nick);", "def pre_irc_client(self):\n pass", "def test_nickChange(self):\n oldnick = \"foo\"\n newnick = \"bar\"\n self.protocol.register(oldnick)\n self.protocol.irc_RPL_WELCOME(\"prefix\", [\"param\"])\n self.protocol.setNick(newnick)\n self.assertEqual(self.protocol.nickname, oldnick)\n self.protocol.irc_NICK(\"{}!quux@qux\".format(oldnick), [newnick])\n self.assertEqual(self.protocol.nickname, newnick)", "def test_overrideAlterCollidedNick(self):\n nick = \"foo\"\n self.protocol.alterCollidedNick = lambda nick: nick + \"***\"\n self.protocol.register(nick)\n self.protocol.irc_ERR_NICKNAMEINUSE(\"prefix\", [\"param\"])\n lastLine = self.getLastLine(self.transport)\n self.assertEqual(lastLine, \"NICK {}\".format(nick + \"***\"))", "def _switch_nick(self):\n self.nickname = 
self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)", "def connected(self, host, port):\n nick = self.nick\n hostname = self.hostname\n name = f\"{nick} on {hostname} using circuits/{systemVersion}\"\n\n self.fire(NICK(nick))\n self.fire(USER(nick, hostname, host, name))", "def handle(self, msg):\n\n if msg.command == \"PING\":\n self._sendmsg(\"PONG :{}\".format(msg.args[0]))\n\n elif msg.command == \"JOIN\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has joined {}\".format(name, channel))\n\n elif msg.command == \"PART\":\n name = msg.sendername\n channel = msg.args[0]\n print(\"{} has left {}\".format(name, channel))\n\n elif msg.command == \"KICK\":\n name = msg.sendername\n channel = msg.args[0]\n victim = msg.args[1]\n print(\"{} has kicked {} from {}\".format(name, victim, channel))\n\n elif msg.command == \"QUIT\":\n name = msg.sendername\n print(\"{} has quit IRC\".format(name))\n\n elif msg.command == \"KILL\":\n name = msg.sendername\n victim = msg.args[0]\n print(\"{} has killed {}\".format(name, victim))\n\n elif msg.command == \"NICK\":\n name = msg.sendername\n newname = msg.args[0]\n print(\"{} is now known as {}\".format(name, newname))\n\n elif msg.command == \"MODE\":\n name = msg.sendername\n target = msg.args[0]\n mode = msg.args[1]\n print(\"{} has set the mode of {} to {}\".format(name, target, mode))\n\n elif msg.command == \"NOTICE\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}]! {}\".format(name, target, message))\n\n elif msg.command == \"PRIVMSG\":\n name = msg.sendername\n target = msg.args[0]\n message = msg.args[1]\n print(\"[{} -> {}] {}\".format(name, target, message))\n\n elif msg.command.isdigit():\n print(msg.args[-1])\n\n else:\n print(str(msg))\n\n hooks.handle(self, msg)", "def on_me_joined(self, raw_msg, **kwargs):", "def conn(self):\n self.send(\"setname/{}/{}\".format(self.msg_id, self.name))\n self.msg_id += 1", "def connected(self, host, port):\n\n nick = self.nick\n hostname = self.hostname\n name = \"{0:s} on {1:s} using isomer/{2:s}\".format(\n nick, hostname, '1.0'\n )\n\n self.fire(USER(nick, hostname, host, name))\n self.fire(NICK(nick))", "def registration_complete(self):\n s = self.server\n nick = self.ident.nick\n log.debug(f'{self} ## {self.ident} is now registered to {s}')\n self.send_as_server(RPL_WELCOME, f'{nick} :Welcome to the Internet Relay Network {self.ident}')\n self.send_as_server(RPL_YOURHOST, f'{nick} :Your host is {s}, running version {s.version}')\n self.send_as_server(RPL_CREATED, f'{nick} :This server was created {s.created}')\n user_modes = ''.join(s.supported_user_modeset)\n chan_modes = ''.join(s.supported_chan_modeset)\n self.send_as_server(RPL_MYINFO, f'{nick} :{s} {s.version} {user_modes} {chan_modes}')\n self.send_as_server(RPL_LUSERCLIENT, f'{nick} :There are {len(s.clients)} user(s) on 1 server')\n self.send_as_server(RPL_LUSERCHANNELS, f'{nick} {len(s.channels)} :channels formed')\n self.set_mode('+i')", "async def on_ready():\r\n await bot.change_presence(activity=discord.Game(name='Sailing the High Seas | $help'))\r\n print('Logged in as:\\n{0} (ID: {0.id})'.format(bot.user))", "async def off(self, ctx, *, nickname=\"\"):\n nickname = nickname.strip()\n if nickname == \"\":\n nickname = None\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.delete_message(ctx.message)\n await self.bot.say(\"RIP music 
lol\")\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I miss the `Change Nickname` or `Manage Messages` permission\")", "def accept_command():\n # TODO", "def on_connection(self, name):\n if name == \"admin\":\n self.name = \"admin\"\n self.handler = Handler(self.simulation, ProxyLock(self.service),\n [Attribute(\"weather\"), Attribute(\"current_date\"),\n ListAttribute(\"parties\",\n MultiAttribute(Attribute(\"name\"))),\n ListAttribute(\"companies\",\n MultiAttribute(Attribute(\"name\"),\n Attribute(\"color\"),\n Attribute(\"market_share\"))),\n ListAttribute(\"energy_types\",\n MultiAttribute(Attribute(\"percentage_use\"),\n Attribute(\"name\"),\n Attribute(\"color\")))],\n [Attribute(\"paused\"), Attribute(\"is_setup\")], [])\n self.send_packet(msg_type=\"initial\")\n return True\n entity_type = ''\n parties = list(filter(lambda p: p.name == name, self.simulation.parties))\n if len(parties) == 1:\n self.handler = Handler(self.simulation, ProxyLock(parties[0]), self.party_watchers[0],\n self.party_watchers[1], self.party_watchers[2])\n entity_type = 'party'\n else:\n companies = list(filter(lambda c: c.name == name, self.simulation.companies))\n if len(companies) == 1:\n self.handler = Handler(self.simulation, ProxyLock(companies[0]),\n self.company_watchers[0], self.company_watchers[1],\n self.company_watchers[2])\n entity_type = 'company'\n else:\n self.connection.error(\"No company/party with name {}!\".format(name))\n return False\n self.name = name\n self.send_packet(msg_type=\"initial\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n protocol.send_news(\"New {type} named {name} created!\"\\\n .format(type=entity_type, name=name))\n return True", "async def on_start(self):\n\t\t\n\t\t# Register callback.\n\t\tawait self.instance.command_manager.register(\n\t\t\tCommand(command='muffin', target=self.muffin, admin=False, description='Bake a muffin').add_param(name='login', required=True))", "def irc_NICK(self, prefix, params):\n old_nick = prefix.split('!')[0]\n new_nick = params[0]\n self.logger.log(\"%s is now known as %s\" % (old_nick, new_nick))", "def log_in(self, username):\n if len(username) < 3 or len(username) > 9:\n self.send(SIG.BAD_NAME_LENGTH)\n elif username in clients:\n self.send(SIG.USERNAME_TAKEN)\n else:\n clients[username] = self\n print(username, \"connected\")", "async def on_ready():\n print(\"I'm in\")\n print(client.user)\n await client.change_presence(activity=discord.Game(name=IN_GAME))" ]
[ "0.694035", "0.6767303", "0.66506344", "0.65627545", "0.6395496", "0.62655073", "0.61659", "0.6128941", "0.61008", "0.6065453", "0.6005221", "0.5998594", "0.599429", "0.597298", "0.5958442", "0.59406614", "0.5906776", "0.58808583", "0.5872089", "0.5869215", "0.58509284", "0.5829039", "0.5829014", "0.58110374", "0.58089983", "0.5800944", "0.57834065", "0.5763199", "0.57603765", "0.5760203" ]
0.6800195
1
commands executed when the bot received a private message forwards the command and the event to self.do_command()
def on_privmsg(self, c, e): self.do_command(e.arguments()[0], c, e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_private_message(self, private_message):\n pass", "def privmsg(self, user, channel, message):\n # Only actually private messages\n user = user.split('!', 1)[0]\n if (channel != self.help_channel\n or user in self.ignore\n or not user.strip()):\n return\n if message.startswith('.ident'):\n self.whois(user)\n # Code dispatching\n if user in self.admins:\n if message[:3] in self.prefixes: \n self.engine.code(user, message[3:])\n # Built-In Commands\n elif message[0] == '.':\n parts = message[1:].split()\n command, args = parts[0], parts[1:]\n # Public commands\n # echo #\n if command == \"echo\":\n self.msg(self.help_channel, ' '.join(args))\n # mktmp user1 [user2 ...] #\n if command == \"redent\":\n self.engine.code(user, \" \".join(args))\n self.engine.code(user, \"\\n\")\n if command == \"mktmp\":\n if len(args) == 0:\n self.msg(user, \"Usage:\")\n self.msg(user, \"mktmp <user1> [<user2> ...]\")\n else:\n map(self.engine.temp_admin, args)\n # mkadmin user1 [user2 ...] #\n if command == \"mkadmin\":\n if len(args) == 0:\n self.msg(user, \"Usage:\")\n self.msg(user, \"mkadmin <user1> [<user2> ...]\")\n else:\n map(self.engine.make_admin, args)\n # rmadmin user1 [user2 ...] #\n if command == \"rmadmin\":\n if len(args) == 0:\n self.msg(user, \"Usage:\")\n self.msg(user, \"rmadmin <user1> [<user2> ...]\")\n else:\n map(self.engine.remove_admin, args)\n # reset #\n if command == \"reset\":\n self.engine.reset()\n if command == \"timeout\":\n try:\n if len(args) == 0: raise Exception\n timeout = int(args[0])\n self.engine.set_timeout(timeout)\n except:\n self.msg(user, \"Usage:\")\n self.msg(user, \"timeout <seconds>\")\n if command == \"move\":\n if len(args) == 0:\n self.msg(user, \"Usage:\")\n self.msg(user, \"move <newchannel>\")\n else:\n self.leave(self.help_channel)\n self.engine.set_channel(args[0])\n self.help_channel = args[0]\n self.join(self.help_channel)\n if command == \"home\":\n self.leave(self.help_channel)\n self.engine.set_channel(IRC_CHANNEL)\n self.help_channel = IRC_CHANNEL\n self.join(self.help_channel)\n if command == \"admins\":\n self.msg(self.help_channel, \"[%s]\" % \", \".join(self.engine.admins()))", "async def on_message(self, message):\n if message.author.bot:\n return # Ignore all bots.\n await self.process_commands(message)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def cmd(self, message):\n pass", "def execute_message_received(self, message_received):\n pass", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def _on_op_private(self, msg):\r\n private = msg[\"private\"]\r\n handler = None\r\n try:\r\n handler = getattr(self, \"_on_op_private_\" + private)\r\n except AttributeError:\r\n self.debug(\"### _on_op_private() ignoring: private=%s\" % private)\r\n self.debug(pretty_format(msg))\r\n\r\n if handler:\r\n handler(msg)", "async def on_message(message):\r\n if message.channel.is_private: # < makes PMs show up on command line\r\n sendto = \"\"\r\n if message.author.name == bot.user.name:\r\n sendto = \"(-> {}) \".format(message.channel.user.name)\r\n print(\"{} {}{}: {}\".format(str(datetime.datetime.now())[5:19], sendto, message.author.name, 
message.content))\r\n if message.content.startswith('~'): # < makes commands case-insensitive\r\n a = message.content\r\n b = message.content.find(' ')\r\n if b != -1:\r\n c = message.content.replace(a[:b], a[:b].lower(), 1)\r\n else:\r\n c = message.content.lower()\r\n message.content = c\r\n await bot.process_commands(message)", "def receivePrivateMessage(self, user, message):\n pass", "def command(self, msg):\n self.cmd_pub.publish(msg)", "def execute(cmd, msg, private=False):\n cmd_dict = private_cmds if private else cmds\n if cmd in cmd_dict:\n return cmd_dict[cmd](msg)", "def on_command(server, user, command, args):", "def _command(self, *cmd, handler=None):", "def privmsg(self, user, channel, msg):\n user = user.split('!', 1)[0]\n self.logger.log(\"<%s> %s\" % (user, msg))\n\n # Check to see if they're sending me a private message\n if channel == self.nickname:\n self.on_pm(user,channel,msg)\n return\n\n # Check to see if they're asking me for help\n if msg.startswith(self.nickname + \": help\"):\n msg = \"%s: I'm a little stupid at the minute; current commands I accept are:\" % user\n self.logged_msg(channel, msg)\n msg = \"%s: is the space open?\" % self.nickname\n self.logged_msg(channel, msg)\n return\n\n # If its a message directed at me, deal with it\n if msg.startswith(self.nickname + \":\"):\n self.on_msg(user, channel,msg)\n return\n\n\n # Otherwise check to see if it is a message directed at me\n if msg.startswith(self.nickname + \":\"):\n msg = \"%s: I am a log bot\" % user\n msg += \", say \\'%s:help\\' for more information\" % self.nickname\n self.logged_msg(channel, msg)\n return", "async def on_message(message: Message):\n if message.author.bot:\n # Ignore messages from bots\n return\n elif message.content == appearance.default_prefix + \"help\":\n # Default help command\n await help.help_command(message)\n elif message.guild is None:\n # Private messages\n pass\n else:\n # Guild messages\n await client.process_commands(message)", "def command(data):\n LOG.debug(f\"Received text from {data['user']['name']}: {data['command']}\")\n\n room_id = data[\"room\"]\n user_id = data[\"user\"][\"id\"]\n\n if user_id != self.user:\n timer = self.timers_per_room.get(room_id)\n timer.reset()\n\n message = data[\"command\"]\n for user in self.players_per_room[room_id]:\n if user[\"id\"] == user_id:\n user[\"msg_n\"] += 1\n # Let's do some message mangling, but only to every second message\n if user[\"msg_n\"] % 2 == 0:\n message = message[::-1]\n message = message.upper()\n\n # emit the message to all other users\n # (the user who sent will see the original; has already seen it)\n for user in self.players_per_room[room_id]:\n if user[\"id\"] != user_id:\n self.sio.emit(\n \"text\",\n {\n \"room\": data[\"room\"],\n \"receiver_id\": user[\"id\"],\n \"message\": message,\n \"impersonate\": user_id,\n },\n callback=self.message_callback,\n )", "def on_command(self, game) -> None:\n pass", "async def on_message(message):\n\n # This line prevent the bot to answer itself\n if message.author == client.user:\n return\n\n if message.content.startswith(config.COMMAND_KEY):\n \"\"\"\n We dont want the bot to be scanning other messages that do not start with the command\n \"\"\"\n\n for command in config.registered_commands:\n if command.content == message.content or command.alt == message.content:\n await command.on_triggered(message)\n return\n\n # Handle here if the commando is not registered\n help_message = '```\\nComandos disponibles:\\n'\n for command in config.registered_commands:\n 
help_message += command.__str__()\n help_message += '```'\n\n await message.channel.send(\n help_message\n )", "async def on_message(self, msg: discord.Message) -> None:\n # If the prefix is set\n if self.prefix != '':\n # Check if the received message was not sent by the bot itself\n if msg.author != self.user:\n if msg.content.startswith(self.prefix):\n command = msg.content[1:].split(' ')[0]\n args = msg.content[1:].split(' ')[1:]\n # Send command with arguments to on_command function\n await self.on_command(command, args, msg)", "async def event_message(ctx):\n\n # the bot should not react to itself\n if ctx.author.name.lower() == BOT_NICK.lower():\n return\n\n # relay message to command callbacks\n await bot.handle_commands(ctx)", "def on_dccmsg(self, c, e):\n\n args = e.arguments()[0].split(\" \", 1)\n if len(args) > 0:\n self.do_command(args[0], c, e)", "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def _handle_commands(self, event, session):\n message = event['body']\n\n for regex, func, help in self._COMMANDS:\n match = regex.match(message)\n if match is not None:\n func(self, event, session=session, **match.groupdict())\n return True\n\n return False", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def execute(self, irc_c, msg, cmd):", "def privmsg(self, user, channel, msg):\n user = user.split('!', 1)[0]\n\n # Check to see if they're sending me a private message\n if channel == self.nickname:\n msg = \"I see you...\"\n self.msg(user, msg)\n return\n\n # Otherwise check to see if it is a message directed at me\n if msg.startswith(self.nickname):\n msg = \"%s: I'm a bot ding ding.\" % user\n self.msg(channel, msg)\n self.logger.log(\"<%s> %s\" % (self.nickname, msg))\n\t\n if msg.startswith(\"!monies\"):\n\t\t\ttarget = msg.split(' ')[1]\n\t\t\tamount = msg.split(' ')[2]\n\t\t\tself.money.calculate(user, target, amount)\n\t\t\tself.msg(channel, \"%s: Thank you\" % user)", "def _act_on(self, message):\n if 'PING' in self.ts.get_human_readable_message(message): # PING/PONG silliness\n self._add_to_chat_queue(self.ts.get_human_readable_message(message.replace('PING', 'PONG')))\n\n db_session = self.Session()\n command = self._get_command(message, db_session)\n if command is not None:\n user = self.ts.get_user(message)\n user_is_mod = self.ts.check_mod(message)\n if self._has_permission(user, user_is_mod, command, db_session):\n self._run_command(command, message, db_session)\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {} you\\'re not authorized to use the command: !{}'\n .format(user, command[0]))\n db_session.commit()\n db_session.close()", "def privmsg(self, user, channel, msg):\n user = user.split('!', 1)[0]\n log.msg(user, channel, msg)\n if not self.responding:\n return\n # Check to see if they're sending me a private message\n # If so, the return channel is 
the user.\n observers = []\n if channel.lower() == self.nickname.lower():\n respondTo = user\n ss = self.defaultSession\n for s in self.findSessions(user):\n for o in s.observers:\n observers.append(o.name.decode(ss.encoding))\n else:\n respondTo = channel\n ss = self.findSessions(channel)[0]\n\n msg = msg.decode(ss.encoding)\n try:\n sentence = linesyntax.parseSentence(msg)\n except (RuntimeError, ParserSyntaxError):\n return\n\n user = user.decode(ss.encoding)\n respondTo = respondTo.decode(ss.encoding)\n req = Request(user, respondTo, msg)\n req.sentence = sentence\n\n _observers = [o.decode(ss.encoding) for o in observers]\n if sentence.command:\n # ignore people talking to other people\n if sentence.botName is not None and sentence.botName != self.nickname.lower():\n return\n\n if respondTo == user:\n response = ss.privateCommand(req, *_observers)\n else:\n response = ss.command(req)\n self.sendResponse(response)\n elif sentence.verbPhrases:\n if respondTo == user:\n response = ss.privateInteraction(req, *_observers)\n else:\n response = ss.interaction(req)\n self.sendResponse(response)\n else:\n pass", "def do_command(self, args):\n pass" ]
[ "0.6949553", "0.6780412", "0.6731864", "0.6605808", "0.6591112", "0.6566711", "0.65491927", "0.6518306", "0.65104854", "0.64871526", "0.6435231", "0.6421582", "0.6418484", "0.6351781", "0.6327283", "0.63123596", "0.6310017", "0.6302293", "0.62811166", "0.62644047", "0.62423533", "0.6223958", "0.6195988", "0.6184645", "0.61709124", "0.616448", "0.61611927", "0.6148865", "0.61253434", "0.6090772" ]
0.7241555
0
commands executed when the bot received a dcc message currently does nothing
def on_dccmsg(self, c, e): args = e.arguments()[0].split(" ", 1) if len(args) > 0: self.do_command(args[0], c, e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self, irc_c, msg, cmd):", "async def on_message(message):\n\n # This line prevent the bot to answer itself\n if message.author == client.user:\n return\n\n if message.content.startswith(config.COMMAND_KEY):\n \"\"\"\n We dont want the bot to be scanning other messages that do not start with the command\n \"\"\"\n\n for command in config.registered_commands:\n if command.content == message.content or command.alt == message.content:\n await command.on_triggered(message)\n return\n\n # Handle here if the commando is not registered\n help_message = '```\\nComandos disponibles:\\n'\n for command in config.registered_commands:\n help_message += command.__str__()\n help_message += '```'\n\n await message.channel.send(\n help_message\n )", "def cmd(self, message):\n pass", "async def on_message(message):\n #Before doing anything\n #Check to see if the message started with the command character\n if not message.content.startswith(commandCharacter):\n #If it didn't, return\n return\n \n #Ensure the bot wasn't the one who sent the message\n if message.author == client.user:\n #If it was, return\n return\n \n #Kill is checked by default (cannot be disabled)\n if message.content.startswith(commandCharacter+'kill'):\n await client.send_message(message.channel, 'Goodbye Forever...')\n await client.logout()\n os.system('stty sane')\n exit(0)\n \n #Parse through the list of all enabled commands\n for command in enabledCommands:\n #We want to ignore case when comparing the message content\n messageContent = message.content.lower()\n #If the message matches one of our commands, we will handle it\n #Requires whitespace after command name\n if messageContent.startswith(commandCharacter+command):\n await handleCommand(client, message, voicePlayerList)", "def on_privmsg(self, c, e):\n self.do_command(e.arguments()[0], c, e)", "def received_message(self, msg):\n command = int(msg[:8], base=16)\n msg = msg[8:]\n self.log.debug(\"CONTROLLER - RECEIVED COMMAND: \" + str(command))\n self.log.debug(\"CONTROLLER - MSG: \" + str([int(msg[i:i+8], base=16) for i in range(0, len(msg), 8)]))\n if command == 0:\n # 0 - opponent start the game\n self.master.add_log(\"Opponent starts the game.\")\n elif command == 1:\n # 1 - you start the game\n self.master.add_log(\"You start the game! 
Your turn.\")\n self.master.first = True\n self.master.new_round(False)\n elif command == 2:\n # 2 - start of your turn\n self.master.add_log(\"Your turn.\")\n self.master.new_round()\n elif command == 3:\n # 3 - opponent draws a card\n self.master.opp_hand.add_placeholder()\n self.master.add_log(\"Opponent draw a card.\")\n elif command == 4:\n # 4,x,y - opponent plays a card with x id on y spot on gameboard\n c_id = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n card = self.master.database.get_card(c_id)\n if card.card_type == \"Spell\":\n self.master.opp_sfield.set_card(card)\n else:\n self.master.opp_bfield.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent played a card {card.name}.\")\n elif command == 5:\n # 5,v,x,y - player v picks up card from x space from y spot to his hand\n # v - 0/1 - you/opponent\n # x - 0/1 - mana/battlefield\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n card = self.master.mana.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from mana zone to your hand.\")\n elif c_space == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from battle zone to your hand.\")\n elif c_player == 1:\n if c_space == 0:\n card = self.master.opp_mana.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from mana to his hand.\")\n elif c_space == 1:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from battle zone to his hand.\")\n elif command == 6:\n # 6,v,x,y - player v puts card from x space from y spot to his graveyard\n # v - 0/1 - you/opponent\n # x - 0/1/2 - mana/battlefield/hand\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"yu_mn\", c_pos)\n elif c_space == 1:\n self.master.a_move_to_graveyard(\"yu_bf\", c_pos)\n elif c_space == 2:\n card = self.master.hand[c_pos]\n self.master.a_move_to_graveyard(\"yu_hd\", c_pos)\n self.master.send_message(15, card.id) # Sent back which card was discarded\n elif c_player == 1:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"op_mn\", c_pos, False)\n elif c_space == 1:\n # Do not change to a_move_to_graveyard\n if c_pos == 5:\n card = self.master.opp_sfield.remove_card()\n else:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent's card {card.name} from battle zone was moved to his graveyard.\")\n elif command == 7:\n # 7,x,y - opponent puts y card from x space to manazone\n # x - 0/1/2/3 - hand/deck/graveyard\n c_space = int(msg[:8], base=16)\n c_id = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card {card.name} from his hand to the mana zone\")\n elif c_space == 1:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his deck to the mana zone\")\n 
elif c_space == 2:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_graveyard.remove_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to the mana zone\")\n elif command == 8:\n # 8,x - opponent adds card from his hand to y shield (face down)\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.add_placeholder(c_pos)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card from his hand to shields\")\n elif command == 9:\n # 9,x,y - Opponent tap/untap card on y spot in mana zone\n # x - 0/1 - tap/untap\n c_tap = bool(int(msg[:8]))\n c_pos = int(msg[8:16], base=16)\n if c_tap:\n self.master.opp_mana.untap_card(c_pos)\n else:\n self.master.opp_mana.tap_card(c_pos)\n elif command == 10:\n # 10,x - (info) opponent looks under his shield on x spot\n c_pos = int(msg[:8], base=16)\n self.master.add_log(f\"Opponent is peeking his {c_pos} shield\")\n elif command == 11:\n # 11,x,y - opponent looks under my shield/card on hand on y spot\n # x - 0/1 - hand/shield\n c_space = int(msg[:8])\n c_pos = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.hand[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} card in hand\")\n elif c_space == 1:\n card = self.master.shields[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} shield\")\n self.master.send_message(111, card.id)\n elif command == 111:\n # 111,x - \n c_id = int(msg[:8], base=16)\n # TODO: split command to separate hand and shield\n # TODO: show in the UI what the card actually is\n self.master.add_log(f\"The choosen card is {c_id}\")\n elif command == 12:\n # 12,x,y - opponent attacks your x card with his y card on the battlefield\n c_opp_pos = int(msg[:8], base=16)\n c_my_pos = int(msg[8:16], base=16)\n opp_card = self.master.opp_bfield[c_opp_pos]\n my_card = self.master.bfield[c_my_pos]\n self.master.add_log(f\"Opponent is attacking your card {my_card.name} with card {opp_card.name}.\")\n self.master.creature_attacked(c_opp_pos, c_my_pos)\n elif command == 112:\n # 112,x - returned which card you will attack\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 13:\n # 13,x,y1,y2,... - opponent attacks your shields with y card\n # x - position of creature on the board\n # ya - a-th shield attacked by this creature\n creature_pos = int(msg[:8], base=16)\n msg = msg[8:]\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n shields_string = \", \".join([str(pos) for pos in shields_pos])\n self.master.add_log(f\"Your shields at pos {shields_string} are being attacked by {self.master.opp_bfield[creature_pos].name}.\")\n self.master.shields_attacked(creature_pos, shields_pos)\n elif command == 113:\n # 113,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block shield attack, continue\n self.master.attack_shield()\n else:\n # Oppponent blocked with creature\n self.master.selected_shields = []\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 14:\n # 14,y1,y2,... 
- opponent destroys your shields\n # ya - a-th shield\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n self.master.shield_destroyed(shields_pos)\n elif command == 114:\n # 114,x - opponent picked up x shield to his hand\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up {c_pos} shield to his hand.\")\n self.master.refresh_screen()\n elif command == 214:\n # 214,x - opponent played x shield to spell/battle zone\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.add_log(f\"Opponent played a card from {c_pos} shield trigger.\")\n self.master.refresh_screen()\n elif command == 314:\n # 314 - opponent ended handling shield attack\n self.master.selected_card = []\n self.master.your_turn = 1\n elif command == 15:\n # 15 - id of the discarded card\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent discarded {card.name}\")\n self.master.refresh_screen()\n elif command == 16:\n # 16,v,x,y - x player taps/untaps a y creature\n # v - 0/1 - tap/untap\n # x - 0/1 - you/opponent\n # y - pos\n c_tap = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_tap == 0:\n # Tap\n if c_player == 0:\n # You\n self.master.bfield.set_tapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now tapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_tapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now tapped.\")\n if c_tap == 1:\n # Untap\n if c_player == 0:\n # You\n self.master.bfield.set_untapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now untapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_untapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now untapped.\")\n self.master.refresh_screen()\n elif command == 17:\n # 17,c,s1,p1,s2,p2... - opponent chooses which cards to destroy from the list\n # c - how many creatures to destoy\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_destoyed(count, target_list)\n elif command == 117:\n # 117 - opponent choosed cards and his actions ended\n self.master.post_destroy_creatures()\n elif command == 18:\n # 18,x - opponent adds card x from his deck to hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his deck to his hand\")\n elif command == 19:\n # 19,x - opponent adds card x from his graveyard to his hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.remove_card(card)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to his hand\")\n elif command == 20:\n # 20,c,s1,p1,s2,p2... 
- opponent chooses which cards to move to manazone from the list\n # c - how many creatures to sacrafice\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_put_to_mana(count, target_list)\n elif command == 120:\n # 120 - opponent choosed cards and his actions ended\n self.master.post_sacrafice_creatures()\n elif command == 21:\n # 21,y,x - player x puts card from y pos on battlefield zone to manazone\n # x - 0/1 - opponent/you\n # y - position\n c_player = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n if c_player == 0:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent moved card {card.name} from his battlezone to the mana zone\")\n elif c_player == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.mana.add_card(card)\n self.master.add_log(f\"Opponent moved your card {card.name} from battlezone to your mana zone\")\n elif command == 22:\n # 22,x - player x puts card from y pos on battlefield zone to hand\n # x - position\n c_pos = int(msg[:8], base=16)\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up card {card.name} from his battlezone to his hand\")\n elif command == 23:\n # 23 - opponent added an z effect to x card on y battefield\n c_pos = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_effect_name = int(msg[16:24], base=16)\n effect_name = EffectName(c_effect_name).name\n if c_player == 0:\n # to the opponent\n card = self.master.opp_bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to his card {card.name}\")\n elif c_player == 1:\n # to the player\n card = self.master.bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to your card {card.name}\")\n elif command == 24:\n # 24,x - opponent attacks you directly with x card\n # x - position of creature on the board\n creature_pos = int(msg[:8], base=16)\n self.master.add_log(f\"You are being directly attacked by {self.master.opp_bfield[creature_pos].name}.\")\n self.master.directly_attacked(creature_pos)\n elif command == 124:\n # 124,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block, you win\n self.master.win()\n else:\n # Oppponent blocked with creature\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 25:\n # 25 - opponent won the game\n self.master.lose(True)\n elif command == 26:\n # 26 - opponent lost the game\n self.master.win(True)\n elif command == 27:\n # 27 - start of the next turn\n self.master.turn_count += 1\n self.master.add_turn_info()", "def _custom_privmsg(self, data) :\n try:\n self.socket.send(data)\n if self.dcctype == \"chat\":\n self.socket.send(\"\\n\")\n log('Sent %d bytes of data over DCC' % len(data), 2)\n\n except socket.error:\n # Ouch!\n self.disconnect(\"Connection reset by peer.\")", "async def event_message(ctx):\n\n # the bot should not react to itself\n if ctx.author.name.lower() == BOT_NICK.lower():\n return\n\n # relay message to command callbacks\n await bot.handle_commands(ctx)", "def exec_commands(com):\n reply = ''\n if com is not None:\n if com == commands[0]:\n tables = db.create_tables(houses, 
from_)\n if tables == True:\n for j in range(len(c_responses[0]) - 1):\n# can use join and split functions to create softer code?? at least in future instances\n bot.send_message(c_responses[0][j], from_)\n else:\n reply = c_responses[0][(len(c_responses[0])-1)]\n elif com == commands[1]:\n house_info = db.house_info(from_)\n # Add feautures to find highest scoring house and return number of members\n reply = \"Houses:\\n\"\n for house in house_info:\n reply += house[1] + \"\\n\"\n if house[2] != None:\n reply += f\"Score: {house[2]}pts\\n\\n\"\n else:\n reply += f\"Score: 0pts\\n\\n\"\n elif com.startswith(commands[2]):\n instructions = com.split()\n id = 0\n info = user_query()\n user_id = info['user']['id']\n check = db.check_admin(from_, user_id)\n if check and check != 'not sorted':\n for house in houses:\n id += 1\n if house == instructions[1]:\n score = db.update_house_score(id, instructions[2], from_)\n reply = f\"{instructions[1]} new score is {score}\"\n else:\n reply = \"You have no power over me! PS:(if you are an admin use the /appoint me command to be recognised as such)\"\n\n\n elif com == commands[3]:\n username = item['message']['from']['username']\n user_id = item['message']['from']['id']\n num = db.add_member_info(username, from_, user_id)\n if num[1]:\n reply = f\"Better be... {houses[num[0]-1]}\"\n else:\n print(num[0][0])\n reply = f\"I stand by my decision, {houses[num[0][0]-1]} will help you on the way to greatness!\"\n elif com == commands[4]:\n m_list = db.member_info(from_)\n reply = str(m_list)\n elif com == commands[5]:\n info = user_query()\n username = info['user']['username']\n m_info = db.member_info(from_, username)\n reply = f\"\"\"\n Username: {m_info[2]}\\nHouse: {houses[m_info[3]]}\\nStatus: {m_info[4]}\\nScore: {m_info[5]}\\n\n \"\"\"\n elif com == commands[6]:\n info = user_query()\n username = info['user']['username']\n user_id = info['user']['id']\n status_info = info['status']\n if status_info == 'creator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Headmaster')\n reply = f\"Rise Headmaster {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Headmaster\"\n elif status_info == 'administrator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Professor')\n reply = f\"Hence forth you shall be known as Professor {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Professor\"\n else:\n reply = 'Desist pretender! 
Only the entitled may command me so!'\n elif com == commands[7]:\n for command in commands:\n reply += f'{command}\\n'\n print(reply)\n \n return reply", "def on_dccchat(self, c, e):\n\n self.__privMsg(c, e, FEATURE_DISABLED)\n return\n\n # check parameters\n if len(e.arguments()) != 2:\n return\n\n # retrieve parameters\n args = e.arguments()[1].split()\n if len(args) == 4:\n try:\n address = ip_numstr_to_quad(args[2])\n port = int(args[3])\n except ValueError:\n return\n\n DCCQueue.append((address, port))\n self.__privMsg(c, e, DCC_CONNECTION_QUEUED)\n\n #try:\n # con = self.dcc_connect(address, port)\n # self.__IpToUser[self.getIpStringByDCCConnection(con)] = {\"auth\": NOT_AUTHED, \"userid\": 0}\n #except DCCConnectionError, error:\n # print 'DCC Connection failed: %s:%s' % (address, port)\n # print error", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")", "async def çıkış(con):\r\n check=str(con.message.channel)\r\n if check == 'Direct Message with {}'.format(con.message.author.name):#COMMAND USED IN DM\r\n await bot.send_message(con.message.channel,\"**You must be in a `server voice channel` to use this command**\")\r\n\r\n if check != 'Direct Message with {}'.format(con.message.author.name):#COMMAND NOT IN DM\r\n \r\n 
# IF VOICE IS NOT CONNECTED\r\n if bot.is_voice_connected(con.message.server) == False:\r\n await bot.send_message(con.message.channel,\"**Bot kanala bağlanmamış !**\")\r\n\r\n # VOICE ALREADY CONNECTED\r\n if bot.is_voice_connected(con.message.server) == True:\r\n bot.loop.create_task(queue_songs(con,True))", "async def on_message(message):\n if message.author == client.user or not is_enabled(message):\n return\n # if message.author.id != 169896955298709505:\n # return\n # if message.author.id == 244573404059926529:\n # return\n # log_chat(\"chatlogs\", message)\n # if message.content.lower().startswith('hello') or message.content.lower().startswith(\"hey\"):\n # await message.channel.send('Hello {} !'.format(message.author.mention))\n # return\n\n await client.process_commands(message)\n await react_to_msg(message, client)\n await copypasta_on_msg(message)\n await blacklistCheck(message)\n await translate(message)", "async def on_message(self, message):\n if message.author.bot:\n return # Ignore all bots.\n await self.process_commands(message)", "async def on_message(message):\r\n if message.channel.is_private: # < makes PMs show up on command line\r\n sendto = \"\"\r\n if message.author.name == bot.user.name:\r\n sendto = \"(-> {}) \".format(message.channel.user.name)\r\n print(\"{} {}{}: {}\".format(str(datetime.datetime.now())[5:19], sendto, message.author.name, message.content))\r\n if message.content.startswith('~'): # < makes commands case-insensitive\r\n a = message.content\r\n b = message.content.find(' ')\r\n if b != -1:\r\n c = message.content.replace(a[:b], a[:b].lower(), 1)\r\n else:\r\n c = message.content.lower()\r\n message.content = c\r\n await bot.process_commands(message)", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def cmd(self, context, message):\r\n return True", "def on_message(data):\n pass", "def test_receivedMOTD(self):\n lines = [\n \":host.name 375 nickname :- host.name Message of the Day -\",\n \":host.name 372 nickname :- Welcome to host.name\",\n \":host.name 376 nickname :End of /MOTD command.\",\n ]\n for L in lines:\n self.assertEqual(self.client.calls, [])\n self.client.dataReceived(L + \"\\r\\n\")\n\n self.assertEqual(\n self.client.calls,\n [\n (\n \"receivedMOTD\",\n {\n \"motd\": [\n \"host.name Message of the Day -\",\n \"Welcome to host.name\",\n ]\n },\n )\n ],\n )\n\n # After the motd is delivered, the tracking variable should be\n # reset.\n self.assertIdentical(self.client.motd, None)", "def on_command(self, game) -> None:\n pass", "def 
disable_cmd_ended_cb(self, event):\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = (\n f\"{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}\"\n )\n self.logger.error(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)\n else:\n log_msg = f\"{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}\"\n self.logger.info(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)", "async def on_message(self, msg: discord.Message) -> None:\n # If the prefix is set\n if self.prefix != '':\n # Check if the received message was not sent by the bot itself\n if msg.author != self.user:\n if msg.content.startswith(self.prefix):\n command = msg.content[1:].split(' ')[0]\n args = msg.content[1:].split(' ')[1:]\n # Send command with arguments to on_command function\n await self.on_command(command, args, msg)", "def on_message(mosq, obj, msg):\n print(msg.topic + \" - \" + str(msg.payload))\n nodes = msg.topic.split('/')\n global timeoutstarted\n global timeoutdisplayblocks\n global myLcdManager\n if nodes[0]=='clients':\n if nodes[2]=='configure':\n if str(msg.payload) == 'reboot':\n os.system('reboot')\n else:\n myLcdManager = lcd_manager.LcdManager(sortedlist, config)\n processRoundConfig(str(msg.payload))\n timeoutstarted = 0.0\n timeoutdisplayblocks = 0\n elif nodes[2] == 'instructions':\n myLcdManager.display(str(msg.payload), 20, \"0\")\n #start timer?\n if 'timeout' in roundconfig and roundconfig['timeout'] > 0.0:\n resetBlocks = True\n timeoutstarted = time.time()\n elif nodes[2] == 'timeout':\n roundconfig['timeout'] = float(str(msg.payload))\n elif nodes[2] in controlids:\n ctrlid = nodes[2]\n if nodes[3] == 'enabled':\n if str(msg.payload) == \"0\":\n roundconfig['controls'][ctrlid]['enabled'] = False\n #switch it off\n myLcdManager.display(\" \", config['local']['controls'][ctrlid]['display']['width'], ctrlid)\n else:\n roundconfig['controls'][ctrlid]['enabled'] = True\n #switch it on\n myLcdManager.display(roundconfig['controls'][ctrlid]['name'], config['local']['controls'][ctrlid]['display']['width'], ctrlid)\n elif nodes[3] == 'name':\n if str(msg.payload) == '':\n myLcdManager.clear(ctrlid)\n else:\n myLcdManager.display(str(msg.payload), config['local']['controls'][ctrlid]['display']['width'], ctrlid, False)\n elif nodes[0] == 'server':\n if nodes[1] == 'ready':\n mess = str(msg.payload)\n if mess == 'started':\n myLcdManager = lcd_manager.LcdManager(sortedlist, config)\n client.publish(\"server/register\", json.dumps(config['interface']))\n elif mess == 'ready':\n global hasregistered\n if not hasregistered:\n hasregistered = True\n client.publish(\"server/register\", json.dumps(config['interface']))\n elif mess == 'poweroff':\n os.system('poweroff')", "def on_ctcp(self, raw_msg, source, msg, **kwargs):", "def Execute(data):\n if data.IsChatMessage():\n if Parent.HasPermission(data.User, \"Caster\", \"\") and MySet.Caster:\n return\n\n wordslist = MySet.Blacklist.split(\",\")\n for word in wordslist:\n if data.UserName.lower() == word.lower():\n return\n\n process(data.Message, data.UserName)\n\n global COUNT\n\n if (COUNT>=MySet.EmoteTreshold):\n EnqueueAudioFile(MySet.Sound)\n Parent.SendStreamMessage(MySet.EventTriggerMessage)\n reset()\n\n lastMessage = time.time()", "async def on_message(message: Message):\n if message.author.bot:\n # Ignore messages from bots\n return\n elif message.content == appearance.default_prefix + \"help\":\n # Default help command\n await help.help_command(message)\n elif 
message.guild is None:\n # Private messages\n pass\n else:\n # Guild messages\n await client.process_commands(message)", "def cmnd(self, event, name, cmnd):\n\n name = name.lower()\n bot = self.byname(name)\n if not bot:\n return 0\n from gozerbot.eventbase import EventBase\n j = EventBase(event)\n j.txt = cmnd\n q = Queue.Queue()\n j.queues = [q]\n j.speed = 3\n start_new_thread(plugins.trydispatch, (bot, j))\n result = waitforqueue(q)\n if not result:\n return\n res = [\"[%s]\" % bot.name, ]\n res += result\n event.reply(res)", "def on_message_received(ch, method, properties, body):\n # the body contains the command flag followed by a colon ':' and the message for the drone\n # decode the body to utf8\n received_bytes = body.decode('utf-8')\n # split the received_bytes to get the command _flag and message\n recieved_message = received_bytes.split(':')\n # since rabbit mq body is a byte\n if (str(recieved_message[0]) == \"c01\"):\n # c01 - command center orders the drone to deliver a item\n print(\"Order Received from the command center to deliver an item to the following address \\n\", str(\n recieved_message[1]))\n time.sleep(2)\n # print in the drone's console that the item has been lift off\n print('\\nLifting off the Item to the delivery address.')\n print('\\nUpdating Status to the command centre ......')\n # Assume the drone has reached the delivery address . Now send a\n # message to the warehouse command center that it has reached the\n # delivery area\n time.sleep(5)\n rpc_sendback(\"c02\")\n # Assume the drone has delivered the item and issue the status message\n # to the command center\n time.sleep(5)\n rpc_sendback(\"c03\")\n # #Assume the drone has reached the parking spot and issue the message to the command center that is available for next instruction\n time.sleep(5)\n rpc_sendback(\"c04\")\n\n else:\n print(\"Received Instruction from Warehouse \" +\n str(recieved_message[1]))\n channel.basic_ack(delivery_tag=method.delivery_tag)\n # channel.start_consuming()", "def commands():\n pass" ]
[ "0.6557355", "0.64251477", "0.62862307", "0.60895944", "0.60687983", "0.6026806", "0.5992381", "0.5973637", "0.59235543", "0.5915687", "0.5910744", "0.59014237", "0.585649", "0.5838838", "0.579819", "0.57697064", "0.575427", "0.574659", "0.5727791", "0.567641", "0.5660974", "0.5658647", "0.5654163", "0.5646824", "0.5646128", "0.56438935", "0.5634747", "0.5634653", "0.56327146", "0.56313884" ]
0.69047314
0
returns the userid of the connected user on a DCCConnection
def __getUserIdByDCCConnection(self, c): try: UserId = self.__IpToUser[self.getIpStringByDCCConnection(c)]['userid'] if UserId > 0: return UserId else: return NOT_AUTHED except KeyError: return NOT_AUTHED
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUserID(self):\n\t\treturn self.UserID", "def _getLoggedinUserId(self):\n securityManager = getSecurityManager()\n return securityManager.getUser()._login", "def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None", "def __authUser(self, c, e):\n try:\n UserId = self.__database.getUserIdByBotKey(self.getParameterListByEvent(e)[0]);\n self.__IpToUser[self.getIpStringByDCCConnection(c)]['userid'] = int(UserId)\n return UserId\n except IndexError:\n return 0;", "def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")", "def get_userid(self):\n return util.kbase_env.user", "def get_user_id(self):\n return self.id_user", "def _get_user(self, call_info):\n unique_name = call_info['sender']\n uid = self._dbus_proxy.GetConnectionUnixUser(unique_name)\n return pwd.getpwuid(uid).pw_name", "def get_uid(username):\n\t\tif username is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from auth_user WHERE username=%s\" % (username)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def get_user_id():\n return os.getuid()", "def get_userid():\n return _userid()", "def get_user_id(self):\n raise NotImplementedError", "def user_id(self):\n # type: () -> string_types\n return self._user_id", "def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")", "def fetch_current_user_id(s):", "def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def getId(self):\n return self.getUserName()", "def getId(self):\n return self.getUserName()", "def userid(self):\n mtool = getToolByName(self.context, 'portal_membership')\n return mtool.getAuthenticatedMember().getId()", "def _get_unknown_userid(self):\n cursor = self.conn.cursor()\n unknown_user_str = dbtypes.User.null\n cursor.execute(\"select id from users where uniqueid='%s'\" % unknown_user_str)\n return cursor.fetchone()[0]", "def get_id(self):\r\n return self.username", "def user_id(self) -> str:\n return self._user_id", "def user_id(self) -> str:\n return self._user_id", "def user_id(self):\n return self.status.user[\"id\"]", "def get_id(self):\n return self.username" ]
[ "0.6915114", "0.66406924", "0.6616438", "0.65946686", "0.6594121", "0.659404", "0.65924335", "0.6588098", "0.6537157", "0.64954484", "0.64458466", "0.6439603", "0.6431099", "0.6419845", "0.63970035", "0.6389727", "0.637988", "0.637988", "0.637988", "0.637988", "0.637988", "0.6375318", "0.6375318", "0.63499457", "0.63481563", "0.6340589", "0.6339289", "0.6339289", "0.63360405", "0.6316396" ]
0.80359054
0
execute the command given by an event
def do_command(self, command, c, e): # get command type cmdtype = self.__resolveCommandType(command, e) # ensure the cmd is valid if self.__commandExists(command, cmdtype): try: # only if command is registered if self.__commandHandlers[cmdtype].has_key(command): # check for recovered db if EVENT_MCX_DATABASE_RECOVERED.isSet(): self.__databaseAvailable = True # if database required but not available if self.__commandHandlers[cmdtype][command]['db'] == True and not self.__databaseAvailable: # tell the user self.__privMsg(c, e, DATABASE_SERVER_NOT_AVAILABLE) # otherwise execute command else: self.__commandHandlers[cmdtype][command]['func'](c, e) # command not registered, tell the user else: self.__privMsg(c, e, (COMMAND_NOT_FOUND % command)) # database was set, but is not available anymore except NoDatabaseException, (error): self.__databaseAvailable = False self.__privMsg(c, e, DATABASE_CONNECTION_INTERRUPTED) # fire event if not EVENT_MCX_DATABASE_LOST.isSet(): EVENT_MCX_DATABASE_LOST.set() # command does not exist else: self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def execute(self, command_name, *args):\n if command_name in self._commands.keys():\n self._history_position += 1\n self._commands[command_name].execute(args)\n if len(self._history) == self._history_position:\n # This is a new event in hisory\n self._history.append((time.time(), command_name, args))\n else:\n # This occurs if there was one of more UNDOs and then a new\n # execute command happened. In case of UNDO, the history_position\n # changes, and executing new commands purges any history after\n # the current position\"\"\"\n self._history = self._history[:self._history_position+1]\n self._history[self._history_position] = {\n time.time(): [command_name, args]\n }\n else:\n print(f\"Command [{command_name}] not recognised\")", "def ConsoleRun(self, command, sender):\n pass", "def execute_command(self, command):\n raise NotImplementedError", "def send_event(self, event):\n cmd = \"event \" + event\n self.mgen_pipe.Send(cmd)", "def on_command(server, user, command, args):", "def do_command(self, args):\n pass", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def event_queue_proc(self,event):\r\n event()", "def command():\n pass", "def run(self, event):\n pass", "def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover", "def on_dccmsg(self, c, e):\n\n args = e.arguments()[0].split(\" \", 1)\n if len(args) > 0:\n self.do_command(args[0], c, e)", "def process_command(self, command):\n\t\tif not Mover.executing_action:\n\t\t\tcmd = command.split(' ')[0]\n\t\t\ttry:\n\t\t\t\tparam = float(command.split(' ')[1])\n\t\t\texcept:\n\t\t\t\tparam = None\n\t\t\tfinally:\n\t\t\t\tMover.executing_action = True\n\t\t\t\t#Load sets the thread's run target and parameters\n\t\t\t\tself.action_thread.load(getattr(self, cmd), param)\n\t\t\t\t#spawn an action thread\n\t\t\t\tself.action_thread.run()\n\t\t\t\tMover.executing_action = False", "def execute_event(self, level):\n\t\tif self.text_set:\n\t\t\tdialog_set = self.build_dialog_set(self.text_set)\n\t\t\tevent = GameEvent([dialog_set[0]])\n\t\t\tevent.execute(level)", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "async def terminal(event):\r\n command = utils.raw(event.message)\r\n await event.edit(f\"**Running command:**\\n`{command}`\")\r\n result = subprocess.getoutput(command)\r\n await event.edit(f\"**Running command:**\\n`{command}`\\n**Result:**\\n`{result}`\")", "def runCommand(command):\n None", "def do(self, line): \n self.interface.onecmd(line)", "def process_event(self, 
event):\n options = {\n Actions.Spawned: self.process_spawned_event,\n Actions.Walked: self.process_walked_event,\n Actions.Ate: None,\n Actions.Eaten: self.process_died_event,\n Actions.Mitosed: None,\n Actions.Died: self.process_died_event,\n Actions.Expired: self.process_died_event,\n Actions.NoAction: None,\n }\n if options[event.action] is not None:\n print(event)\n print('-'*32)\n options[event.action](event)", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def execute_cmd(self, text=None, session=None):\n if not self.ndb.ev_channel and self.db.ev_channel:\n # cache channel lookup\n self.ndb.ev_channel = self.db.ev_channel\n if self.ndb.ev_channel:\n self.ndb.ev_channel.msg(text, senders=self.id)", "def register_events():\n return [Events.Command(\"example_command\")]", "def execute(self) -> None:\n self.command(self.target)", "def process_event(event, device_id):\n print(event)\n if event.type == EventType.ON_CONVERSATION_TURN_STARTED:\n adjustvolume('30')\n subprocess.Popen([\"aplay\", \"/opt/RPIGassistant/audio-files/Listening.wav\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_RESPONDING_STARTED and event.args and not event.args['is_error_response']):\n GPIO.output(5,GPIO.LOW)\n GPIO.output(6,GPIO.HIGH)\n led.ChangeDutyCycle(50)\n\n if event.type == EventType.ON_RESPONDING_FINISHED:\n GPIO.output(6,GPIO.LOW)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT):\n say(random.choice(['sorry, i did not hear what you said', \n 'sorry, i did not hear anything', \n 'pardon', \n 'sorry, have you said something?']))\n restorevolume()\n print()\n\n if (event.type == EventType.ON_NO_RESPONSE):\n restorevolume()\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and\n event.args and not event.args['with_follow_on_turn']):\n restorevolume()\n GPIO.output(5,GPIO.LOW)\n led.ChangeDutyCycle(0)\n print()\n\n if event.type == EventType.ON_DEVICE_ACTION:\n for command, params in process_device_actions(event, device_id):\n print('Do command', command, 'with params', str(params))", "def trigger(self, type, event):", "def callback(self, event):\n button = event[\"button\"]\n\n cmd = self._callbacks.get(self._uuidstr(self.global_id, button), None)\n cmd = self._callbacks.get(self._uuidstr(event[\"name\"], button), cmd)\n cmd = self._callbacks.get(self._uuidstr(event[\"instance\"], button), cmd)\n\n if cmd is None:\n return\n if callable(cmd):\n cmd(event)\n else:\n bumblebee.util.execute(cmd, False)", "def execute_command(VD):\n \n command = VD['command']\n print(f\"command = {command}\")\n \n if command == \"cmd_inference_exit\":\n VD['exit'] = True\n elif command == \"cmd_inference_stop\":\n stop_recording(VD)\n elif command == \"cmd_inference_kill\":\n kill_inference_process(VD)\n stop_recording(VD)\n elif command in VD['command_dict']:\n function, params = VD['command_dict'][command]\n function(*params)\n elif command == 
\"cmd_clear_terminal\":\n VD['terminal_output'] = \"\"\n elif command == \"cmd_clear_all_text\":\n VD['fixed_text'] = \"\"\n elif command == \"cmd_inference_break\":\n pass\n else:\n VD['terminal_output'] += f\"Command <{command}> is not known.\\n\"\n \n VD['command'] = \"\"" ]
[ "0.671452", "0.6637844", "0.6502819", "0.6483133", "0.6397224", "0.63965106", "0.6337618", "0.63246924", "0.6317841", "0.6308281", "0.62820333", "0.62799823", "0.62768865", "0.62648654", "0.6263708", "0.62426805", "0.6224748", "0.6214349", "0.62103915", "0.6210015", "0.62017584", "0.6193798", "0.6193798", "0.6193798", "0.6187178", "0.61813873", "0.6166011", "0.6158859", "0.6147377", "0.61447215" ]
0.6656607
1
checks whether a given command is registered on the given type
def __commandExists(self, command, cmdtype):
    try:
        # method exists
        if hasattr(self, self.__getFullCommandName(command, cmdtype)):
            # command handler type exists
            if self.__commandHandlerTypeExists(cmdtype):
                return True
            else:
                return False
        else:
            return False
    # any key does not exist
    except KeyError:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __commandHandlerTypeExists(self, type):\n return self.__commandHandlers.has_key(type)", "def is_of_type(cmd):\r\n raise NotImplementedError()", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def command_registered(self, command: str) -> bool:\n return command in self._commands", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False", "def validate_command(command):\n return command in list(VALID_COMMANDS.keys())", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)", "def check_commands(self):\n pass", "def test_command_method_exists(self):\n motor_shield = MotorShield(self.options, self.connection)\n\n for command in motor_shield.commands:\n self.assertIn(command, dir(motor_shield))", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True", "def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None", "def is_cmd(self, name):\n \n return name in self.cmds", "def cmd_type(args):", "def is_valid_command(command):\n # TODO(etscrivner): Eventually we'd like to construct this dynamically from\n # a list of all available commands\n valid_commands = [\n 'add', 'append', 'decr', 'delete', 'flush_all', 'get', 'gets', 'incr',\n 'prepend', 'quit', 'replace', 'set', 'stats', 'verbosity', 'version',\n ]\n\n if not command:\n return False\n\n parts = command.split('\\r\\n')\n command_parts = parts[0].split(' ')\n\n command = command_parts[0]\n return command.strip().lower() in valid_commands", "def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True", "def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. 
Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass", "def is_configured(command):\n return command in COMMANDS", "def is_match(self, command_bytes):", "def _msg_is_command(self, msg):\n return isinstance(msg, dict)", "def _is_push_command(self):\n return self._match_memory_pattern(\"push\")", "def _known_command(self, command, do_command):\n result = self.known_commands.get(command)\n if result is not None:\n return result\n translated_command = self.gtp_aliases.get(command, command)\n try:\n response = do_command(\"known_command\", translated_command)\n except BadGtpResponse:\n known = False\n else:\n known = (response == 'true')\n self.known_commands[command] = known\n return known", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None", "def expects_result(self, command):\n return isinstance(command, (self.package(\"Syntax\").Operator,\n self.package(\"Syntax\").Formule))" ]
[ "0.75897187", "0.7336485", "0.73289627", "0.72346324", "0.70128125", "0.6790929", "0.66802984", "0.66593987", "0.665149", "0.6643384", "0.66387373", "0.6612062", "0.65696615", "0.6531041", "0.64635706", "0.64447665", "0.64256", "0.64225155", "0.63799554", "0.6340413", "0.63357025", "0.6278686", "0.6262686", "0.62485087", "0.62267786", "0.61959296", "0.61649007", "0.6137309", "0.61344755", "0.6114829" ]
0.77672505
0
resolves the command type by an event and a command
def __resolveCommandType(self, command, e):
    # check for existing DCC Connection
    try:
        if self.__IpToUser[e.source()]['auth'] == NOT_AUTHED:
            return 'not_authed_dcc'
        else:
            return 'authed_dcc'
    # DCC Connection does not exist
    except KeyError:
        if not is_channel(e.target()):
            return 'query'
        else:
            # defaults to channel
            return 'channel'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def type_command(ctx, name_from, name_to):", "def __resolveCommandFunction(self, command, e):\n return self.__getFullCommandName(command, self.__resolveCommandType(command, e))", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def _command(self, *cmd, handler=None):", "def decompose_commands(command_str: str):\n\n new_command = Command.from_string(command_str)\n\n post_event(\"decompose_new_command\", new_command)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def __init__(self, type, _command_input):\n self._command_input = _command_input\n \"\"\"\n Represents the command\n \"\"\"\n self._command = type", "def do_command(self, command, c, e):\n # get command type\n cmdtype = self.__resolveCommandType(command, e)\n\n # ensure the cmd is valid\n if self.__commandExists(command, cmdtype):\n try:\n # only if command is registered\n if self.__commandHandlers[cmdtype].has_key(command):\n # check for recovered db\n if EVENT_MCX_DATABASE_RECOVERED.isSet():\n self.__databaseAvailable = True\n\n # if database required but not available\n if self.__commandHandlers[cmdtype][command]['db'] == True and not self.__databaseAvailable:\n # tell the user\n self.__privMsg(c, e, DATABASE_SERVER_NOT_AVAILABLE)\n # otherwise execute command\n else:\n self.__commandHandlers[cmdtype][command]['func'](c, e)\n # command not registered, tell the user\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))\n # database was set, but is not available anymore\n except NoDatabaseException, (error):\n self.__databaseAvailable = False\n self.__privMsg(c, e, DATABASE_CONNECTION_INTERRUPTED)\n # fire event\n if not EVENT_MCX_DATABASE_LOST.isSet():\n EVENT_MCX_DATABASE_LOST.set()\n # command does not exist\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n 
self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "def add_command(self, name, command):\n if command['type'] == 'topic':\n if 'deadman_buttons' not in command:\n command['deadman_buttons'] = []\n command['buttons'] = command['deadman_buttons']\n if 'deadman_axes' not in command:\n command['deadman_axes'] = []\n command['axes'] = command['deadman_axes']\n elif command['type'] == 'action':\n if 'action_goal' not in command:\n command['action_goal'] = {}\n elif command['type'] == 'service':\n if 'service_request' not in command:\n command['service_request'] = {}\n self.command_list[name] = command", "def processCommand(self, command, args):\n\n commandMap = { \n \"new\" : self.createNewList,\n \"view\" : self.trelloView,\n \"add\" : self.trelloAddCard, \n \"remove\" : self.trelloDeleteCard, \n }\n\n if command not in commandMap: return \">> Command not found\" \n \n return commandMap[command](args)", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! 
Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def _receive_command(self, command):\n if command.startswith('RET '):\n print(command[4:]) # Return value\n elif command.startswith('ERROR '):\n logger.error('JS - ' + command[6:].strip())\n elif command.startswith('WARN '):\n logger.warn('JS - ' + command[5:].strip())\n elif command.startswith('PRINT '):\n print(command[5:].strip())\n elif command.startswith('INFO '):\n logger.info('JS - ' + command[5:].strip())\n elif command.startswith('SET_PROP '):\n # todo: seems weird to deal with here. implement by registring some handler?\n # Should be better when we implement a more formal protocol\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_prop_from_js(name, txt)\n elif command.startswith('SET_EVENT_TYPES '):\n _, id, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_event_types_js(txt)\n elif command.startswith('EVENT '):\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._emit_from_js(name, txt)\n else:\n logger.warn('Unknown command received from JS:\\n%s' % command)", "def _get_command_lookup(self, command_dict):", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. 
{e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "def known_command(self, command):\n return self._known_command(command, self.do_command)", "def _process_command(self, command, opts):\n command_type = postproc.get_structure_type(command)\n if not opts.Ignore_motion and command_type == MOTION_COMMAND:\n return _process_motion_command(command, opts)\n elif not opts.Ignore_IOs and command_type == IO_COMMAND:\n return _process_io_command(command, opts)", "def _handle_bot_command(self, bot_command: BotCommand) -> str:\n try:\n player = self.ping_pong_service.get_player(bot_command.sender_id)\n except pingpong_service.PlayerDoesNotExist:\n self.ping_pong_service.add_new_player(bot_command.sender_id)\n return responses.new_player()\n\n if bot_command.command_type is None:\n return responses.unknown_command()\n elif bot_command.command_type == CommandType.HELP:\n return responses.help()\n elif bot_command.command_type == CommandType.NAME:\n if bot_command.command_value:\n success = self.ping_pong_service.update_display_name(player, bot_command.command_value.lower())\n if success:\n return responses.name_updated(bot_command.command_value.lower())\n else:\n return responses.name_taken()\n else:\n return responses.name(player.name)\n elif bot_command.command_type == CommandType.MATCH:\n return self._handle_match_command(bot_command.command_value)\n elif bot_command.command_type == CommandType.STATS:\n name = bot_command.command_value\n if name:\n try:\n rating, wins, losses, ratio = self.ping_pong_service.get_player_stats(name)\n return responses.player_stats(name, rating, ratio, wins, losses)\n except pingpong_service.PlayerDoesNotExist:\n return responses.player_does_not_exist()\n else:\n return responses.stats(\n self.ping_pong_service.get_total_matches(), self.ping_pong_service.get_leaderboard()\n )\n elif bot_command.command_type == CommandType.UNDO:\n return responses.unknown_command()\n # w_name, w_rating, l_name, l_rating = pingpong_service.undo_last_match()\n # return responses.match_undone(w_name, w_rating, l_name, l_rating)\n return responses.unknown_command()", "def _handle_command(self, command: Command) -> None:\n if isinstance(command.result, LoadLabwareResult):\n # If the labware load refers to an offset, that offset must actually exist.\n if command.result.offsetId is not None:\n assert command.result.offsetId in self._state.labware_offsets_by_id\n\n definition_uri = uri_from_details(\n namespace=command.result.definition.namespace,\n load_name=command.result.definition.parameters.loadName,\n version=command.result.definition.version,\n )\n\n self._state.definitions_by_uri[definition_uri] = command.result.definition\n\n self._state.labware_by_id[\n command.result.labwareId\n ] = LoadedLabware.construct(\n id=command.result.labwareId,\n location=command.params.location,\n loadName=command.result.definition.parameters.loadName,\n definitionUri=definition_uri,\n offsetId=command.result.offsetId,\n displayName=command.params.displayName,\n )\n\n elif isinstance(command.result, MoveLabwareResult):\n labware_id = command.params.labwareId\n new_location = command.params.newLocation\n new_offset_id = 
command.result.offsetId\n\n self._state.labware_by_id[labware_id].offsetId = new_offset_id\n self._state.labware_by_id[labware_id].location = new_location", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def handle_command(message, slack_config):\n\n message.react(\"+1\")\n\n handler = {\n \"schedule_job\": handle_schedule_job,\n \"cancel_job\": handle_cancel_job,\n \"schedule_suppression\": handle_schedule_suppression,\n \"cancel_suppression\": handle_cancel_suppression,\n }[slack_config[\"type\"]]\n\n handler(message, slack_config)", "def dispatch_command(self, args):\n\t\targuments = {k: v for k, v in vars(args).items() if v is not None}\n\t\tfor c in self.COMMANDS.keys():\n\t\t\tcmd = arguments.get(c, False)\n\t\t\tidx = c\n\t\t\tif cmd:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn None\n\n\t\tif cmd not in self.COMMANDS[idx]:\n\t\t\traise CommandNotFoundError(\"{cmd} not registered\".format(cmd=cmd))\n\n\t\treturn getattr(self, self.COMMANDS[idx][cmd])(arguments)", "def parse_command(self, command):\n \n #chcek operation type\n mod_type = re.findall('.*(rotate|translate|zoom|make|time).*',command)[0]\n \n #for each operation type recover necessary parameters\n if mod_type == 'rotate':\n angle = int(re.findall('.*rotate by (\\d+).*', command)[0])\n axis = list(map(int,re.findall('.*around \\((\\d+)\\,(\\d+)\\,(\\d+).*', command)[0]))\n\n #if the rotation angle is large split it into 3 to ensure the rotation is accomplished fully\n if angle >= 180:\n new_q = self.q.create_from_axis_angle(angle/3*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = [(mod_type, new_q),(mod_type, new_q),(mod_type, new_q)]\n else:\n new_q = self.q.create_from_axis_angle(angle*2*np.pi/360, axis[0], axis[1], axis[2], degrees=False)\n result = (mod_type, new_q)\n\n elif mod_type == 'zoom':\n factor = float(re.findall('.*factor of (\\d*\\.*\\d+).*', command)[0])\n result = (mod_type, factor)\n\n elif mod_type == 'translate':\n translate = np.array(list(map(int,re.findall('.*by \\((\\-*\\d+)\\,(\\-*\\d+)\\,(\\-*\\d+).*', command)[0])))\n result = (mod_type, translate)\n\n elif mod_type == 'make':\n layer = int(re.findall('.*make layer (\\d+).*', command)[0])\n vis_status = command.split()[-1]\n if vis_status == 'invisible':\n result = ('vis', layer, False)\n else:\n result = ('vis', layer, True)\n \n elif mod_type == 'time':\n time_shift = int(re.findall('.*by (\\-*\\d+).*', command)[0])\n result = (mod_type, time_shift)\n return result", "def __setupCommandHandlerTypes(self):\n # dict saving all command handler types\n self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, 
current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def recognizeEvent(self, command: Union[str, Command]) -> BaseHouseEvent:\n if not isinstance(command, Command):\n if isinstance(command, str):\n command = Command(command)\n else:\n raise ValueError(f'Invalid argument type \"{type(command)}\" for command')\n\n builder = HouseEventBuilder(command)\n builder.findType()\n builder.findLocation(self.location)\n builder.setCurrentLocation(self.currentLocation)\n builder.findDevice()\n\n return builder.build()", "def _transform_command(self) -> None:\n self.command = None if self.command == [] else self.command" ]
[ "0.6290656", "0.62902445", "0.62281656", "0.6176488", "0.615144", "0.6104532", "0.6022825", "0.5793602", "0.57491267", "0.5735428", "0.5729035", "0.57039285", "0.5692468", "0.565963", "0.5639507", "0.5625254", "0.56236553", "0.562066", "0.56075734", "0.56060076", "0.56032777", "0.5587945", "0.557556", "0.55675024", "0.5566811", "0.55586725", "0.55495983", "0.5499362", "0.5485217", "0.546675" ]
0.62959677
0
resolve the function to call by an event and a command
def __resolveCommandFunction(self, command, e):
    return self.__getFullCommandName(command, self.__resolveCommandType(command, e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __exec_function_by_code(self,command,*args):\n\t\tself.__printer(\"Preparing to execute function: {0}\".format(command),level=LL_DEBUG)\n\t\t\n\t\tif command is None:\n\t\t\treturn\n\t\t\t\n\t\tcmd_exec = Commands()\n\t\tif command not in cmd_exec.command_list:\n\t\t\treturn\n\t\t\n\t\tif args:\n\t\t\tvalid_params = cmd_exec.validate_args(command,*args)\n\t\telse:\n\t\t\tvalid_params = None\n\t\t\t\t\n\t\t# Check if we have an event for this..\n\t\tif self.event_mode_command:\n\t\t\n\t\t\tfor emc in self.event_mode_command:\n\t\t\t\tif command in emc['command']:\n\t\t\t\t\t\n\t\t\t\t\t# TODO, check if a required mode is specified\n\t\t\t\t\t#if any(x in new_active_modes for x in emc['modes']):\n\t\t\t\t\t\n\t\t\t\t\t# HIT!\n\t\t\t\t\tprint \"DEBUG EVENT-MODE HIT!\"\n\t\t\t\t\tprint emc\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t \"name\": \"mode_track\"\n\t\t\t\t\t, \"type\": \"mode_change\"\n\t\t\t\t\t, \"modes\": [ \"track\" ]\n\t\t\t\t\t, \"device\": \"rgb_1\"\n\t\t\t\t\t, \"pattern\": \"on\"\n\t\t\t\t\t, \"rgb\": \"#ff0000\"\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trgb_dev = self.get_device_config(emc['device'])\n\t\t\t\t\tpin_r = rgb_dev['r']\n\t\t\t\t\tpin_g = rgb_dev['g']\n\t\t\t\t\tpin_b = rgb_dev['b']\n\t\t\t\t\t\n\t\t\t\t\t# ignore pattern for now..\n\t\t\t\t\t#turn on rgb_1, using ff0000\n\t\t\t\t\tself.gpio.pwm_rgb(pin_r,pin_g,pin_b,emc['rgb'])\n\t\t\t\t\t\n\t\tif callable(self.callback_function):\n\t\t\tprint \"-> Calling callback\"\t#blocking ??? yeah, probably..\n\t\t\tself.callback_function(command,valid_params)\n\t\t\t\n\t\t# return to original led color", "def _command(self, *cmd, handler=None):", "def _register_command(self, function, command_name):\r\n if command_name in self._commands:\r\n raise self.Error('Found two definitions for command %s' % command_name)\r\n self._commands[command_name] = function\r\n return function", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def __call__(self, trigger, type, event):", "def known_command(self, command):\n return self._known_command(command, self.do_command)", "def CmdHandler(self, *events: str, colon: bool = False,\n ircv3: bool = False) -> Callable:\n ...", "def resolve_events(self, args, context, info):\n params = {\n 'execution_id': self.id,\n }\n return EventLoader.get().load(params)", "def command(self, function=None, name=None):\r\n if name is None:\r\n return self._command(function)\r\n else:\r\n return partial(self._command, name=name)", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def dispatch_command(self, args):\n\t\targuments = {k: v for k, v in vars(args).items() if v is not None}\n\t\tfor c in self.COMMANDS.keys():\n\t\t\tcmd = arguments.get(c, False)\n\t\t\tidx = c\n\t\t\tif 
cmd:\n\t\t\t\tbreak\n\t\telse:\n\t\t\treturn None\n\n\t\tif cmd not in self.COMMANDS[idx]:\n\t\t\traise CommandNotFoundError(\"{cmd} not registered\".format(cmd=cmd))\n\n\t\treturn getattr(self, self.COMMANDS[idx][cmd])(arguments)", "def _get_command_lookup(self, command_dict):", "def register_command(self, func):\n self.commands[func.__name__] = func", "def exec_event_functions(self):\n for name, fdict in self._event_functions.items():\n exec_func=False\n if self.is_eventCodePresent(fdict['eventCode']) \\\n and (self.ievent % fdict['nevents']) == 0:\n exec_func = True\n \n det_class = psutils.getattr_complete(self,fdict['det'])\n \n if exec_func and det_class.is_in_keys:\n# print 'executing',det_class._name, fdict['attr']\n func = psutils.getattr_complete(det_class,fdict['attr']) \n func(**fdict['kwargs'])", "def on_command(self, session, cmd_list):\n assert cmd_list\n\n cmd = cmd_list[0]\n if cmd in self._commands:\n return self._commands[cmd].function(session, cmd_list)\n else:\n self.reply_text(session, \"NG:Unknown command [%s]\" % cmd)\n return True", "def __command_handler(self,\n command: int) -> Callable[[Dict[str, int]], None]:\n\n return {\n 0x00: self.__update_health,\n 0x0A: self.__update_warning,\n 0x0C: self.__update_firmware_state,\n 0x05: self.__update_modules,\n 0x07: self.__update_topology,\n 0x1F: self.__update_property,\n }.get(command, lambda _: None)", "def work(self):\n\n cmd = self.options.command\n cmdargs = self.options.args\n\n # find function\n fname = \"cmd_\" + cmd.replace('-', '_')\n if not hasattr(self, fname):\n self.log.error('bad subcommand, see --help for usage')\n sys.exit(1)\n fn = getattr(self, fname)\n\n b = inspect.signature(fn).bind(*cmdargs)\n\n fn(*b.args, **b.kwargs)", "def get_target_func(command: commands.BaseCommand) -> Callable:\n target_func = {\n 'unite': perform.perform_unite,\n 'make_migrations': perform.perform_make_migrations,\n 'make_relocations': perform.perform_make_relocations,\n 'migrate': perform.perform_migrate,\n 'relocate': perform.perform_relocate,\n 'sync': perform.perform_sync,\n 'freeze': perform.perform_freeze,\n 'show_tree': perform.perform_show_tree,\n 'runserver': perform.perform_runserver,\n }[getattr(command, 'name')]\n\n return target_func", "def route(self, command):\n\n def _route(func):\n self._command_hash_views[command] = func\n\n def __route(*args, **kwargs):\n return func(*args, **kwargs)\n\n return __route\n\n return _route", "def main(self, function):\n captured = self.command(function)\n self.default_command = captured.__name__\n return captured", "def handle_func_command(cls, command):\n cmd, _, args, kwargs = command\n\n try: # will work if tensors are wrappers\n\n # Replace all TensorFlow tensor with their child attribute\n # Note that we return also args_type which helps handling case 3 in the docstring\n new_args, new_kwargs, new_type, args_type = hook_args.unwrap_args_from_function(\n cmd, args, kwargs, return_args_type=True\n )\n # This handles case 3: it redirects the command to the appropriate class depending\n # of the syft type of the arguments and returns\n if args_type not in FrameworkTensor:\n return args_type.handle_func_command(command)\n\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n # Send it to the appropriate class and get the response\n response = new_type.handle_func_command(new_command)\n # Put back the wrappers where needed\n response = hook_args.hook_response(cmd, response, wrap_type=args_type)\n except PureFrameworkTensorFoundError: # means that 
it's not a wrapper but a pure tensor\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n command = cls.rgetattr(cls, cmd)\n return command(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: clean this line\n cmd_split = cmd.split(\".\")\n cmd_path = cmd_split[:-1]\n cmd_name = cmd_split[-1]\n cmd = \"syft.local_worker.hook.\" + \".\".join(cmd_path) + \".native_\" + cmd_name\n\n # Run the native function with the new args\n # Note the the cmd should already be checked upon reception by the worker\n # in the execute_command function\n if isinstance(args, tuple):\n response = eval(cmd)(*args, **kwargs)\n else:\n response = eval(cmd)(args, **kwargs)\n\n return response", "def run_command(self, command):\n print(f\"{self._name} is trying {command}\")\n location = self.position()\n self._variables['x'] = location[0]\n self._variables['y'] = location[1]\n #print(f\"Variables: {self._variables}\")\n\n try:\n command, *data = command.split(\" \")\n if command in ['var', 'add', 'sub', 'mult', 'div', 'neg']:\n data = list(map(self.convert, data))\n else:\n data = list(map(self.convert_vars, data))\n except:\n print(f\"Error: {self._name} could not perform command {command}\")\n return None\n if command in self.keywords:\n getattr(self, command)(*data)\n else:\n print(f\"{self._name} doesn't know how to do {command}.\")", "def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover", "def command(*args, **kwargs):\n def deco(fct):\n return Command(fct, **kwargs)\n if args:\n return deco(*args)\n return deco", "def command(self, function=None, prefix=None):\n def _command(func):\n captured_f = self.capture(func, prefix=prefix)\n self.commands[func.__name__] = captured_f\n return captured_f\n\n if function is not None:\n return _command(function)\n else:\n return _command", "def call_function(self):\n try:\n arg_list = self.argument_list()\n function_dict = {}\n info = []\n for name_arg in arg_list:\n type_arg = self.arguments_type[name_arg]\n function_dict[name_arg] = utils.value_from_rpc(self.argument(name_arg)[1])\n info.append('{0}({1}): {2}'.format(name_arg, type_arg, function_dict[name_arg]))\n\n log.info('Execute command \\'{0}\\' with arguments [{1}] from device \\'{2}\\''\n .format(self.name(), '; '.join(info), self.device.id))\n self.function(self.device, **function_dict)\n\n except Exception as err:\n t = traceback.format_exc()\n log.error('Command \\'{0}\\' raise exception: {1}'.format(self.name(), decode_string(t)))", "def command(f):\n commands.append(f)\n return f", "def command(f):\n commands.append(f)\n return f", "def _known_command(self, command, do_command):\n result = self.known_commands.get(command)\n if result is not None:\n return result\n translated_command = self.gtp_aliases.get(command, command)\n try:\n response = do_command(\"known_command\", translated_command)\n except BadGtpResponse:\n known = False\n else:\n known = (response == 'true')\n self.known_commands[command] = known\n return known", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()" ]
[ "0.581403", "0.5763023", "0.5742588", "0.56628585", "0.5634916", "0.56182754", "0.5588224", "0.5545319", "0.5525151", "0.5498268", "0.5489367", "0.5486277", "0.5469649", "0.5465293", "0.5461583", "0.54580754", "0.54303193", "0.5419406", "0.5382144", "0.53759444", "0.53471094", "0.5336339", "0.5335424", "0.53343624", "0.5312535", "0.5311128", "0.5294914", "0.5294914", "0.5294331", "0.52870464" ]
0.691852
0
returns the method name of this object for the given command and command type
def __getFullCommandName(self, command, type):
    return 'cmd_%s_%s' % (type, command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_name(self):\n return None", "def name(self):\n module_filepath = inspect.getfile(type(self))\n module_filename = os.path.basename(module_filepath)\n command_name, _ = os.path.splitext(module_filename)\n return command_name", "def command_type(self):\n return self._command_type", "def get_method_name(self):\n\t\treturn self.method_name", "def _getMethodName(self):\n return self.id().split('.')[-1]", "def name(self) -> str:\n return f\"{self.class_object.__name__}.{self.method_str}\"", "def method_name(self):\n pass", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def get_cmd(self, command):\n return self.commands[command][\"cmd\"]", "def command_type(self):\n t = self.current_command.split(' ')[0]\n if t in commands.get('arithmetic'):\n return 'C_ARITHMETIC'\n\n if t not in commands:\n raise ValueError('{} is an invalid command type.'.format(t))\n\n return commands.get(t)", "def get_command(self):\n return self.command", "def method(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"method\")", "def command_type(self) -> int:\n cur_inst = self._cur()\n if \"push\" in cur_inst:\n return C_PUSH\n elif \"pop\" in cur_inst:\n return C_POP\n elif \"if\" in cur_inst:\n return C_IF\n elif \"label\" in cur_inst:\n return C_LABEL\n elif \"goto\" in cur_inst:\n return C_GOTO\n elif \"return\" in cur_inst:\n return C_RETURN\n elif \"call\" in cur_inst:\n return C_CALL\n elif \"function\" in cur_inst:\n return C_FUNCTION\n else:\n return C_ARITHMETIC", "def getCommand(self, name):\n return self.commands[name]()", "def __resolveCommandFunction(self, command, e):\n return self.__getFullCommandName(command, self.__resolveCommandType(command, e))", "def get_command(self):\n return self.c_dict['COMMAND']", "def commandType(self):\n if self.currentCommand.startswith(AT) and len(self.currentCommand) > 1:\n return A_COMMAND\n\n elif self.currentCommand.startswith(LBRKT):\n return L_COMMAND\n\n elif EQU in self.currentCommand or SEMIC in self.currentCommand:\n return C_COMMAND", "def get_command(self) -> str:\n return 'title'", "def command(self):\n return self._command", "def method_name(self) -> str:\n if isinstance(self.view_func, str):\n return self.view_func\n return self.view_func.__name__", "def get_method_name(self) -> Optional[str]:\n current_mode = self.get_mode()\n # Check that 'Solvent' program is enabled.\n # Retreiving the remaining time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Method':\n self.logger.warning(\"Can't retreive selected method of the 'Method' \"\n \"program since this program is not currently \"\n f\"selected (selected program is '{current_mode}'). 
\"\n \"Select 'Method' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_METHOD_NAME)", "def _getCommand(self, cmd):\n try:\n cmd_str = cmd.decode('utf-8')\n return getattr(self, 'do_' + cmd_str, None)\n except:\n return None", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def getcurrentmethod(self):\n if self._methodname == None:\n print(\"No method defined.\")\n else:\n return self._methodname", "def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)", "def getCommand(self):\n return self.__cmd", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def get_method_name(self):\n return 'lipisha'", "def get_command_with_name(self, command_name):\n return self.commands[command_name]", "def __getattr__(self, name):\n return Command(self.cmd, name)" ]
[ "0.7045542", "0.7001352", "0.6930984", "0.6901749", "0.68966097", "0.6881658", "0.6783473", "0.6668294", "0.66286486", "0.66184616", "0.654716", "0.6532719", "0.6515743", "0.64767367", "0.6460036", "0.64187056", "0.6412228", "0.640943", "0.63901263", "0.6382807", "0.6377871", "0.6315287", "0.6302754", "0.62590575", "0.6251621", "0.62469554", "0.623554", "0.6193321", "0.61911446", "0.618066" ]
0.80738705
0
adds a new command handler to the system
def __addCommandHandler(self, command, type = 'channel', requiresdb = False):
    try:
        # ensure we are dealing with booleans
        if not requiresdb:
            requiresdb = False
        else:
            requiresdb = True
        # add the handler
        # check for existing command type
        if self.__commandHandlerTypeExists(type):
            cmdExec = self.__getFullCommandName(command, type)
            # if database required but no database available raise exception
            if requiresdb and not self.__databaseAvailable:
                raise ConfigurationException(CONFIG_DATABASE_NOT_AVAILABLE % cmdExec)
            # add handler only if the correct method exists
            if self.__commandExists(command, type):
                cmdHandler = {'func': getattr(self, cmdExec), 'db': requiresdb}
                self.__commandHandlers[type][command] = cmdHandler
            else:
                raise ConfigurationException(CONFIG_COMMAND_EXEC_NOT_FOUND % cmdExec)
        else:
            raise ConfigurationException(CONFIG_COMMAND_TYPE_NOT_FOUND % type)
    except ConfigurationException, (e):
        print 'Configuration failed: ',
        print 'Could not add the command handler for %s: ' % command
        print e.parameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def add_handler(self, handler):\n pass", "def register(self, command: str, handler: Any):\n\n if not command.startswith(\"/\"):\n command = f\"/{command}\"\n\n LOG.info(\"Registering %s to %s\", command, handler)\n self._routes[command].append(handler)", "def add_command_handler(self,command,command_handler):\n\t\tif(callable(command_handler)):\n\t\t\tif isinstance(command, str):\n\t\t\t\tself.command_handlers[command] = command_handler\n\t\t\telse:\n\t\t\t\traise NotAStringException(\"{} isn't a valid command name. Command names must be string\")\n\t\telse:\n\t\t\traise NotCallableException(\"{} is not a function\".format(command_handler))", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def cmd(self, command):\n self._commands.append(command)", "def _command(self, *cmd, handler=None):", "def add_command(self, name, command_class, ns=None):\n ep = EntryPointWrapper(name, command_class)\n self.add_command_ep(ep, ns=ns)", "def addHandler(self, fn):\n self.handlers.append(fn)", "def add(self, name, command):", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def add_command(self, cmd: Command):\n self._command_list.append(cmd)", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "def make_new_handler(self, *args, **kwargs):", "def add_command(self, newcmd, section=None):\r\n if section not in self._commands:\r\n self._commands[section] = list()\r\n\r\n self._commands[section].append(newcmd)", "def register_handler(self, handler):\r\n self.handler = handler", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def _register(self, comm, handler):", "def add_command(self, cmd):\n self.command_queue.put(cmd)", "def add_command(self, command_info):\n self.commands[command_info.name] = command_info", "def add_command(self, command):\n self.command.extend(command)", "def custom(self, command):\n self.command.append(command)\n return self", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def _register_handler(self, callback, cmd, helphint, hidden, handlers,\n synonyms=(), plugin=None):\n # Register any synonyms (done before we frig with the handlers)\n for entry in synonyms:\n self._register_handler(callback, entry, helphint, True, handlers,\n plugin=plugin)\n\n # Allow simple commands to be passed as strings\n cmd = cmd.split() if isinstance(cmd, (str, unicode)) else cmd\n\n for part in cmd:\n handlers = handlers.subcommands.setdefault(part, Handlers([], {}))\n handlers.handlers.append(Registration(callback, \" \".join(cmd),\n helphint, hidden, plugin))", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n 
msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def add_command(self, command):\n assert_type(command, str)\n assert not self._finalized, \"Cannot add a command if finalized\"\n self.commands.append(command)", "def register_handler(self, method, handler):\n self.handlers[method] = handler" ]
[ "0.76060593", "0.7179329", "0.71357757", "0.7014966", "0.6918433", "0.6892781", "0.68578625", "0.67891854", "0.67860955", "0.67845434", "0.6774669", "0.672538", "0.671378", "0.6682264", "0.6660707", "0.66473126", "0.66458935", "0.66225463", "0.66026723", "0.658991", "0.6584247", "0.6568199", "0.65544695", "0.6545544", "0.65415126", "0.65257984", "0.6515038", "0.6478158", "0.6462184", "0.6414785" ]
0.7319573
1
function that registers all handled command types
def __setupCommandHandlerTypes(self):
    # dict saving all command handler types
    self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def _commands(self) -> Dict[str, List[str]]:\r\n pass", "def commands():", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. 
Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def getCommands(self):", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def type_command(ctx, name_from, name_to):", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def accept_command():\n # TODO", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def process_commands(self, commands: List[str]):", "def register_command(self, func):\n self.commands[func.__name__] = func", "def _register(self, comm, handler):", "def _register_handler(self, callback, cmd, helphint, hidden, handlers,\n synonyms=(), plugin=None):\n # Register any synonyms (done before we frig with the handlers)\n for entry in synonyms:\n self._register_handler(callback, entry, helphint, True, handlers,\n plugin=plugin)\n\n # 
Allow simple commands to be passed as strings\n cmd = cmd.split() if isinstance(cmd, (str, unicode)) else cmd\n\n for part in cmd:\n handlers = handlers.subcommands.setdefault(part, Handlers([], {}))\n handlers.handlers.append(Registration(callback, \" \".join(cmd),\n helphint, hidden, plugin))", "def cmd_type(args):", "def init_command_objects(self):\n super().init_command_objects()\n device_data = DeviceData.get_instance()\n\n args = (device_data, self.state_model, self.logger)\n\n self.register_command_object(\"SetStowMode\", SetStowMode(*args))\n self.register_command_object(\n \"SetStandbyLPMode\", SetStandbyLPMode(*args)\n )\n self.register_command_object(\"SetOperateMode\", SetOperateMode(*args))\n self.register_command_object(\"Scan\", Scan(*args))\n self.register_command_object(\"EndScan\", EndScan(*args))\n self.register_command_object(\"Configure\", Configure(*args))\n self.register_command_object(\"StartCapture\", StartCapture(*args))\n self.register_command_object(\"StopCapture\", StopCapture(*args))\n self.register_command_object(\n \"SetStandbyFPMode\", SetStandbyFPMode(*args)\n )\n self.register_command_object(\"Slew\", Slew(*args))\n self.register_command_object(\"Track\", Track(*args))\n self.register_command_object(\"StopTrack\", StopTrack(*args))\n self.register_command_object(\"Abort\", Abort(*args))\n self.register_command_object(\"Restart\", Restart(*args))\n self.register_command_object(\"ObsReset\", ObsReset(*args))", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def load_commands():\n return [AddBook, FindBook, FindBooks, EditBook, RemoveBook, ReviewBook]", "def register_commands(self):\n for module in copy.copy(sys.modules).values():\n for command in module_functionalities(module, 'MARA_CLICK_COMMANDS', click.Command):\n if 'callback' in command.__dict__ and command.__dict__['callback']:\n package = command.__dict__['callback'].__module__.rpartition('.')[0]\n if package != 'flask':\n register_command(self, command, package)", "def register(self, parent):\n parent.registerCommand('delete', self.processDeleteCommand)\n parent.registerCommand('meshcreated', self.processMeshCreated)", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! 
Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def init_command_objects(self):\n super().init_command_objects()\n device_data = DeviceData.get_instance()\n args = (device_data, self.state_model, self.logger)\n self.register_command_object(\"TelescopeOn\", TelescopeOn(*args))\n self.register_command_object(\"TelescopeOff\", TelescopeOff(*args))\n self.register_command_object(\"Disable\", Disable(*args))\n self.register_command_object(\n \"TelescopeStandby\", TelescopeStandby(*args)\n )", "def register_events():\n return [Events.Command(\"example_command\")]", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)" ]
[ "0.6938134", "0.6763681", "0.67493486", "0.67387176", "0.66848844", "0.66441447", "0.6633142", "0.6633142", "0.6633142", "0.6633142", "0.65997595", "0.65389496", "0.65150225", "0.64862967", "0.6465988", "0.64483017", "0.6414662", "0.6412694", "0.638031", "0.6344845", "0.6317439", "0.6315696", "0.6307236", "0.62572926", "0.62475175", "0.6204256", "0.6198927", "0.6196534", "0.6191177", "0.6148646" ]
0.7765803
0
checks whether the given command type exists
def __commandHandlerTypeExists(self, type): return self.__commandHandlers.has_key(type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __commandExists(self, command, cmdtype):\n try:\n # method exists\n if hasattr(self, self.__getFullCommandName(command, cmdtype)):\n # command handler type exists\n if self.__commandHandlerTypeExists(cmdtype):\n return True\n else:\n return False\n else:\n return False\n # any key does not exist\n except KeyError:\n return False", "def is_of_type(cmd):\r\n raise NotImplementedError()", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def cmd_type(args):", "def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True", "def check_commands(self):\n pass", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False", "def validate_command(command):\n return command in list(VALID_COMMANDS.keys())", "def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. 
Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass", "def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False", "def is_cmd(self, name):\n \n return name in self.cmds", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())", "def test_command_method_exists(self):\n motor_shield = MotorShield(self.options, self.connection)\n\n for command in motor_shield.commands:\n self.assertIn(command, dir(motor_shield))", "def _msg_is_command(self, msg):\n return isinstance(msg, dict)", "def is_valid_command(command):\n # TODO(etscrivner): Eventually we'd like to construct this dynamically from\n # a list of all available commands\n valid_commands = [\n 'add', 'append', 'decr', 'delete', 'flush_all', 'get', 'gets', 'incr',\n 'prepend', 'quit', 'replace', 'set', 'stats', 'verbosity', 'version',\n ]\n\n if not command:\n return False\n\n parts = command.split('\\r\\n')\n command_parts = parts[0].split(' ')\n\n command = command_parts[0]\n return command.strip().lower() in valid_commands", "def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)", "def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True", "def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")", "def test_status_command(self):\n # make sure STATUS is a valid command\n command = Command('STATUS')\n self.assertTrue(type(command) == Command)", "def valid_command(command):\n\n (command_name, arg1) = split_command_input(command)\n\n slipt_arg1 = arg1.split('-')\n digit = ''\n if \" \" in arg1:\n (digit, rev) = arg1.split(' ')\n \n\n\n return command_name.lower() in valid_commands and (len(arg1) == 0 or is_int(arg1)\\\n or arg1.lower() == 'silent' or arg1.lower() == 'reversed' or arg1.lower() \\\n == 'reversed silent' or (is_int(slipt_arg1[0]) and is_int(slipt_arg1[1]))\\\n or (is_int(digit) and rev == 'reversed') or (is_int(digit) and rev == 'silent'))", "def _is_non_real_command_found(self, script_data):\n is_valid = True\n depends_on_commands = script_data.get('depends_on')\n if depends_on_commands:\n for command in depends_on_commands:\n if command != 'test-module':\n if command.endswith('dev') or command.endswith('copy'):\n error_message, error_code = Errors.invalid_command_name_in_script(script_data.get('name'),\n command)\n if self.handle_error(error_message, error_code, file_path=\"id_set.json\"):\n return not is_valid\n return is_valid", "def command_registered(self, command: str) -> bool:\n return command in self._commands", "def is_command(oin, env, pred_name: YPredName, arg: Any=None):\n return (env.check_predicate(obj, pred_name, arg) for obj in oin)" ]
[ "0.79482687", "0.7405757", "0.73042387", "0.71309036", "0.70830023", "0.69709074", "0.6903397", "0.68951637", "0.6782659", "0.6747694", "0.6743214", "0.6715056", "0.6692837", "0.6678598", "0.65998846", "0.6545335", "0.6541338", "0.6527914", "0.6507906", "0.65017587", "0.6481657", "0.6383794", "0.63712144", "0.63600165", "0.6298213", "0.6269488", "0.62515277", "0.6228243", "0.6222383", "0.6189794" ]
0.76852494
1
Solves the power flow using a fast decoupled method. Solves for bus voltages given the full system admittance matrix (for all buses), the complex bus power injection vector (for all buses), the initial vector of complex bus voltages, the FDPF matrices B prime and B double prime, and column vectors with the lists of bus indices for the swing bus, PV buses, and PQ buses, respectively. The bus voltage vector contains the set point for generator (including ref bus) buses, and the reference angle of the swing bus, as well as an initial guess for remaining magnitudes and angles. C{ppopt} is a PYPOWER options vector which can be used to set the termination tolerance, maximum number of iterations, and output options (see L{ppoption} for details). Uses default options if this parameter is not given. Returns the final complex voltages, a flag which indicates whether it converged or not, and the number of iterations performed.
def decoupledpf(Ybus, Sbus, V0, pv, pq, ppci, options): # old algortihm options to the new ones pp2pypower_algo = {'fdbx': 2, 'fdxb': 3} # options tol = options["tolerance_mva"] max_it = options["max_iteration"] # No use currently for numba. TODO: Check if can be applied in Bp and Bpp # numba = options["numba"] # NOTE: options["algorithm"] is either 'fdbx' or 'fdxb'. Otherwise, error algorithm = pp2pypower_algo[options["algorithm"]] voltage_depend_loads = options["voltage_depend_loads"] v_debug = options["v_debug"] baseMVA = ppci["baseMVA"] bus = ppci["bus"] branch = ppci["branch"] gen = ppci["gen"] # initialize i = 0 V = V0 Va = angle(V) Vm = abs(V) dVa, dVm = None, None if v_debug: Vm_it = Vm.copy() Va_it = Va.copy() else: Vm_it = None Va_it = None # set up indexing for updating V pvpq = r_[pv, pq] # evaluate initial mismatch P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq) # check tolerance converged = _check_for_convergence(P, Q, tol) # create and reduce B matrices Bp, Bpp = makeB(baseMVA, bus, real(branch), algorithm) # splu requires a CSC matrix Bp = Bp[array([pvpq]).T, pvpq].tocsc() Bpp = Bpp[array([pq]).T, pq].tocsc() # factor B matrices Bp_solver = splu(Bp) Bpp_solver = splu(Bpp) # do P and Q iterations while (not converged and i < max_it): # update iteration counter i = i + 1 # ----- do P iteration, update Va ----- dVa = -Bp_solver.solve(P) # update voltage Va[pvpq] = Va[pvpq] + dVa V = Vm * exp(1j * Va) # evalute mismatch P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq) # check tolerance if _check_for_convergence(P, Q, tol): converged = True break # ----- do Q iteration, update Vm ----- dVm = -Bpp_solver.solve(Q) # update voltage Vm[pq] = Vm[pq] + dVm V = Vm * exp(1j * Va) if v_debug: Vm_it = column_stack((Vm_it, Vm)) Va_it = column_stack((Va_it, Va)) if voltage_depend_loads: Sbus = makeSbus(baseMVA, bus, gen, vm=Vm) # evalute mismatch P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq) # check tolerance if _check_for_convergence(P, Q, tol): converged = True break # the newtonpf/newtonpf funtion returns J. We are returning Bp and Bpp return V, converged, i, Bp, Bpp, Vm_it, Va_it
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc(nbus, bus_type, V, ang, Y, Pg, Qg, Pl, Ql, tol):\n\n SWING_BUS, GEN_BUS, LOAD_BUS = 1, 2, 3\n\n V = V.flatten()\n # voltage in rectangular co-ordinates.\n V_rect = [V[i] * complex(cos(ang[i]), sin(ang[i])) for i in range(len(ang))] \n V_rect = array(V_rect)\n\n # bus current injection.\n cur_inj = Y * V_rect\n\n # power output.\n S = V_rect * cur_inj.conj()\n P = S.real\n Q = S.imag\n delP = Pg.flatten() - Pl.flatten() - P\n delQ = Qg.flatten() - Ql.flatten() - Q\n\n # zero out mismatches on swing bus and generation bus.\n for i in range(nbus):\n if bus_type[i] == SWING_BUS:\n delP[i] = 0\n delQ[i] = 0\n elif bus_type[i] == GEN_BUS:\n delQ[i] = 0\n\n # total mismatch.\n mism = max(abs(delQ)) + max(abs(delP))\n if mism > tol:\n conv_flag = 1\n else:\n conv_flag = 0\n return delP, delQ, P, Q, conv_flag", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: {:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def test_pde_vector():\n eq = PDE({\"u\": \"vector_laplace(u) + exp(-t)\"})\n grid = UnitGrid([8, 8])\n field = VectorField.random_normal(grid)\n\n res_a = eq.solve(field, t_range=1, dt=0.01, backend=\"numpy\", tracker=None)\n res_b = eq.solve(field, t_range=1, dt=0.01, backend=\"numba\", tracker=None)\n\n res_a.assert_field_compatible(res_b)\n np.testing.assert_allclose(res_a.data, res_b.data)", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = 
self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def solve(self):\n\n if self.optimizer == 'pulp':\n for constraint in self.constraints:\n self.engine_model += constraint\n\n self.engine_model += self.objective\n status = self.engine_model.solve(PULP_CBC_CMD(msg=False))\n solution = (\n np.vectorize(self._var_sol)(self.variable_set)\n if status == LpStatusOptimal\n else np.array([])\n )\n\n else:\n for constraint in self.constraints:\n self.engine_model.addConstr(constraint)\n\n self.engine_model.setObjective(self.objective, self.sense)\n self.engine_model.optimize()\n solution = (\n np.vectorize(self._var_sol)(self.variable_set)\n if self.engine_model.status == GRB.OPTIMAL\n else np.array([])\n )\n\n return solution", "def LTD_SolveCase(mirror=None):\n if mirror == None:\n flatStart = 0\n else:\n flatStart = 0 # never flat start ( could be changed to solnType options ) or reorder?\n if mirror.debug: print('flat start = %d' % flatStart)\n\n soln_start = time.time()\n errorCode = PSLF.SolveCase(\n 25, # maxIterations, Solpar.Itnrmx\n 0, \t# iterationsBeforeVarLimits, Solpar.Itnrvl\n 0,\t# flatStart, \n 1,\t# tapAdjustment, Solpar.Tapadj 1\n 1,\t# switchedShuntAdjustment, Solpar.Swsadj 1\n 1,\t# phaseShifterAdjustment, Solpar.Psadj 1\n 0,\t# gcdAdjustment, probably Solpar.GcdFlag 0\n 0,\t# areaInterchangeAdjustment, \n 1,\t# solnType, 1 == full, 2 == DC, 3 == decoupled \n 0, # reorder (in dypar default = 0)\n )\n soln_end = time.time()\n\n #handle timing \n if mirror:\n mirror.PFTime += (soln_end - soln_start)\n mirror.PFSolns += 1\n if mirror.debug: print('Power Flow Solution returns: %d' % errorCode)\n\n if errorCode == -1:\n '''Solution did not converge'''\n raise ValueError('*** PSLF power-flow solution did not converge.')\n return\n if errorCode == -2:\n '''Maximum iterations hit'''\n raise ValueError('*** PSLF power-flow solution stopped due to maximum number of iterations.')\n return\n\n #converged\n return", "def solve_CBC(self, lp):\n\t\tif not self.executable(self.path[1]):\n\t\t\traise \"PuLP: cannot execute \"+self.path[1]\n\t\tif not self.keepFiles:\n\t\t\tpid = os.getpid()\n\t\t\ttmpLp = os.path.join(self.tmpDir, \"%d-pulp.mps\" % pid)\n\t\t\ttmpSol = os.path.join(self.tmpDir, \"%d-pulp.sol\" % pid)\n\t\telse:\n\t\t\ttmpLp = lp.name+\"-pulp.mps\"\n\t\t\ttmpSol = lp.name+\"-pulp.sol\"\n##\t\tvs, variablesNames, constraintsNames, objectiveName = lp.writeMPS(tmpLp, rename = 1)\r\n\t\tvs = lp.writeMPS(tmpLp, rename = 0)\n\t\tif not self.msg:\n\t\t\tcbc = 
os.popen(self.path[1]+\" - > /dev/null 2> /dev/null\",\"w\")\n\t\telse:\n\t\t\tcbc = os.popen(self.path[1]+\" -\",\"w\")\n\t\tcbc.write(\"import \"+tmpLp+\"\\n\")\n\t\tif self.presolve:\n\t\t\tcbc.write(\"presolve on\\n\")\n\t\tcbc.write(\"strong %d\\n\" % self.strong)\n\t\tif self.cuts:\n\t\t\tcbc.write(\"gomory on\\n\")\n\t\t\tcbc.write(\"oddhole on\\n\")\n\t\t\tcbc.write(\"knapsack on\\n\")\n\t\t\tcbc.write(\"probing on\\n\")\n\t\tfor option in self.options:\n\t\t\tcbc.write(option+\"\\n\")\n\t\tif lp.sense == LpMinimize:\n\t\t\tcbc.write(\"min\\n\")\n\t\telse:\n\t\t\tcbc.write(\"max\\n\")\n\t\tif self.mip:\n\t\t\tcbc.write(\"branch\\n\")\n\t\telse:\n\t\t\tcbc.write(\"initialSolve\\n\")\n\t\tcbc.write(\"solution \"+tmpSol+\"\\n\")\n\t\tcbc.write(\"quit\\n\")\n\t\tif cbc.close() != None:\n\t\t\traise \"PuLP: Error while trying to execute \"+self.path[1]\n\t\tif not os.path.exists(tmpSol):\n\t\t\traise \"PuLP: Error while executing \"+self.path[1]\n\t\tlp.status, values = self.readsol_CBC(tmpSol, lp, vs)\n\t\tlp.assign(values)\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpLp)\n\t\t\texcept: pass\n\t\t\ttry: os.remove(tmpSol)\n\t\t\texcept: pass\n\t\treturn lp.status", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):\n\n print(\"Solving random problems with solver %s\\n\" % solver)\n\n # Define statistics to record\n std_solve_time = np.zeros(len(n_vec))\n avg_solve_time = np.zeros(len(n_vec))\n min_solve_time = np.zeros(len(n_vec))\n max_solve_time = np.zeros(len(n_vec))\n\n n_prob = len(n_vec)\n\n # Store also OSQP time\n if solver == 'miosqp':\n # Add OSQP solve times statistics\n avg_osqp_solve_time = 
np.zeros(len(n_vec))\n\n # reset random seed\n np.random.seed(seed)\n\n for i in range(n_prob):\n\n # Get dimensions\n n = n_vec[i]\n m = m_vec[i]\n p = p_vec[i]\n\n print(\"problem n = %i, m = %i, p = %i\" % (n, m, p))\n\n # Define vector of cpu times\n solve_time_temp = np.zeros(repeat)\n\n # Store also OSQP time\n if solver == 'miosqp':\n osqp_solve_time_temp = np.zeros(repeat)\n\n for j in tqdm(range(repeat)):\n # for j in range(repeat):\n\n # Generate random vector of indeces\n i_idx = np.random.choice(np.arange(0, n), p, replace=False)\n\n # Generate random Matrices\n Pt = spa.random(n, n, density=dns_level)\n P = spa.csc_matrix(np.dot(Pt, Pt.T))\n q = sp.randn(n)\n A = spa.random(m, n, density=dns_level)\n u = 2 + sp.rand(m)\n l = -2 + sp.rand(m)\n\n # Enforce [0, 1] bounds on variables\n i_l = np.zeros(p)\n i_u = np.ones(p)\n # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)\n\n if solver == 'gurobi':\n # Solve with gurobi\n prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n res_gurobi = prob.solve(solver=mpbpy.GUROBI,\n verbose=False, Threads=1)\n if res_gurobi.status != 'optimal':\n import ipdb\n ipdb.set_trace()\n solve_time_temp[j] = 1e3 * res_gurobi.cputime\n\n elif solver == 'miosqp':\n # Define problem settings\n miosqp_settings = {\n # integer feasibility tolerance\n 'eps_int_feas': 1e-03,\n # maximum number of iterations\n 'max_iter_bb': 1000,\n # tree exploration rule\n # [0] depth first\n # [1] two-phase: depth first until first incumbent and then best bound\n 'tree_explor_rule': 1,\n # branching rule\n # [0] max fractional part\n 'branching_rule': 0,\n 'verbose': False,\n 'print_interval': 1}\n\n osqp_settings = {'eps_abs': 1e-03,\n 'eps_rel': 1e-03,\n 'eps_prim_inf': 1e-04,\n 'verbose': False}\n\n model = miosqp.MIOSQP()\n model.setup(P, q, A, l, u, i_idx, i_l, i_u,\n miosqp_settings,\n osqp_settings)\n res_miosqp = model.solve()\n\n # DEBUG (check if solutions match)\n # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)\n # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /\n # np.linalg.norm(res_gurobi.x)) > 1e-02:\n # import ipdb; ipdb.set_trace()\n#\n # import ipdb; ipdb.set_trace()\n\n if res_miosqp.status != miosqp.MI_SOLVED:\n import ipdb\n ipdb.set_trace()\n \n # Solution time \n solve_time_temp[j] = 1e3 * res_miosqp.run_time\n\n # Store OSQP time in percentage\n if solver == 'miosqp':\n osqp_solve_time_temp[j] = \\\n 100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)\n\n # Get time statistics\n std_solve_time[i] = np.std(solve_time_temp)\n avg_solve_time[i] = np.mean(solve_time_temp)\n max_solve_time[i] = np.max(solve_time_temp)\n min_solve_time[i] = np.min(solve_time_temp)\n\n # Store also OSQP time\n if solver == 'miosqp':\n avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)\n\n # Create pandas dataframe for the results\n df_dict = {'n': n_vec,\n 'm': m_vec,\n 'p': p_vec,\n 't_min': min_solve_time,\n 't_max': max_solve_time,\n 't_avg': avg_solve_time,\n 't_std': std_solve_time}\n\n # Store also OSQP time\n if solver == 'miosqp':\n df_dict.update({'t_osqp_avg': avg_osqp_solve_time})\n\n timings = pd.DataFrame(df_dict)\n\n return timings", "def PowerFlowAnalysis(BusData_Location, LineData_Location, Output_FileName, tolerance, S_Base):\r\n df_BusData, df_LineData = import_BusAndLineData(BusData_Location, LineData_Location)\r\n n = df_BusData.shape[0]\r\n \"\"\"Create Admittance Matrix in forms of Y and seperated into G and B\"\"\"\r\n sys_Y, sys_G, sys_B = 
build_AdmittanceMatrix(df_LineData, n)\r\n \"\"\"Creation of sys_Data\"\"\"\r\n sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef = init_BusData(df_BusData)\r\n sys_Data = init_SysData(sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef, sys_G, sys_B, S_Base)\r\n \"\"\"Initial Prime for mismatch detetction and storage\"\"\"\r\n mismatch_P = sys_Data[1:n,4]\r\n mismatch_Q = sys_Data[1:n,6]\r\n mismatch_max = [max(abs(mismatch_P)), max(abs(mismatch_Q))]\r\n iteration = 0\r\n iteration_list = []\r\n mismatch_P_list = []\r\n mismatch_Q_list = []\r\n max_P_bus = []\r\n max_Q_bus = []\r\n \r\n \"\"\"Loop until solution is reached or max iteration is exceeded\"\"\"\r\n while(iteration<15 and mismatch_max>tolerance):\r\n iteration_list.append(iteration)\r\n \r\n bus_P, = np.where(mismatch_P == max(abs(mismatch_P)))\r\n if len(bus_P) == 0:\r\n bus_P, = np.where(mismatch_P == -1*max(abs(mismatch_P)))\r\n max_P_bus.append(int(bus_P+2))\r\n bus_Q, = np.where(mismatch_Q == max(abs(mismatch_Q)))\r\n if len(bus_Q) == 0:\r\n bus_Q, = np.where(mismatch_Q == -1*max(abs(mismatch_Q)))\r\n max_Q_bus.append(int(bus_Q+2))\r\n mismatch_P_list.append(max(abs(mismatch_P)))\r\n mismatch_Q_list.append(max(abs(mismatch_Q)))\r\n \r\n sys_Data = update_SysData(sys_Data, sys_G, sys_B, sys_BusType)\r\n mismatch_P = sys_Data[1:n,4]\r\n mismatch_Q = sys_Data[1:n,6]\r\n mismatch_max = [max(abs(mismatch_P)), max(abs(mismatch_Q))]\r\n iteration += 1\r\n \r\n \"\"\"Final add to convergency history\"\"\"\r\n iteration_list.append(iteration) \r\n bus_P, = np.where(mismatch_P == max(abs(mismatch_P)))\r\n if len(bus_P) == 0:\r\n bus_P, = np.where(mismatch_P == -1*max(abs(mismatch_P)))\r\n max_P_bus.append(int(bus_P+2))\r\n bus_Q, = np.where(mismatch_Q == max(abs(mismatch_Q)))\r\n if len(bus_Q) == 0:\r\n bus_Q, = np.where(mismatch_Q == -1*max(abs(mismatch_Q)))\r\n max_Q_bus.append(int(bus_Q+2))\r\n mismatch_P_list.append(max(abs(mismatch_P)))\r\n mismatch_Q_list.append(max(abs(mismatch_Q)))\r\n \r\n \"\"\"Export final solution to excel file\"\"\"\r\n DataOutput(Output_FileName, sys_Data, df_LineData, sys_Y,iteration_list,mismatch_P_list,mismatch_Q_list,max_P_bus,max_Q_bus)", "def solve(self):\n\n # sanity check\n if self.G is None or self.curve is None or self.Q is None:\n print(\"Can't solve not all parameters are set\")\n return False # unsuccessful\n\n self.count = 1 # initial count\n self.start = time.time()\n\n\n ############ CHECK IF CURVE IS SUCEPTIBLE ############\n degree = 2\n card = self.curve.card\n p = self.curve.fp\n found = False\n\n if p % 4 == 3 and isPrime((p+1)/4):\n embedding = (pow(self.curve.fp, degree) - 1) / card\n\n if embedding == int(embedding):\n found = True\n\n if not found:\n if self.verbose:\n print(\"Not Suceptible to MOV attack\")\n return False\n\n\n ############ IF IT IS CONVERT ECDLP TO DLP ############\n\n G2, curve2 = secondCurve(self.curve, degree)\n\n if G2 == False:\n if self.verbose:\n print(\"Couldn't perform ECDLP to DLP reduction\")\n return False\n\n # calculate group\n m = curve2.group()\n\n # convert our two EC points into rational points over F_p^degree field\n dlpG = curve2.weil(G2, self.G, m)\n dlpQ = curve2.weil(G2, self.Q, m)\n\n print(dlpG, dlpQ, m)\n ############ SOLVE DLP ############\n\n self.k = cyclicLog(dlpQ, dlpG, m)\n\n self.time = time.time() - self.start\n\n # calculate estimated space and time\n m = int(m)\n print(m)\n bits = int(math.log(m, 2))\n\n multiplier = bits // 2\n bound = min(bits * 20 * multiplier, primes[-1])\n bound = 
bisect_left(primes, bound)\n\n # get subset of all possible prime factors which are B-smooth\n primesSub = primes[:bound]\n\n # filter list as we are only interested in square conguences\n residPrimes = list(filter(lambda p: quadRes(m, p) == 1, primesSub))\n\n phi = len(residPrimes)\n\n self.count = phi\n self.space = phi * phi\n\n if self.verbose:\n print(\"k:\", self.k)\n print(\"Time taken: %.3f s\" % (self.time)) # print time taken\n print(\"Space used: %d\" % (self.space)) # print space used\n print(\"Numbers checked:\", self.count) # print total count\n\n return True", "def solve_polyphase_instance(\n allele_matrix, genotype_list, param, timers, partial_phasing=None, quiet=False\n):\n num_vars = len(allele_matrix.getPositions())\n\n # Precompute block borders based on read coverage and linkage between variants\n if not quiet:\n logger.info(\"Detecting connected components with weak interconnect ..\")\n timers.start(\"detecting_blocks\")\n\n ploidy = param.ploidy\n sl = param.block_cut_sensitivity <= 1\n block_starts = compute_block_starts(allele_matrix, ploidy, single_linkage=sl)\n\n # Set block borders and split readset\n block_starts.append(num_vars)\n num_blocks = sum(1 for i, j in zip(block_starts[:-1], block_starts[1:]) if j > i + 1)\n if not quiet:\n logger.info(\n f\"Split heterozygous variants into {num_blocks} blocks (and {len(block_starts) - num_blocks - 1} singleton blocks).\"\n )\n\n # Process blocks independently\n results = []\n processed_blocks = 0\n timers.stop(\"detecting_blocks\")\n\n \"\"\"\n Python's multiprocessing makes hard copies of the passed arguments, which is not trivial for\n cython objects, especially when they contain pointers to other cython objects. Any passed\n object must be (de)serializable (in Python: pickle). All other objects created in the main\n thread are also accessible by the workers, but they are handled via the copy-on-write policy.\n This means, that e.g. the large main matrix is not hardcopied for every thread, as long as it\n is not modified there. 
This must be ensured to prevent a massive waste of memory consumption.\n \"\"\"\n if param.threads == 1:\n # for single-threading, process everything individually to minimize memory footprint\n for block_id, (start, end) in enumerate(zip(block_starts[:-1], block_starts[1:])):\n submatrix = allele_matrix.extractInterval(start, end)\n subphasing = partial_phasing.extractInterval(start, end) if partial_phasing else None\n if end - start > 1:\n processed_blocks += 1\n if not quiet:\n logger.info(\n f\"Processing block {processed_blocks} of {num_blocks} with {len(submatrix)} reads and {end - start} variants.\"\n )\n results.append(\n phase_single_block(\n block_id, submatrix, genotype_list[start:end], subphasing, param, timers, quiet\n )\n )\n del submatrix\n\n else:\n # sort block by descending size (4/3-approximation for scheduling problem)\n timers.start(\"phase_blocks\")\n joblist = list(zip(range(len(block_starts)), block_starts[:-1], block_starts[1:]))\n joblist.sort(key=lambda x: x[1] - x[2])\n\n with Pool(processes=param.threads) as pool:\n process_results = [\n pool.apply_async(\n phase_single_block_mt,\n (\n allele_matrix,\n partial_phasing,\n block_id,\n start,\n end,\n genotype_list[start:end],\n param,\n timers,\n job_id,\n num_blocks,\n quiet,\n ),\n )\n for job_id, (block_id, start, end) in enumerate(joblist)\n ]\n # collect all blockwise results\n blockwise_results = [res.get() for res in process_results]\n results = sorted(blockwise_results, key=lambda x: x.block_id)\n\n timers.stop(\"phase_blocks\")\n\n # Aggregate blockwise results\n if partial_phasing and param.block_cut_sensitivity == 0:\n # For lowest sensitivity, do not add block starts to global breakpoint list\n # (unless the partial phasing is also interrupted there)\n borders = {partial_phasing.getFirstPos(i) for i in range(len(partial_phasing))}\n else:\n borders = []\n return aggregate_results(results, ploidy, borders)", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif 
isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def solve_fastpm(engine, pt, asteps, dlinear_k, s, v, s1, s2):\n code = CodeSegment(engine)\n code.solve_lpt(pt=pt, aend=asteps[0], dlinear_k=dlinear_k, s=s, v=v, s1=s1, s2=s2)\n\n def K(ai, af, ar):\n return 1 / (ar ** 2 * pt.E(ar)) * (pt.Gf(af) - pt.Gf(ai)) / pt.gf(ar)\n def D(ai, af, ar):\n return 1 / (ar ** 3 * pt.E(ar)) * (pt.Gp(af) - pt.Gp(ai)) / pt.gp(ar)\n\n code.assign(x=Literal(numpy.zeros_like(engine.q)), y='f')\n\n code.force(s=s, force='f', force_factor=1.5 * pt.Om0)\n for ai, af in zip(asteps[:-1], asteps[1:]):\n ac = (ai * af) ** 0.5\n code.kick(v=v, f='f', kick_factor=K(ai, ac, ai))\n code.drift(x=s, v=v, drift_factor=D(ai, ac, ac))\n code.drift(x=s, v=v, drift_factor=D(ac, af, ac))\n code.force(s=s, force='f', force_factor=1.5 * pt.Om0)\n code.kick(v=v, f='f', kick_factor=K(ac, af, af))\n return code", "def solve_inc(self, DU, DF, calcG=True):\n\n nu = len(self.udofs)\n np = len(self.pdofs)\n ndof = len(self.dofs)\n decompose = False\n if calcG: decompose = True\n scheme = self.scheme\n\n if calcG:\n if self.verbose and nu>500: print \" building system...\", ; sys.stdout.flush()\n self.mountG()\n\n # Mount G11.. G22 matrices\n cG = self.G.tocsc()\n self.G11 = cG[:nu , :nu ]\n self.G12 = cG[:nu , nu:]\n self.G21 = cG[ nu:, :nu ]\n self.G22 = cG[ nu:, nu:]\n cG = None # Free memory\n\n # Pick last values for disp, vel and accel\n U_0 = self.U.copy()\n Uv_0 = self.Uv.copy()\n Ua_0 = self.Ua.copy()\n\n # Mount RHS\n self.RHS = self.DF - dot(self.C, Uv_0 + (1.0-gamma)*h*Ua_0) - dot(self.K, U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0) \n\n RHS1 = RHS[:nu]\n Ua2 = DU[nu:]\n\n # Solve linear system\n RHS2 = self.G22*Ua2 #sparse matrix * dense vector\n if nu:\n if self.verbose and nu>500: print \"solving...\", ; sys.stdout.flush()\n if scheme == \"MNR\" and decompose : self.LUsolver = factorized(self.G11)\n if scheme == \"NR\" or scheme == \"FE\": self.LUsolver = factorized(self.G11)\n U1 = scipy.sparse.linalg.spsolve(self.G11, RHS1 - self.G12*Ua2)\n RHS2 += self.G21*Ua1\n\n # updating disp, vel and accel\n self.Uv = Uv_0 + (1.0-gamma)*h*Ua_0 + gamma*h*self.Ua\n self.U = U_0 + h*Uv_0 + (0.5-beta)*(h**2.0)*Ua_0 + (h**2.0)*beta*self.Ua\n \n # calculating reactions\n self.DF = dot(self.M,self.Ua) + dot(self.C,self.Uv) + dot(self.K,self.U)\n for i in range(nu):\n self.F[self.udofs[i].eq_id] = F_bk[self.udofs[i].eq_id]\n\n # Complete vectors\n for i, dof in enumerate(self.udofs): DU[dof.eq_id] = U1[i]\n for i, dof in enumerate(self.pdofs): DF[dof.eq_id] = F2[i]\n\n if self.verbose and nu>500: print \"updating...\" ; sys.stdout.flush()\n DFint = self.update_elems_and_nodes(DU) # Also calculates DFint\n #if self.verbose: print \" done.\"\n\n R = DF - DFint\n return DFint, R", "def pe_solver(Aij, Bij, pi):\n # =========================================================================\n # Calculating the pressure at row i + 1\n # =========================================================================\n return np.dot(np.linalg.inv(Aij), np.dot(Bij, pi))", "def actualSolve(self, lp):\n\t\tif not self.executable(self.path):\n\t\t\traise \"PuLP: cannot execute \"+self.path\n\t\tif not self.keepFiles:\n\t\t\tpid = os.getpid()\n\t\t\ttmpLp = os.path.join(self.tmpDir, \"%d-pulp.lp\" % pid)\n\t\t\ttmpSol = os.path.join(self.tmpDir, \"%d-pulp.sol\" % pid)\n\t\telse:\n\t\t\ttmpLp = lp.name+\"-pulp.lp\"\n\t\t\ttmpSol = lp.name+\"-pulp.sol\"\n\t\tlp.writeLP(tmpLp, writeSOS = 0)\n\t\tproc = 
[\"glpsol\", \"--lpt\", tmpLp, \"-o\", tmpSol]\n\t\tif not self.mip: proc.append('--nomip')\n\t\tproc.extend(self.options)\n\t\tif not self.msg:\n\t\t\tproc[0] = self.path\n\t\t\tf = os.popen(\" \".join(proc))\n\t\t\tf.read()\n\t\t\trc = f.close()\n\t\t\tif rc != None:\n\t\t\t\traise \"PuLP: Error while trying to execute \"+self.path\n\t\telse:\n\t\t\tif os.name != 'nt':\n\t\t\t\trc = os.spawnvp(os.P_WAIT, self.path, proc)\n\t\t\telse:\n\t\t\t\trc = os.spawnv(os.P_WAIT, self.executable(self.path), proc)\n\t\t\tif rc == 127:\n\t\t\t\traise \"PuLP: Error while trying to execute \"+self.path\n\t\tif not os.path.exists(tmpSol):\n\t\t\traise \"PuLP: Error while executing \"+self.path\n\t\tlp.status, values = self.readsol(tmpSol)\n\t\tlp.assign(values)\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpLp)\n\t\t\texcept: pass\n\t\t\ttry: os.remove(tmpSol)\n\t\t\texcept: pass\n\t\treturn lp.status", "def actualSolve(self, lp):\n\t\tif not self.executable(self.path):\n\t\t\traise \"PuLP: cannot execute \"+self.path\n\t\tif not self.keepFiles:\n\t\t\tpid = os.getpid()\n\t\t\ttmpLp = os.path.join(self.tmpDir, \"%d-pulp.lp\" % pid)\n\t\t\t# Should probably use another CPLEX solution format\n\t\t\ttmpSol = os.path.join(self.tmpDir, \"%d-pulp.txt\" % pid)\n\t\telse:\n\t\t\ttmpLp = lp.name+\"-pulp.lp\"\n\t\t\t# Should probably use another CPLEX solution format\n\t\t\ttmpSol = lp.name+\"-pulp.txt\"\n\t\tlp.writeLP(tmpLp, writeSOS = 1)\n\t\ttry: os.remove(tmpSol)\n\t\texcept: pass\n\t\tif not self.msg:\n\t\t\tcplex = os.popen(self.path+\" > /dev/null 2> /dev/null\", \"w\")\n\t\telse:\n\t\t\tcplex = os.popen(self.path, \"w\")\n\t\tcplex.write(\"read \"+tmpLp+\"\\n\")\n\t\tfor option in self.options:\n\t\t\tcplex.write(option+\"\\n\")\n\t\tif lp.isMIP():\n\t\t\tif self.mip:\n\t\t\t\tcplex.write(\"mipopt\\n\")\n\t\t\t\tcplex.write(\"change problem fixed\\n\")\n\t\t\telse:\n\t\t\t\tcplex.write(\"change problem relaxed_milp\\n\")\n\t\t\t\t\n\t\tcplex.write(\"optimize\\n\")\n\t\tcplex.write(\"write \"+tmpSol+\"\\n\")\n\t\tcplex.write(\"quit\\n\")\n\t\tif cplex.close() != None:\n\t\t\traise \"PuLP: Error while trying to execute \"+self.path\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpLp)\n\t\t\texcept: pass\n\t\tif not os.path.exists(tmpSol):\n\t\t\tstatus = LpStatusInfeasible\n\t\telse:\n\t\t\tstatus, values = self.readsol(tmpSol)\n\t\tif not self.keepFiles:\n\t\t\ttry: os.remove(tmpSol)\n\t\t\texcept: pass\n\t\t\ttry: os.remove(\"cplex.log\")\n\t\t\texcept: pass\n\t\tif status != LpStatusInfeasible:\n\t\t\tlp.assign(values)\n\t\tlp.status = status\n\t\treturn status", "def qubit_adapt_vqe(\n hamiltonian_sp,\n hamiltonian_sp_sparse,\n reference_ket,\n nqubits,\n pool_mix,\n hf_init_sp,\n fci,\n n_max_grads=2,\n adapt_conver=\"norm\",\n adapt_thresh=1e-08,\n adapt_maxiter=45,\n tolerance_sim=1e-07,\n method_sim=\"BFGS\",\n):\n iterations_sim = {\n \"energies\": [],\n \"energies_substracted_from_fci\": [],\n \"norms\": [],\n \"Max_gradient\": [],\n \"CNOTs\": [],\n \"Hadamard\": [],\n \"RY\": [],\n \"RX\": [],\n }\n result_sim = {}\n\n iterations_ana = {\n \"energies\": [],\n \"energies_substracted_from_fci\": [],\n \"norms\": [],\n \"Max_gradient\": [],\n }\n result_ana = {}\n\n parameters_sim = []\n parameters_ana = []\n\n ansatz_ops = [] # SQ operator strings in the ansatz\n curr_state = prepare_hf_state(hf_init_sp, pool_mix)\n ref_energy = hf_energy(curr_state, hamiltonian_sp)\n ref_energy_ana = (\n reference_ket.T.conj().dot(hamiltonian_sp_sparse.dot(reference_ket))[0, 0].real\n )\n 
print(\"reference_energy from the simulator:\", ref_energy)\n print(\"reference_energy from the analytical calculations:\", ref_energy_ana)\n curr_state_open_f = prepare_adapt_state(\n reference_ket, ansatz_ops, parameters_ana, nqubits\n )\n print(\" --------------------------------------------------------------------------\")\n print(\" \")\n print(\" Start Qubit ADAPT-VQE algorithm:\")\n print(\" \")\n print(\" --------------------------------------------------------------------------\")\n print(\" \")\n # chosegrad = 2\n Y = int(n_max_grads)\n print(\" ------------------------------------------------------\")\n print(\" The number of maximum gradients inserted in each iteration:\", Y)\n print(\" ------------------------------------------------------\")\n op_indices = []\n\n prev_norm = 0.0\n for n_iter in range(adapt_maxiter):\n print(\"\\n\")\n print(\n \" --------------------------------------------------------------------------\"\n )\n print(\" Qubit ADAPT-VQE iteration: \", n_iter)\n print(\n \" --------------------------------------------------------------------------\"\n )\n next_deriv = 0\n curr_norm = 0\n list_grad = []\n print(\"\\n\")\n print(\" ------------------------------------------------------\")\n print(\" Start the analytical gradient calculation:\")\n print(\" ------------------------------------------------------\")\n for i in range(len(pool_mix)):\n # print(\"i\",i)\n # gi = #compute_commutator_i(listcommutators2[i],curr_state)\n operator_sparse = term_to_matrix_sparse(pool_mix[i])\n gi = calculate_gradient(\n operator_sparse, curr_state_open_f, hamiltonian_sp_sparse\n )\n curr_norm += gi * gi\n list_grad.append(gi)\n if abs(gi) > abs(next_deriv):\n next_deriv = gi\n mylist_value_without_0 = value_without_0(list_grad)\n\n mylist_index_without_0 = index_without_0(list_grad)\n sorted_mylist_value_without_0 = abs_sort_desc(\n value_without_0(list_grad)\n )\n print(\n \"sorted_mylist_value of gradient_without_0\", sorted_mylist_value_without_0\n )\n sorted_index = corresponding_index(\n mylist_value_without_0,\n mylist_index_without_0,\n sorted_mylist_value_without_0,\n )\n curr_norm = np.sqrt(curr_norm)\n max_of_gi = next_deriv\n\n print(\" Norm of <[H,A]> = %12.8f\" % curr_norm)\n print(\" Max of <[H,A]> = %12.8f\" % max_of_gi)\n\n converged = False\n if adapt_conver == \"norm\":\n if curr_norm < adapt_thresh:\n converged = True\n else:\n print(\" FAIL: Convergence criterion not defined\")\n exit()\n\n if converged or (abs(curr_norm - prev_norm) < 10 ** (-7)):\n print(\" Ansatz Growth Converged!\")\n result_sim[\"optimizer\"] = method_sim\n result_sim[\"final_norm\"] = curr_norm\n result_sim[\"indices\"] = op_indices\n result_sim[\"len_operators\"] = len(op_indices)\n result_sim[\"parameters\"] = parameters_sim\n result_sim[\"final_energy\"] = opt_result_sim.fun\n\n # result_ana[\"optimizer\"] = method_ana\n # result_ana[\"final_norm\"] = curr_norm\n # result_ana[\"indices\"] = op_indices\n # result_ana[\"len_operators\"] = len(op_indices)\n # result_ana[\"parameters\"] = parameters_ana\n # result_ana[\"final_energy\"] = opt_result_ana.fun\n\n gates = curr_state.ops\n m = count(\"CNOT\", gates)\n print(\" -----------Final ansatz----------- \")\n print(\" %4s %12s %18s\" % (\"#\", \"Coeff\", \"Term\"))\n for si in range(len(ansatz_ops)):\n print(\" %4i %12.8f\" % (si, parameters_sim[si]))\n break\n\n chosen_batch = sorted_mylist_value_without_0\n\n gamma1 = []\n sorted_index1 = []\n curr_norm1 = 0\n for z in chosen_batch:\n curr_norm1 += z * z\n curr_norm1 = 
np.sqrt(curr_norm1)\n for i in range(Y):\n gamma1.append(chosen_batch[i] / curr_norm1)\n sorted_index1.append(sorted_index[i])\n # parameters = []\n for m in range(len(gamma1)):\n parameters_sim.append(gamma1[m])\n parameters_ana.append(gamma1[m])\n # parameters.append(0.0)\n ansatz_ops.append(pool_mix[sorted_index1[m]])\n op_indices.append(sorted_index1[m])\n print(\"initial parameters\", parameters_sim)\n print(\"op_indices of iteration_%d\" % n_iter, op_indices)\n # opt_result_ana = scipy.optimize.minimize(exact_adapt_energy,\n # parameters_ana,\n # (ansatz_ops,reference_ket,hamiltonian_sp_sparse,n_el),\n # method = method_ana,\n # tol =tolerance_ana,\n # options = {'gtol': 10**(-5),\n # 'maxiter': 50000,\n # 'disp': False})\n # xlist_ana = opt_result_ana.x\n opt_result_sim = scipy.optimize.minimize(\n lambda theta: ucc_action(\n hamiltonian_sp, ansatz_ops, hf_init_sp, theta\n ),\n x0=parameters_sim,\n method=method_sim,\n tol=tolerance_sim,\n options={\"maxiter\": 100000, \"disp\": False},\n )\n xlist_sim = opt_result_sim.x\n # print(\" ----------- ansatz from analytical calculations----------- \")\n # print(\" %s\\t %s\\t\\t %s\" %(\"#\",\"Coeff\",\"Term\"))\n # parameters_ana = []\n # for si in range(len(ansatz_ops)):\n # print(\" %i\\t %f\\t %s\" %(si, xlist_ana[si], op_indices[si]) )\n # parameters_ana.append(xlist_ana[si])\n # print(\" Energy reached from the analytical calculations: %20.20f\" %opt_result_ana.fun)\n\n # curr_state_open_f = prepare_adapt_state(reference_ket,ansatz_ops,parameters_ana,nqubits)\n\n print(\" ----------- ansatz from the simulator----------- \")\n print(\" %s\\t %s\\t\\t %s\" % (\"#\", \"Coeff\", \"Term\"))\n parameters_sim = []\n for si in range(len(ansatz_ops)):\n print(\" %i\\t %f\\t %s\" % (si, xlist_sim[si], op_indices[si]))\n parameters_sim.append(xlist_sim[si])\n print(\" Energy reached from the simulator: %20.20f\" % opt_result_sim.fun)\n curr_state = prepare_state_ansatz(ansatz_ops, hf_init_sp, parameters_sim)\n curr_state_open_f = prepare_adapt_state(\n reference_ket, ansatz_ops, parameters_sim, nqubits\n )\n\n prev_norm = curr_norm\n gates = curr_state.ops\n cnot = count(\"CNOT\", gates)\n hadamard = count(\"H\", gates)\n ry = count(\"_4\", gates)\n rx = count(\"_2\", gates)\n iterations_sim[\"energies\"].append(opt_result_sim.fun)\n iterations_sim[\"energies_substracted_from_fci\"].append(\n abs(opt_result_sim.fun - fci)\n )\n iterations_sim[\"norms\"].append(curr_norm)\n iterations_sim[\"Max_gradient\"].append(sorted_mylist_value_without_0[0])\n # iterations_ana[\"energies\"].append(opt_result_ana.fun)\n # iterations_ana[\"norms\"].append(curr_norm)\n # iterations_ana[\"Max_gradient\"].append(sorted_mylist_value_without_0[0])\n iterations_sim[\"CNOTs\"].append(cnot)\n iterations_sim[\"Hadamard\"].append(hadamard)\n iterations_sim[\"RY\"].append(ry)\n iterations_sim[\"RX\"].append(rx)\n return iterations_sim, iterations_ana, result_sim, result_ana", "def solve(self, p_0, u_0=None, k_ff_all_0=None, k_fb_safe=None, u_perf_0=None,\n k_fb_perf_0=None, sol_verbose=False, q_0=None, k_fb_0=None):\n assert self.solver_initialized, \"Need to initialize the solver first!\"\n\n u_0_init, k_ff_all_0_init, k_fb_safe_init, u_perf_0_init, k_fb_perf_0_init = self._get_init_controls()\n\n if u_0 is None:\n u_0 = u_0_init\n if k_ff_all_0 is None:\n k_ff_all_0 = k_ff_all_0_init\n if k_fb_safe is None:\n k_fb_safe = k_fb_safe_init\n if u_perf_0 is None:\n u_perf_0 = u_perf_0_init\n if k_fb_perf_0 is None:\n k_fb_perf_0 = k_fb_perf_0_init\n if q_0 is not None:\n 
if k_fb_0 is None:\n k_fb_0 = self.get_lqr_feedback()\n\n if self.opt_x0:\n params = np.vstack(\n (cas_reshape(k_fb_safe, (-1, 1)), cas_reshape(k_fb_perf_0, (-1, 1))))\n\n opt_vars_init = vertcat(cas_reshape(p_0, (-1, 1)), cas_reshape(u_0, (-1, 1)), u_perf_0, \\\n cas_reshape(k_ff_all_0, (-1, 1)))\n else:\n params = np.vstack(\n (p_0, cas_reshape(k_fb_safe, (-1, 1)), cas_reshape(k_fb_perf_0, (-1, 1))))\n\n opt_vars_init = vertcat(cas_reshape(u_0, (-1, 1)), u_perf_0, \\\n cas_reshape(k_ff_all_0, (-1, 1)))\n\n if self.init_uncertainty:\n params = vertcat(params, cas_reshape(q_0, (-1, 1)), cas_reshape(k_fb_0, (-1, 1)))\n\n crash = False \n sol = self.solver(x0=opt_vars_init, lbg=self.lbg, ubg=self.ubg, p=params)\n try:\n # pass\n sol = self.solver(x0=opt_vars_init, lbg=self.lbg, ubg=self.ubg, p=params)\n except:\n crash = True\n warnings.warn(\"NLP solver crashed, solution infeasible\")\n sol = None\n\n return self._get_solution(p_0, sol, k_fb_safe, k_fb_perf_0, sol_verbose, crash, q_0=q_0, k_fb_0=k_fb_0)", "def main(**kwargs):\n flowsheet = Flowsheet(name='MB_Model') \n \n # Fix variables\n setInputs(flowsheet) \n\n ts = time.time() \n\n mb = flowsheet.MB_fuel\n \n # Initialize fuel reactor\n flowsheet.MB_fuel._initialize(outlvl=1,\n optarg={\"tol\" : 1e-8,\n \"max_cpu_time\" : 600,\n \"print_level\" : 5,\n \"halt_on_ampl_error\": 'yes'}) \n \n # Create a solver\n opt = SolverFactory('ipopt')\n opt.options = {'tol': 1e-8,\n 'linear_solver' : 'ma27',\n 'bound_push': 1e-8,\n 'max_cpu_time': 600,\n 'print_level': 5}\n \n results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n keepfiles=False)\n\n #flowsheet.MB_fuel.Solid_In_M.fix(691.4)\n #flowsheet.MB_fuel.Gas_In_y['CO2'].fix(0.03999)\n #flowsheet.MB_fuel.Gas_In_y['H2O'].fix(0.00001)\n #flowsheet.MB_fuel.Gas_In_y['CH4'].fix(0.96)\n\n\n\n #results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n # keepfiles=False)\n \n \n print(\"\\n\")\n print(\"----------------------------------------------------------\")\n print('Total simulation time: ', value(time.time() - ts), \" s\")\n print(\"----------------------------------------------------------\")\n\n \n # Print some variables \n #print_summary_fuel_reactor(flowsheet) \n\n # Plot some variables \n #results_plot_fuel_reactor(flowsheet) \n\n m = flowsheet.MB_fuel\n if 'Solid_M' in kwargs:\n m.Solid_In_M.fix(kwargs['Solid_M'])\n if 'Solid_T' in kwargs:\n m.Solid_In_Ts[t].fix(kwargs['Solid_T'])\n if 'Solid_x' in kwargs:\n m.Solid_In_x['Fe2O3'].fix(kwargs['Solid_x']['Fe2O3'])\n m.Solid_In_x['Fe3O4'].fix(kwargs['Solid_x']['Fe3O4'])\n m.Solid_In_x['Al2O3'].fix(kwargs['Solid_x']['Al2O3'])\n if 'Gas_F' in kwargs:\n m.Gas_In_F.fix(kwargs['Gas_F'])\n if 'Gas_P' in kwargs:\n m.Gas_In_P.fix(kwargs['Gas_P'])\n if 'Gas_T' in kwargs:\n m.Gas_In_T.fix(kwargs['Gas_T'])\n if 'Gas_y' in kwargs:\n m.Gas_In_y['CO2'].fix(kwargs['Gas_y']['CO2'])\n m.Gas_In_y['H2O'].fix(kwargs['Gas_y']['H2O'])\n m.Gas_In_y['CH4'].fix(kwargs['Gas_y']['CH4'])\n\n results = opt.solve(flowsheet, tee=True)\n\n with open('ss_fs.txt','w') as f:\n flowsheet.display(ostream=f)\n\n dt_Gflux_CO2 = []\n dt_Gflux_H2O = []\n dt_Gflux_CH4 = []\n dt_Sflux_Fe2O3 = []\n dt_Sflux_Fe3O4 = []\n dt_Sflux_Al2O3 = []\n dt_Ctrans_CO2 = []\n dt_Ctrans_H2O = []\n dt_Ctrans_CH4 = []\n dt_qtrans_Fe2O3 = []\n dt_qtrans_Fe3O4 = []\n dt_qtrans_Al2O3 = []\n dt_Ghflux = []\n dt_Ts = []\n dt_TgGS = []\n dt_TsGS = []\n dt_vg = []\n dt_vs = []\n\n# for z in mb.z.get_finite_elements():\n# if z != mb.z.first() and z != mb.z.last():\n#\n# 
dt_Gflux_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.G_flux[z,'CO2'].value-mb.G_flux[prev,'CO2'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_H2O.append( (mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.G_flux[z,'H2O'].value-mb.G_flux[prev,'H2O'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.G_flux[z,'CH4'].value-mb.G_flux[prev,'CH4'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Ctrans_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.Ctrans[z,'CO2'].value)* \\\n# #-mv.Ctrans[prev,'CO2'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_H2O.append( (mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.Ctrans[z,'H2O'].value)* \\\n# #-mv.Ctrans[prev,'H2O'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.Ctrans[z,'CH4'].value)* \\\n# #-mv.Ctrans[prev,'CH4'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Sflux_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.S_flux[z,'Fe2O3'].value-mb.S_flux[prev,'Fe2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.S_flux[z,'Fe3O4'].value-mb.S_flux[prev,'Fe3O4'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Al2O3.append( (mb.q[z,'Al2O3'].value-mb.q[prev,'Al2O3'].value)/\\\n# (mb.S_flux[z,'Al2O3'].value-mb.S_flux[prev,'Al2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_qtrans_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.qtrans[z,'Fe2O3'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe2O3'].value) )\n#\n# dt_qtrans_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_qtrans_Al2O3.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_Ghflux.append( (mb.Tg[z].value-mb.Tg[prev].value)/\\\n# (mb.Gh_flux[z].value-mb.Gh_flux[prev].value)* (z-prev)* mb.eps.value*\\\n# mb.L.value* mb.rho_vap[z].value* mb.cp_gas[z].value /(z-prev)) \n#\n# dt_Ts.append( (z-prev)*(1-mb.eps.value)*mb.L.value/mb.vs.value /(z-prev))\n#\n# dt_TgGS.append( (mb.Tg[z].value - mb.Tg[prev].value)/\\\n# mb.Tg_GS[z].value* mb.eps.value* mb.rho_vap[z].value* mb.cp_gas[z].value \n# /(z-prev))\n# \n# dt_TsGS.append( (mb.Ts[z].value - mb.Ts[prev].value)/\\\n# mb.Tg_GS[z].value* (1-mb.eps.value)* mb.rho_sol.value* mb.cp_sol[z].value*1e-3 \n# /(z-prev))\n# \n# dt_vg.append( mb.L.value*(z-prev)/mb.vg[z].value /(z-prev))\n# \n# dt_vs.append( mb.L.value*(z-prev)/mb.vs.value /(z-prev))\n#\n# prev = z\n#\n# with open('dt.txt','w') as f:\n# f.write('dt_Gflux_CO2\\t')\n# for t in dt_Gflux_CO2:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_H2O\\t')\n# for t in dt_Gflux_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_CH4\\t') \n# for t in dt_Gflux_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe2O3\\t') \n# for t in dt_Sflux_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe3O4\\t') \n# for t in dt_Sflux_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Al2O3\\t') \n# for t in dt_Sflux_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# 
f.write('\\ndt_Ctrans_CO2\\t') \n# for t in dt_Ctrans_CO2:\n# f.write('%1.3f'%t +'\\t')\n# \n# f.write('\\ndt_Ctrans_H2O\\t') \n# for t in dt_Ctrans_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ctrans_CH4\\t') \n# for t in dt_Ctrans_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe2O3\\t') \n# for t in dt_qtrans_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe3O4\\t') \n# for t in dt_qtrans_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Al2O3\\t') \n# for t in dt_qtrans_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ghflux\\t') \n# for t in dt_Ghflux:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ts\\t\\t') \n# for t in dt_Ts:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TgGS\\t\\t') \n# for t in dt_TgGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TsGS\\t\\t') \n# for t in dt_TsGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vg\\t\\t') \n# for t in dt_vg:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vs\\t\\t') \n# for t in dt_vs:\n# f.write('%1.3f'%t +'\\t')\n\n\n # Store the flowsheet \n return flowsheet", "def ccg_algo(dir:str, tol: float, gamma: int, pv_min: np.array, pv_max: np.array, engagement: np.array, solver_param: dict, day:str, log:bool=False, printconsole:bool=False, warm_start:bool=False, M_neg:float=None):\n\n # Compute the maximal deviation between the max and min PV uncertainty set bounds\n max_dev = pv_max - pv_min # (kW)\n max_dev[max_dev < 0] = 0\n nb_periods = max_dev.shape[0]\n\n # ------------------------------------------------------------------------------------------------------------------\n # CCG initialization: build the initial MP\n # ------------------------------------------------------------------------------------------------------------------\n\n # Building the MP\n MP = CCG_MP()\n MP.model.update()\n print('MP initialized: %d variables %d constraints' % (len(MP.model.getVars()), len(MP.model.getConstrs())))\n MP.export_model(dir + day + '_ccg_MP_initialized')\n\n # ------------------------------------------------------------------------------------------------------------------\n # CCG loop until convergence criteria is reached\n # ------------------------------------------------------------------------------------------------------------------\n\n if printconsole:\n print('-----------CCG ITERATION STARTING-----------')\n\n t_solve = time.time()\n objectives = []\n computation_times = []\n mipgap = []\n SP_dual_status = []\n SP_primal_status = []\n alpha_neg_list = []\n epsilon = 1e20\n # With CCG the convergence is stable.\n epsilon_list = [epsilon] * 2\n iteration = 1\n BESS_count_list = []\n BESS_charge_discharge_list = []\n max_iteration = 50\n\n while all(i < tol for i in epsilon_list) is not True and iteration < max_iteration:\n logfile = \"\"\n if log:\n logfile = dir + 'logfile_' + str(iteration) + '.log'\n if printconsole:\n print('i = %s solve SP dual' % (iteration))\n\n # ------------------------------------------------------------------------------------------------------------------\n # 1. 
SP part\n # ------------------------------------------------------------------------------------------------------------------\n\n # 1.1 Solve the SP and get the worst PV trajectory to add the new constraints of the MP\n SP_dual = BD_SP(pv_forecast=pv_max, max_dev=max_dev, engagement=engagement, gamma=gamma, heuristic=solver_param['heuristic'], M_neg=M_neg)\n SP_dual.solve(logfile=logfile, Threads=solver_param['Threads'], MIPFocus=solver_param['MIPFocus'], TimeLimit=solver_param['TimeLimit'])\n SP_dual_sol = SP_dual.store_solution()\n SP_dual_status.append(SP_dual_sol['status'])\n mipgap.append(SP_dual.model.MIPGap)\n alpha_neg_list.append(SP_dual_sol['alpha_neg'])\n\n # 1.2 Compute the worst PV trajectory from the SP dual solution\n pv_worst_case_from_SP = [pv_max[i] - SP_dual_sol['z_neg'][i] * max_dev[i] for i in range(nb_periods)]\n if printconsole:\n print(' i = %s : SP dual status %s solved in %.1f s MIPGap = %.6f' % (iteration, SP_dual_sol['status'], SP_dual_sol['time_total'], SP_dual.model.MIPGap))\n\n # 1.3 Solve the primal of the SP to check if the objectives of the primal and dual are equal to each other\n SP_primal = SP_primal_LP(pv_forecast=pv_worst_case_from_SP, engagement=engagement)\n SP_primal.solve()\n SP_primal_sol = SP_primal.store_solution()\n SP_primal_status.append(SP_primal_sol['status'])\n\n if printconsole:\n print(' i = %s : SP primal status %s' % (iteration, SP_primal_sol['status']))\n print(' i = %s : SP primal %.1f € SP dual %.1f € -> |SP primal - SP dual| = %.2f €' % (iteration, SP_primal_sol['obj'], SP_dual_sol['obj'], abs(SP_primal_sol['obj'] - SP_dual_sol['obj'])))\n\n # 1.4 SP solved to optimality ? -> Check if there is any simultaneous charge and discharge in the SP primal solution\n if SP_primal_sol['status'] == 2 or SP_primal_sol['status'] == 9: # 2 = optimal, 9 = timelimit has been reached\n nb_count = check_BESS(SP_primal_sol=SP_primal_sol)\n if nb_count > 0:\n BESS_charge_discharge_list.append([iteration, SP_primal_sol['y_charge'], SP_primal_sol['y_discharge']])\n else: #\n nb_count = float('nan')\n BESS_count_list.append(nb_count)\n if printconsole:\n print(' i = %s : %s simultaneous charge and discharge' % (iteration, nb_count))\n\n # ------------------------------------------------------------------------------------------------------------------\n # 2. 
MP part\n # ------------------------------------------------------------------------------------------------------------------\n\n # Check Sub Problem status -> bounded or unbounded\n if SP_dual_sol['status'] == 2 or SP_dual_sol['status'] == 9: # 2 = optimal, 9 = timelimit has been reached\n # Add an optimality cut to MP and solve\n MP.update_MP(pv_trajectory=pv_worst_case_from_SP, iteration=iteration)\n if printconsole:\n print('i = %s : MP with %d variables and %d constraints' % (iteration, len(MP.model.getVars()), len(MP.model.getConstrs())))\n # MP.export_model(dir + 'MP_' + str(iteration))\n if printconsole:\n print('i = %s : solve MP' % (iteration))\n MP.solve()\n MP_sol = MP.store_solution()\n MP.update_sol(MP_sol=MP_sol, i=iteration)\n if MP_sol['status'] == 3 or MP_sol['status'] == 4:\n print('i = %s : WARNING MP status %s -> Create a new MP, increase big-M value and compute a new PV trajectory from SP' % (iteration, MP_sol['status']))\n\n # MP unbounded or infeasible -> increase big-M's value to get another PV trajectory from the SP\n SP_dual = BD_SP(pv_forecast=pv_max, max_dev=max_dev, engagement=engagement, gamma=gamma, heuristic=solver_param['heuristic'], M_neg=M_neg+50)\n SP_dual.solve(logfile=logfile, Threads=solver_param['Threads'], MIPFocus=solver_param['MIPFocus'],\n TimeLimit=solver_param['TimeLimit'])\n SP_dual_sol = SP_dual.store_solution()\n\n # Compute a new worst PV trajectory from the SP dual solution\n pv_worst_case_from_SP = [pv_max[i] - SP_dual_sol['z_neg'][i] * max_dev[i] for i in range(nb_periods)]\n\n # Create a new MP\n MP = CCG_MP()\n MP.model.update()\n MP.update_MP(pv_trajectory=pv_worst_case_from_SP, iteration=iteration)\n if printconsole:\n print('i = %s : MP with %d variables and %d constraints' % (iteration, len(MP.model.getVars()), len(MP.model.getConstrs())))\n # MP.export_model(dir + 'MP_' + str(iteration))\n if printconsole:\n print('i = %s : solve new MP' % (iteration))\n MP.solve()\n MP_sol = MP.store_solution()\n MP.update_sol(MP_sol=MP_sol, i=iteration)\n\n computation_times.append([SP_dual_sol['time_total'], MP_sol['time_total']])\n\n\n else: # 4 = Model was proven to be either infeasible or unbounded.\n print('SP is unbounded: a feasibility cut is required to be added to the Master Problem')\n\n objectives.append([iteration, MP_sol['obj'], SP_dual_sol['obj'], SP_primal_sol['obj']])\n\n # ------------------------------------------------------------------------------------------------------------------\n # 3. 
Update: the engagement, lower and upper bounds using the updated MP\n # ------------------------------------------------------------------------------------------------------------------\n\n # Solve the MILP with the worst case trajectory\n planner = Planner_MILP(pv_forecast=pv_worst_case_from_SP)\n planner.solve()\n sol_planner = planner.store_solution()\n\n # Update engagement\n engagement = MP_sol['x']\n # Update the lower and upper bounds\n # MP -> give the lower bound\n # SP -> give the upper bound\n epsilon = abs(MP_sol['obj'] - SP_dual_sol['obj'])\n print('i = %s : |MP - SP dual| = %.2f €' % (iteration, epsilon))\n abs_err = abs(MP_sol['obj'] - sol_planner['obj'])\n epsilon_list.append(epsilon)\n epsilon_list.pop(0)\n if printconsole:\n print('i = %s : MP %.2f € SP dual %.2f € -> |MP - SP dual| = %.2f €' % (iteration, MP_sol['obj'], SP_dual_sol['obj'], epsilon))\n print('i = %s : MP %.2f € MILP %.2f € -> |MP - MILP| = %.2f €' % (iteration, MP_sol['obj'], sol_planner['obj'], abs_err))\n print(epsilon_list)\n print(' ')\n\n iteration += 1\n\n # ------------------------------------------------------------------------------------------------------------------\n # CCG loop terminated\n # ------------------------------------------------------------------------------------------------------------------\n if printconsole:\n print('-----------CCG ITERATION TERMINATED-----------')\n print('Final iteration = %s : MP %.2f € SP dual %.2f € -> |MP - SP dual| = %.2f €' % (iteration-1, MP_sol['obj'], SP_dual_sol['obj'], epsilon))\n\n # Export last MP\n MP.export_model(dir + day + '_MP_' + str(warm_start) + '_' + str(int(100 * PARAMETERS['tol_penalty'])) + '_' + str(PARAMETERS['penalty_factor']))\n\n # MP.model.printStats()\n\n # Dump last engagement plan at iteration\n dump_file(dir=dir, name=day+'_x_' + str(warm_start)+ '_' + str(int(100 * PARAMETERS['tol_penalty'])) + '_' + str(PARAMETERS['penalty_factor']), file=engagement)\n\n # Print T CPU\n t_total = time.time() - t_solve\n computation_times = np.asarray(computation_times)\n SP_dual_status = np.asarray(SP_dual_status)\n SP_primal_status = np.asarray(SP_primal_status)\n\n if printconsole:\n print('Total CCG loop t CPU %.1f min' % (t_total / 60))\n print('T CPU (s): Sup Problem max %.1f Master Problem max %.1f' % (computation_times[:, 0].max(), computation_times[:, 1].max()))\n print('nb Sup Problem status 2 %d status 9 %d' % (SP_dual_status[SP_dual_status == 2].shape[0], SP_dual_status[SP_dual_status == 9].shape[0]))\n\n # Store data\n objectives = np.asarray(objectives)\n df_objectives = pd.DataFrame(index=objectives[:, 0], data=objectives[:, 1:], columns=['MP', 'SP', 'SP_primal'])\n\n # store convergence information\n conv_inf = dict()\n conv_inf['mipgap'] = mipgap\n conv_inf['computation_times'] = computation_times\n conv_inf['SP_status'] = SP_dual_status\n conv_inf['SP_primal_status'] = SP_primal_status\n conv_inf['alpha_neg'] = alpha_neg_list\n conv_inf['BESS_count'] = BESS_count_list\n conv_inf['BESS_charge_discharge'] = BESS_charge_discharge_list\n\n return engagement, df_objectives, conv_inf", "def pfit(Vrf,Vdc,X,Y,Z,Irf,Jrf,Krf):\n #1) find dc potential\n #from all_functions import plot_potential,p2d,trap_depth,find_saddle,exact_saddle\n from project_parameters import charge,mass,driveAmplitude,Omega,debug,scale\n #2) find pseudopotential\n \"\"\"Gebhard, Oct 2010:\n changed back to calculating field numerically in ppt2 instead directly\n with bemsolver. 
this is because the slow bemsolver (new version) does not output EX, EY, EZ.\"\"\"\n [Ey,Ex,Ez] = np.gradient(Vrf,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # fortran indexing\n Esq = Ex**2 + Ey**2 + Ez**2\n #3) plotting pseudopotential, etc; outdated?\n PseudoPhi = Esq*(charge**2)/(4*mass*Omega**2) \n U = PseudoPhi+charge*Vdc # total trap potential\n if debug.pfit:\n# plot_potential(Vrf,X,Y,Z,'1D plots','Vrf','U_{rf} (eV)',[Irf,Jrf,Krf])\n# plot_potential(Ex,X,Y,Z,'1D plots','Ex','U_{ps} (eV)',[Irf,Jrf,Krf])\n# plot_potential(Ey,X,Y,Z,'1D plots','Ey','U_{ps} (eV)',[Irf,Jrf,Krf])\n# plot_potential(Ez,X,Y,Z,'1D plots','Ez','U_{ps} (eV)',[Irf,Jrf,Krf]) \n# plot_potential(Esq,X,Y,Z,'1D plots','E**2','U_{ps} (eV)',[Irf,Jrf,Krf])\n plot_potential(Vrf,X,Y,Z,'1D plots','Vrf','U_{rf} (eV)',[Irf,Jrf,Krf])\n plot_potential(PseudoPhi/charge,X,Y,Z,'1D plots','Pseudopotential','U_{ps} (eV)',[Irf,Jrf,Krf])\n# plot_potential(Vdc,X,Y,Z,'1D plots','DC Potential','U_{sec} (eV)',[Irf,Jrf,Krf])\n plot_potential(U/charge,X,Y,Z,'1D plots','Trap Potential','U_{sec} (eV)',[Irf,Jrf,Krf])\n #4) determine trap frequencies and tilt in radial directions\n Uxy = U[Irf-3:Irf+4,Jrf-3:Jrf+4,Krf]\n MU = np.max(Uxy) # normalization factor, will be undone when calculating frequencies\n Uxy = Uxy/MU \n nx,ny,nz=X.shape[0],Y.shape[0],Z.shape[0]\n x,y,z = np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz))\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n x[i,j,k] = X[i]\n y[i,j,k] = Y[j]\n z[i,j,k] = Z[k]\n dL = x[Irf+3,Jrf,Krf]-x[Irf,Jrf,Krf] # is this X? Originally x. Temporarily y so that dL not 0. Probably related to meshgrid or indexing.\n xr = (x[Irf-3:Irf+4,Jrf-3:Jrf+4,Krf]-x[Irf,Jrf,Krf])/dL \n yr = (y[Irf-3:Irf+4,Jrf-3:Jrf+4,Krf]-y[Irf,Jrf,Krf])/dL\n [C1,C2,theta] = p2d(Uxy,xr,yr) \n C1,C2,theta = C1[0],C2[0],theta[0] \n fx = (1e3/dL)*np.sqrt(abs(2*C1*MU/(mass)))/(2*np.pi)\n fy = (1e3/dL)*np.sqrt(abs(2*C2*MU/(mass)))/(2*np.pi)\n #5) trap frequency in axial direction\n Uz=U[Irf,Jrf,:] # old projection\n l1 = np.max([Krf-6,1])\n l2 = np.min([Krf+6,np.max(Z.shape)])\n p = np.polyfit((Z[l1:l2+1]-Z[Krf])/dL,Uz[l1:l2+1],6)\n fz = (1e3/dL)*np.sqrt(2*p[4]/mass)/(2*np.pi)\n [Depth,Xe,Ye,Ze] = trap_depth(U/charge,X,Y,Z,Irf,Jrf,Krf) \n return [fx,fy,fz,theta,Depth,Xe,Ye,Ze]", "def element_power_consistent_with_bus_power(net, rtol=1e-2, test_q=True):\r\n bus_p = pd.Series(data=0., index=net.bus.index)\r\n bus_q = pd.Series(data=0., index=net.bus.index)\r\n\r\n for idx, tab in net.ext_grid.iterrows():\r\n if tab.in_service:\r\n bus_p.at[tab.bus] -= net.res_ext_grid.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_ext_grid.q_mvar.at[idx]\r\n\r\n for idx, tab in net.gen.iterrows():\r\n if tab.in_service:\r\n bus_p.at[tab.bus] -= net.res_gen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_gen.q_mvar.at[idx]\r\n\r\n for idx, tab in net.load.iterrows():\r\n bus_p.at[tab.bus] += net.res_load.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_load.q_mvar.at[idx]\r\n\r\n for idx, tab in net.sgen.iterrows():\r\n bus_p.at[tab.bus] -= net.res_sgen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_sgen.q_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_load.iterrows():\r\n bus_p.at[tab.bus] += net.res_asymmetric_load.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_asymmetric_load.q_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_sgen.iterrows():\r\n bus_p.at[tab.bus] -= net.res_asymmetric_sgen.p_mw.at[idx]\r\n bus_q.at[tab.bus] -= net.res_asymmetric_sgen.q_mvar.at[idx]\r\n\r\n for idx, tab in 
net.storage.iterrows():\r\n bus_p.at[tab.bus] += net.res_storage.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_storage.q_mvar.at[idx]\r\n\r\n for idx, tab in net.shunt.iterrows():\r\n bus_p.at[tab.bus] += net.res_shunt.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_shunt.q_mvar.at[idx]\r\n\r\n for idx, tab in net.ward.iterrows():\r\n bus_p.at[tab.bus] += net.res_ward.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_ward.q_mvar.at[idx]\r\n\r\n for idx, tab in net.xward.iterrows():\r\n bus_p.at[tab.bus] += net.res_xward.p_mw.at[idx]\r\n bus_q.at[tab.bus] += net.res_xward.q_mvar.at[idx]\r\n\r\n for idx, tab in net.svc.iterrows():\r\n bus_q.at[tab.bus] += net.res_svc.q_mvar.at[idx]\r\n\r\n for idx, tab in net.ssc.iterrows():\r\n bus_q.at[tab.bus] += net.res_ssc.q_mvar.at[idx]\r\n\r\n assert allclose(net.res_bus.p_mw.values, bus_p.values, equal_nan=True, rtol=rtol)\r\n if test_q:\r\n assert allclose(net.res_bus.q_mvar.values, bus_q.values, equal_nan=True, rtol=rtol)", "def test_pde_vector_scalar():\n eq = PDE({\"u\": \"vector_laplace(u) - u + gradient(v)\", \"v\": \"- divergence(u)\"})\n grid = UnitGrid([8, 8])\n field = FieldCollection(\n [VectorField.random_uniform(grid), ScalarField.random_uniform(grid)]\n )\n\n res_a = eq.solve(field, t_range=1, dt=0.01, backend=\"numpy\", tracker=None)\n res_b = eq.solve(field, t_range=1, dt=0.01, backend=\"numba\", tracker=None)\n\n res_a.assert_field_compatible(res_b)\n np.testing.assert_allclose(res_a.data, res_b.data)", "def mbed_solve (A, budgets, S, verbose=True):\n # print(S)\n start_time = time.time()\n x_v, C = initialize(A, S)\n if (verbose):\n print(\"Initialized\")\n print(\"V1: \", np.sum(x_v == 1), \" ,V2: \", np.sum(x_v == -1))\n results_info, S_new, Ad, edges_removed = random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=verbose)\n return results_info, S_new, Ad, edges_removed", "def optimize_with_dssp_julia(self, graph, network_objective, old_buildings, postprocess=True):\n # === Start data initialization\n data_init_start = time.time()\n # === Start the algorithm\n all_nodes = set(graph.nodes())\n costs = nx.get_edge_attributes(graph, config.EDGE_COST_KEY)\n heat_demand = nx.get_node_attributes(graph, config.BUILDING_CONSUMPTION_KEY)\n production = nx.get_node_attributes(graph, config.SUPPLY_POWER_CAPACITY_KEY)\n capacities = {}\n #print(costs)\n #print(heat_demand)\n #print(production)\n if self.old_network_graph is not None and self.modify_old_network:\n for e,c in self.old_capacity.items():\n if e in graph.edges(keys=True):\n capacities[e] = c+1e-5\n elif (e[1],e[0],e[2]) in graph.edges(keys=True):\n capacities[(e[1],e[0],e[2])] = c+1e-5\n\n self.logger.info(\"\\tData initialization time: %.2fs\" % (time.time() - data_init_start))\n # === Set up instance of julia :\n self.logger.info(\"Setting up julia call...\")\n julia_instantiate_start = time.time()\n optimizer_directory = os.path.dirname(os.path.realpath(__file__))\n with JuliaQgisInterface() as j:\n j.include(os.path.join(optimizer_directory, \"DSSP.jl\"))\n j.using(\"Main.DSSP: optimize_with_DSSP\")\n assert (hasattr(j, \"optimize_with_DSSP\"))\n self.logger.info(\"\\tJulia instantiating time: %.2fs\" % (time.time() - julia_instantiate_start))\n dssp_start = time.time()\n #print(\"old_buildings\", old_buildings)\n best_solution, best_cost = j.optimize_with_DSSP(network_objective, all_nodes, costs, heat_demand,\n production,\n capacities,\n old_buildings,\n self.logger.info,\n postprocess)\n self.logger.info(\"\\tDSSP run time: %.2fs\" % (time.time() - 
dssp_start))\n return best_solution, best_cost", "def test_pde_2scalar():\n eq = PDE({\"u\": \"laplace(u) - u\", \"v\": \"- u * v\"})\n grid = UnitGrid([8])\n field = FieldCollection.scalar_random_uniform(2, grid)\n\n res_a = eq.solve(field, t_range=1, dt=0.01, backend=\"numpy\", tracker=None)\n res_b = eq.solve(field, t_range=1, dt=0.01, backend=\"numba\", tracker=None)\n\n res_a.assert_field_compatible(res_b)\n np.testing.assert_allclose(res_a.data, res_b.data)", "def solve_elas(self,x,E_p=None):\n \n if x['Crystal_Structure'] == \"Cubic\":\n self.estf = self.Ccubic( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2] )\n\n elif x['Crystal_Structure'] == \"HCP\":\n self.estf = self.Chcp( x['Stiffness'][0], x['Stiffness'][1], x['Stiffness'][2], x['Stiffness'][3], x['Stiffness'][4] )\n\n # Update orientation\n for n in range(9):\n cell_num_list = list((9*self.cell_num)+n)\n self.orient.vector()[cell_num_list] = self.rots[self.subdomain_num,n]\n \n self.a = inner(self.sigs3x3(self.u), sym(grad(self.v)))*dx\n \n if E_p:\n # Note use of sym(), assuming E_p to be the \\chi field\n L_elas_rhs = self.L_elas + inner(self.sigs_e(sym(E_p)), sym(grad(self.v)))*dx\n else:\n L_elas_rhs = self.L_elas \n\n self.A_elas, self.b_elas = assemble_system(self.a, L_elas_rhs, self.bc_elas) \n \n # Attach near nullspace to matrix\n as_backend_type(self.A_elas).set_near_nullspace(self.null_space)\n\n # Set matrix operator\n self.elasticity_solver.set_operator(self.A_elas);\n\n # Compute solution\n self.elasticity_solver.solve(self.ue.vector(), self.b_elas);\n \n if E_p:\n self.Ue_sym = project( sym(grad(self.ue) - E_p), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n else:\n self.Ue_sym = project( sym(grad(self.ue)), self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")\n \n self.sim_strn = np.reshape(self.Ue_sym.vector().get_local(),(len(self.grains.array()),9))\n\n for grain_no in range(self.grains.array().max()):\n # Grain numbering is 1 index origin\n cell_subset = self.grains.array()==(grain_no+1)\n if np.any(cell_subset):\n self.sim_avg[grain_no,:] = np.average(self.sim_strn[cell_subset,:],\n axis=0,weights=self.dVol[cell_subset]) \n \n deps = self.exp_strn - self.sim_avg\n resid = np.linalg.norm(deps.ravel())\n print(resid) #,self.its)\n return resid", "def _dprime(A, B, wopt=None):\n\n sigA = np.cov((A - A.mean(axis=-1, keepdims=True)))\n sigB = np.cov((B - B.mean(axis=-1, keepdims=True)))\n\n usig = 0.5 * (sigA + sigB)\n u_vec = (A.mean(axis=-1) - B.mean(axis=-1))[np.newaxis, :] \n\n try:\n valA, vecA = np.linalg.eig(sigA)\n valB, vecB = np.linalg.eig(sigB)\n evec_sim = abs(vecB[:, np.argsort(valB)[::-1][0]].dot(vecA[:, np.argsort(valA)[::-1][0]]))\n except:\n evec_sim = np.nan\n\n if wopt is not None:\n wopt_train = wopt / np.linalg.norm(wopt)\n A = A.T.dot(wopt_train).T\n B = B.T.dot(wopt_train).T\n\n usig_ = 0.5 * (np.cov((A - A.mean(axis=-1, keepdims=True))) + np.cov((B - B.mean(axis=-1, keepdims=True))))\n u_vec_ = (A.mean(axis=-1) - B.mean(axis=-1))[np.newaxis, :]\n \n try:\n if wopt is not None:\n wopt = (1 / usig_) * u_vec_\n dp2 = np.matmul(u_vec_, wopt)[0][0]\n try:\n # if wopt is passed, could still compute dpirme but can't compute \n # evecs/ evals\n evals, evecs = np.linalg.eig(usig)\n # make sure evals / evecs are sorted\n idx_sort = np.argsort(evals)[::-1]\n evals = evals[idx_sort]\n evecs = evecs[:, idx_sort]\n except:\n wopt = np.nan * np.ones((A.shape[0], 1))\n evals = np.nan * np.ones((A.shape[0], ))\n evecs = np.nan * np.ones((A.shape[0], A.shape[0]))\n\n else:\n 
inv = np.linalg.inv(usig)\n wopt = inv @ u_vec.T\n dp2 = np.matmul(u_vec, wopt)[0][0]\n\n evals, evecs = np.linalg.eig(usig)\n # make sure evals / evecs are sorted\n idx_sort = np.argsort(evals)[::-1]\n evals = evals[idx_sort]\n evecs = evecs[:, idx_sort]\n\n except:\n log.info('WARNING, Singular Covariance, dprime infinite, set to np.nan')\n wopt_nan = np.nan * np.ones((A.shape[0], 1))\n evals_nan = np.nan * np.ones((A.shape[0], ))\n evecs_nan = np.nan * np.ones((A.shape[0], A.shape[0]))\n u_vec_nan = np.nan * np.ones((1, A.shape[0]))\n return np.nan, wopt_nan, evals_nan, evecs_nan, np.nan, u_vec_nan\n\n return dp2, wopt, evals, evecs, evec_sim, u_vec" ]
[ "0.60630625", "0.5577446", "0.5546294", "0.54922825", "0.54431385", "0.5392691", "0.53446376", "0.53008276", "0.52739555", "0.52732044", "0.52537686", "0.51653886", "0.5158268", "0.5113869", "0.5086314", "0.50845", "0.50803244", "0.50725675", "0.50536424", "0.5046146", "0.5026179", "0.5016833", "0.50126654", "0.5009299", "0.49954784", "0.4984334", "0.4982522", "0.49469948", "0.49381983", "0.493705" ]
0.71842694
0
Rebuild the parameter vector. Note that this can potentially alter the parameter order if the strings are given in a different order. It mutates the parameter vector to contain the elements as specified in "parameters" with the defaults as specified in "defaults". If the parameter already exists in the vector, nothing happens to it. If it doesn't, it gets initialized to its default.
def _set_params(self, params, defaults):
    new_params = OrderedDict(
        zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])
    )
    for key, value in self._src.items():
        if key in new_params:
            new_params[key] = value
    self._src = new_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebuild_param(self,vec,**kwargs):\n from collections import OrderedDict\n tmp = OrderedDict([('lengthscale',None),( 'variance',None),( 'gstds',None)])\n for key,val in kwargs.items():\n assert val!=None, \"Can't have None as fixed values\"\n tmp[key]=val\n for key,val in tmp.items():\n if val==None:\n tmp[key]=vec[0]\n vec = np.delete(vec,0)\n return np.array([tmp[key] for key in tmp])", "def _apply_defaults(self):\n # Applies normal parameter defaults\n for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():\n if scalar_parameter not in self.parameters:\n self.parameters[scalar_parameter] = copy.copy(value)\n\n # Applies defaults to all ramp parameters\n for table_parameter, table in self._DEFAULT_PARAMETER_TABLES.items():\n self.parameters[table_parameter] = [list(tup) for tup in table]\n self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])", "def _get_default_parameters(new_values):\n no_default = [\"BEAM\", \"TYPE\", \"ERRORDEF\", \"CORRECTIONS\"]\n\n not_found = [nf for nf in no_default if nf not in new_values]\n if any(not_found):\n raise ValueError(\"Required parameters '{}' not found.\".format(not_found))\n\n # Some defaults\n default = {\n # Beam Parameters\n \"QX\": \"62.31\",\n \"QY\": \"60.32\",\n \"CHROMX\": \"3\",\n \"CHROMY\": \"3\",\n # Settings\n \"USETHIN\": \"1\",\n \"ARCERRORS\": \"0\",\n \"CALCCORRECTIONS\": \"1\",\n # Outputs\n \"NOMINALMACHINE\": \"\",\n \"ARCAPPLIED\": \"\",\n \"MQXAPPLIED\": \"\",\n \"MBIPAPPLIED\": \"\",\n \"ALLAPPLIED\": \"\",\n \"CORRECTED\": \"\",\n }\n\n # crossing angles and separation bumps\n for idx in [1,2,5,8]:\n for prefix in [\"XING\", \"SEP\", \"PHI\"]:\n default[\"{:s}{:d}\".format(prefix, idx)] = \"0\"\n\n # applied errors\n for idx in range(1, 12):\n for orientation in [\"A\", \"B\"]:\n default[\"{:s}{:d}\".format(orientation, idx)] = \"0\"\n\n # return dictionary filled with defaults and new values\n default.update(new_values)\n return default", "def resetparams(self, parameters):\n try:\n utils.update_dictionary_items(self.params,parameters)\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}", "def _parse_parameters(self, parameters_text):\n for mo in re.finditer(self._PARAMETERS_RE, parameters_text):\n self._parameters.append(Parameter(mo.group(\"param_name\"), mo.group(\"default_value\")))", "def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})", "def _construct_optional(self, params):\r\n\r\n args = []\r\n filtered = {key: arg.default for key, arg in params.items() if arg.default != inspect._empty}\r\n for key, default in filtered.items():\r\n arg = self.OptionalArg(full=key, abbrev=key[0].lower(), default=default)\r\n args.append(arg)\r\n\r\n args_full, args_abbrev = dict(), dict()\r\n\r\n # Resolve conflicts\r\n known_count = defaultdict(int)\r\n for arg in args:\r\n args_full[arg.full] = arg\r\n\r\n if known_count[arg.abbrev] == 0:\r\n args_abbrev[arg.abbrev] = arg\r\n elif known_count[arg.abbrev] == 1:\r\n new_abbrev = 
arg.abbrev.upper()\r\n args_full[arg.full] = self.OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)\r\n args_abbrev[new_abbrev] = args_full[arg.full]\r\n else:\r\n new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])\r\n args_full[arg.full] = self.OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)\r\n args_abbrev[new_abbrev] = args_full[arg.full]\r\n known_count[arg.abbrev] += 1\r\n return args_full, args_abbrev", "def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value", "def add_parameters(self, parameters, new_parameters):\n for new in new_parameters:\n if new:\n for parameter in parameters:\n if parameter.name == new.name:\n break\n else:\n parameters.append(new)", "def load_params(param_vector=[]):\n params = {}\n param_vector_default = [-1.43,0.05,7.5,0.05,1.,40.,0.6,1.,5.5]\n\n if len(param_vector) != 0:\n params['alpha'], params['sigma_M'], params['M50'], params['sigma_mpeak'], params['B'], params['A'], params['sigma_r'], params['n'], params['Mhm'] = param_vector\n else:\n params['alpha'], params['sigma_M'], params['M50'], params['sigma_mpeak'], params['B'], params['A'], params['sigma_r'], params['n'], params['Mhm'] = param_vector_default\n\n return params", "def update_drum_params(input_args, default_params):\n try:\n as_dict = ast.literal_eval(str(input_args))\n except ValueError:\n base_params = get_base_params(0, 0, 0, 0)\n print(f'The input string: `{input_args}` is not in the right format.')\n print('The input and each key should be enclosed in quotes.')\n print('Heres an example:')\n example = \"\"\" -kick \"{'div':2}\" \"\"\"\n print('\\t', example)\n print('Poissible parameters are: ')\n [print('\\t', k) for k in base_params.keys()]\n except Exception as e:\n print(e)\n\n for k, v in as_dict.items():\n if k in default_params:\n default_params[k] = v\n return default_params", "def resetparams(self, parameters):\n self.weights = None\n try:\n self.params = parameters\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}", "def SetFixedParams(self, fixedParameters=None):\n # Set first the fixParameter to true and\n # pass the parameters to fix\n \n #if not self.fixParameter:\n # self.fixParameter = True\n \n # Then adjust the Names and the Values\n \n if fixedParameters is not None:\n self.fixParameter = True\n pNames, pValues = \\\n Utils.reduceParameters(self.parameterNames0, \\\n self.initialParameterValues0, \\\n fixedParameters)\n self.parameterNames = pNames\n self.parameterNameList = pNames.split(\",\")\n self.initialParameterValues = pValues\n self.fixedParameters = fixedParameters\n self.fixParameter = True\n self.SetFixedParamsPass = True\n else:\n if self.SetFixedParamsPass:\n self.parameterNames=self.parameterNames0\n self.parameterNameList = self.parameterNameList0\n self.initialParameterValues = self.initialParameterValues0\n self.fixParameter = False\n self.fixedParameters = None", "def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def 
reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def update_custom_environment_params(self):\n allowed_parameter_keys = [\n k for k, v in signature(Environment).parameters.items() if v.kind == v.KEYWORD_ONLY\n ]\n\n for k in allowed_parameter_keys:\n if getattr(self, k) is None:\n setattr(self, k, self.DEFAULT_PARAMS.get(k, None))", "def updateParameters(self, parameters):", "def updateParameters(self, parameters):\n if parameters[0].value and parameters[3].value:\n if (parameters[0].altered or paramaters[3].altered) and not parameters[4].altered:\n layer = parameters[0].valueAsText;\n desc = arcpy.Describe(layer)\n name = desc.file;\n type = parameters[3].valueAsText;\n char = type[:1];\n if (char != 'U'):\n if (char != 'C'):\n char = 'C' + char; #Output _C + first letter of type unless it is U\n else:\n char = 'CT'; # Unless it is C, then it is CT... \n #Update name accordingly\n resulttmp = \"%WORKSPACE%\\\\\" + name + \"_\" + char; \n parameters[4].value = resulttmp.replace(\".\",\"\"); #Remove illegal characters\n return", "def set_params(self, new_params: torch.Tensor) -> None:\n assert new_params.size() == self.get_params().size()\n progress = 0\n for pp in list(self.net.parameters()):\n cand_params = new_params[progress: progress +\n torch.tensor(pp.size()).prod()].view(pp.size())\n progress += torch.tensor(pp.size()).prod()\n pp.data = cand_params", "def merged_parameters(self, parameters):\n result = self.__params.copy()\n for k, v in parameters.iteritems():\n result[k] = v\n return result", "def xml_fix_parms( self ):\n\t\tnew_parms = {}\n\t\tfor key in self.parms:\n\t\t\tnew_parms[ self.xml_fix_parm(key) ] = self.xml_fix_parm(self.parms[key])\n\t\tself.parms = new_parms", "def update_params(argv: list, prm: dict):\n\n\tfor a in argv[1:]:\n\t\ttoks = a.split('=',1)\n\t\tif len(toks)<2: continue\n\t\tk,v = toks[:2]\n\t\tif k not in prm: continue\n\t\tprm[k] = v", "def updateParameters(self, parameters):\n\t\treturn", "def overwrite_hyperparams(self):\n try:\n default_hyperparams = self.hyperparams\n for key in default_hyperparams:\n try:\n flag = self.FLAGS[key]\n param_value = flag.value\n if param_value is not None:\n self.hyperparams[key] = param_value\n except:\n pass\n except:\n pass", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return" ]
[ "0.63915217", "0.62324", "0.5815009", "0.5782338", "0.56467503", "0.5494379", "0.5489028", "0.546808", "0.5458663", "0.5448343", "0.53930414", "0.5371881", "0.5368795", "0.53453875", "0.53266186", "0.5283019", "0.5277188", "0.5276958", "0.5272733", "0.5256184", "0.52495134", "0.5220922", "0.52184016", "0.52126145", "0.5194018", "0.5194018", "0.51925606", "0.51925606", "0.51925606", "0.51925606" ]
0.6548692
0
Chains a Future instance directly to another Future instance. Used for the recursive Promise Resolution Procedure (section 2.3.2) specified in Promise/A+, which allows .then() to piggyback on a Promise returned by a success handler.
def _chain_to_another_future(self, base_future):
    if base_future in self._chained_futures_log:
        raise CircularFuturesChainException(
            'Circular Futures chain detected. Future {} is already in the resolved chain {}'.format(
                base_future, set(self._chained_futures_log)
            )
        )
    else:
        self._chained_futures_log.add(base_future)

    def _done_handler(base_future):
        """
        Converts results of underlying future into results of new future

        :param ThenableFuture base_future: Original Future instance, but now
            guaranteed to be resolved due to cancellation or completion.
        """
        if not base_future.done():
            # this should never ever be true.
            # having this code here just to avoid infinite timeout
            self.cancel()
            return

        if base_future.cancelled():
            self.cancel()
            return

        try:
            result = base_future.result()
            if isinstance(result, Future):
                self._chain_to_another_future(result)
            else:
                self.set_result(result)
            return
        except BaseException:
            # note, that exception may come from self.result()
            # and from on_fulfilled(result) calls.
            ex, trace_back = sys.exc_info()[1:]
            self.set_exception_info(ex, trace_back)
            return

    base_future.add_done_callback(_done_handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chain_future(a, b):\n def copy(future):\n assert future is a\n if b.done():\n return\n if (isinstance(a, TracebackFuture) and\n isinstance(b, TracebackFuture) and\n a.exc_info() is not None):\n b.set_exc_info(a.exc_info())\n elif a.exception() is not None:\n b.set_exception(a.exception())\n else:\n b.set_result(a.result())\n\n a.add_done_callback(copy)", "def _chain_result(outer_future):\n def f(inner_future):\n try:\n result = inner_future.result()\n except BaseException as e:\n outer_future.set_exception(e)\n else:\n outer_future.set_result(result)\n return f", "def _done_handler(base_future):\n if not base_future.done():\n # this should never ever be true.\n # having this code here just to avoid infinite timeout\n new_future.cancel()\n return\n\n if base_future.cancelled():\n new_future.cancel()\n return\n\n try:\n result = base_future.result()\n if on_fulfilled:\n result = on_fulfilled(result)\n\n # Per Promise/A+ spec, if return value is a Promise,\n # our promise must adapt the state of the return value Promise\n if isinstance(result, Future):\n # this is the only outcome where we don't\n # set new_future's result in this code and\n # defer resolution of new_future to outcome of return value Promise resolution\n new_future._chain_to_another_future(result)\n else:\n new_future.set_result(result)\n return\n except BaseException:\n # note, that exception may come from self.result()\n # and from on_fulfilled(result) calls.\n ex, trace_back = sys.exc_info()[1:]\n if not on_rejected:\n new_future.set_exception_info(ex, trace_back)\n return\n else:\n try:\n result = on_rejected(ex)\n if isinstance(result, BaseException):\n raise result\n else:\n new_future.set_result(result)\n return\n except BaseException:\n ex, trace_back = sys.exc_info()[1:]\n new_future.set_exception_info(ex, trace_back)\n return", "def return_fake_future(f):\n def wrap(*args, **kwargs):\n future = Future()\n future.set_result(f(*args, **kwargs))\n return future\n return wrap", "def _done_handler(base_future):\n if not base_future.done():\n # this should never ever be true.\n # having this code here just to avoid infinite timeout\n self.cancel()\n return\n\n if base_future.cancelled():\n self.cancel()\n return\n\n try:\n result = base_future.result()\n if isinstance(result, Future):\n self._chain_to_another_future(result)\n else:\n self.set_result(result)\n return\n except BaseException:\n # note, that exception may come from self.result()\n # and from on_fulfilled(result) calls.\n ex, trace_back = sys.exc_info()[1:]\n self.set_exception_info(ex, trace_back)\n return", "def andThen(self, callback):\n if self.running: # We might want a mutex here...\n return self.future.andThen(callback)\n else:\n callback(self.future.value()) #?\n # return something? (to see when we have a testcase for this...)", "def make_future(result=None):\n future = Future()\n future.set_result(result)\n return future", "def maybe_future(x):\n if is_future(x):\n return x\n else:\n fut = Future()\n fut.set_result(x)\n return fut", "def then(self, callback):\n if self.running: # We might want a mutex here...\n return self.future.then(callback)\n else:\n callback(self)\n # return something? 
(to see when we have a testcase for this...)", "def _return_result(self, done):\n chain_future(done, self._running_future)\n\n self.current_future = done\n self.current_index = self._unfinished.pop(done)", "async def test_wrap_result(self):\n result = 321\n wrapped = util.wrap_result(result, True)\n await wrapped\n assert isinstance(wrapped, asyncio.Future)\n assert wrapped.result() == result", "def __call__(self):\r\n if self.__failure is not None:\r\n return fail(self.__failure)\r\n\r\n if self.__pending is not None:\r\n d = Deferred()\r\n self.__pending.append(d)\r\n return d\r\n\r\n return succeed(self.__obj)", "def maybe_future(obj):\n if inspect.isawaitable(obj):\n # already awaitable, use ensure_future\n return asyncio.ensure_future(obj)\n elif isinstance(obj, concurrent.futures.Future):\n return asyncio.wrap_future(obj)\n else:\n # could also check for tornado.concurrent.Future\n # but with tornado >= 5.1 tornado.Future is asyncio.Future\n f = asyncio.Future()\n f.set_result(obj)\n return f", "def _thread_run_for_result(future, func, *args):\n result = func(future, *args)\n future._set_result(result)", "def _sub_future_done(self, f):\n\n # If this function were a nested function inside `_step` then `_step`\n # frames would never be freed until completion of the task. This is\n # because each `_sub_future_done` frame would keep a reference to its\n # enclosing `_step` frame, while the next `_step` frame would keep a\n # reference to the `_sub_future_done` frame.\n if f.exception():\n self._step(exc=f.exception())\n else:\n self._step(value=f.result())\n\n # Avoid cyclic references from tracebacks.\n self = None\n f = None", "def _cast(obj):\n if isinstance(obj, Future):\n return obj\n else:\n return NonFuture(obj)", "def _callbackChooser(self, future):\n assert(self.done())\n try:\n self._resultFuture.result()\n except TimeoutError:\n for c in self._callbackTimeout:\n c()\n except CancelledError:\n for c in self._callbackCancelled:\n c()\n if self._callbackSuccess:\n for c in self._callbackSuccess:\n c()", "async def test_wrap_async(self):\n result = 987\n wrapped = async_util.wrap_async(result)\n await wrapped\n assert isinstance(wrapped, asyncio.Future)\n assert wrapped.result() == result", "def test_deferred_success_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(succeed(123))\n self.assertIsInstance(result, EventualResult)\n self.assertEqual(result.wait(0.1), 123)", "def _set_futures(self, result: bool) -> None:\n for future in self._image_futures:\n if not future.done():\n future.set_result(result)\n self._image_futures.clear()", "def wait(result):\n if is_result_proxy(result):\n result.__wrapped__ # force the evaluation", "def set_future(self, future):\r\n self._future = future", "def test_success_result_twice(self):\n dr = EventualResult(succeed(123), None)\n self.assertEqual(dr.wait(0.1), 123)\n self.assertEqual(dr.wait(0.1), 123)", "def handle_done(future):\n results[in_progress[future]] = future.result()\n del in_progress[future]", "def resolve(self, result: Any) -> None:\n self.expr.call_hash = self.call_hash\n self.result_promise.do_resolve(result)\n self.clear()", "def _bind_future(function):\n return lambda container: container.bind_future(function)", "def set_result(self, result):\n with self._condition:\n self._result = result\n self._state = FadeFuture.FINISHED\n self._condition.notify_all()", "def task_done(self, pkg, attr, future):\n exc = future.exception()\n if exc is not None:\n # traceback can't be pickled so serialize it\n tb 
= traceback.format_exc()\n # return exceptions that occurred in threads\n self.results_q.put(tb)\n return\n\n result = future.result()\n if result is not None:\n if pkg is not None:\n # recreate result object with different pkg target and attr\n attrs = result._attrs.copy()\n attrs[\"attr\"] = attr\n result = result._create(**attrs, pkg=pkg)\n self.results_q.put([result])", "def test_deferred_success_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(succeed(123))\n self.assertEqual(result, 123)", "def _coalesce(success_handler, failure_handler):\n def decorator(container):\n if is_successful(container):\n return success_handler(container.unwrap())\n return failure_handler(container.failure())\n return decorator" ]
[ "0.78678876", "0.6660304", "0.6277418", "0.6258491", "0.61489964", "0.59651977", "0.5930862", "0.5908173", "0.58968514", "0.58914006", "0.5653376", "0.5583116", "0.5559329", "0.54988134", "0.54841375", "0.5467413", "0.53980947", "0.5350944", "0.532589", "0.5256295", "0.5244313", "0.52230054", "0.52055514", "0.5160959", "0.5146459", "0.5075679", "0.5059211", "0.50322556", "0.50177616", "0.5004191" ]
0.7341593
1
Creates the himesis graph representing the Simulink model HFlatten2.
def __init__(self): # Flag this instance as compiled now self.is_compiled = True super(HFlatten2, self).__init__(name='HFlatten2', num_nodes=117, edges=[]) # Add the edges self.add_edges([(5, 66), (66, 50), (5, 67), (67, 51), (5, 35), (35, 20), (5, 36), (36, 21), (11, 68), (68, 52), (6, 37), (37, 22), (7, 38), (38, 23), (8, 39), (39, 24), (12, 69), (69, 53), (15, 40), (40, 25), (16, 41), (41, 26), (17, 42), (42, 27), (0, 70), (70, 54), (0, 43), (43, 28), (3, 71), (71, 55), (3, 72), (72, 56), (3, 44), (44, 29), (4, 73), (73, 57), (4, 74), (74, 58), (4, 45), (45, 30), (9, 75), (75, 59), (9, 76), (76, 60), (9, 46), (46, 31), (13, 77), (77, 61), (1, 78), (78, 62), (14, 79), (79, 63), (2, 80), (80, 64), (2, 81), (81, 65), (2, 47), (47, 32), (18, 48), (48, 33), (19, 49), (49, 34), (5, 98), (98, 7), (5, 99), (99, 12), (5, 100), (100, 15), (5, 101), (101, 17), (5, 102), (102, 0), (5, 103), (103, 3), (5, 104), (104, 9), (5, 105), (105, 14), (9, 106), (106, 8), (9, 107), (107, 4), (9, 108), (108, 13), (9, 109), (109, 2), (9, 110), (110, 18), (9, 111), (111, 19), (10, 112), (112, 5), (10, 113), (113, 11), (10, 114), (114, 6), (10, 115), (115, 16), (10, 116), (116, 1), (29, 82), (82, 60), (31, 83), (83, 54), (28, 84), (84, 53), (28, 85), (85, 63), (25, 86), (86, 56), (27, 87), (87, 55), (22, 88), (88, 51), (26, 89), (89, 50), (21, 90), (90, 62), (20, 91), (91, 52), (24, 92), (92, 64), (32, 93), (93, 61), (30, 94), (94, 65), (33, 95), (95, 58), (34, 96), (96, 57), (23, 97), (97, 59)]) # Set the graph attributes self["mm__"] = pickle.loads("""(lp1 S'Simulink' p2 a.""") self["name"] = """Flatten2""" self["GUID__"] = UUID('3bd37131-b783-49da-b347-b00a25f97e1e') # Set the node attributes self.vs[0]["Name"] = """Gain2""" self.vs[0]["SampleTime"] = -1.0 self.vs[0]["gain"] = 5.4 self.vs[0]["BackgroundColor"] = """yellow""" self.vs[0]["Position"] = pickle.loads("""(lp1 F405 aF99 aF445 aF131 a.""") self.vs[0]["mm__"] = """Gain""" self.vs[0]["GUID__"] = UUID('aa88c5b8-9e26-46a0-ac27-c2ce5ead2aab') self.vs[1]["NumInputPorts"] = """1""" self.vs[1]["Name"] = """Scope""" self.vs[1]["BackgroundColor"] = """white""" self.vs[1]["Position"] = pickle.loads("""(lp1 F345 aF129 aF375 aF161 a.""") self.vs[1]["mm__"] = """Scope""" self.vs[1]["LimitDataPoints"] = """on""" self.vs[1]["GUID__"] = UUID('5b3d0f44-79dd-4361-baa2-e5158af03f75') self.vs[2]["Name"] = """Sum""" self.vs[2]["Inputs"] = """|++""" self.vs[2]["SampleTime"] = -1.0 self.vs[2]["IconShape"] = """round""" self.vs[2]["BackgroundColor"] = """lightBlue""" self.vs[2]["Position"] = pickle.loads("""(lp1 F280 aF90 aF300 aF110 a.""") self.vs[2]["mm__"] = """Sum""" self.vs[2]["GUID__"] = UUID('c3f1b72b-f864-4dc4-a9ec-4b1768272323') self.vs[3]["Name"] = """Product2""" self.vs[3]["SampleTime"] = -1.0 self.vs[3]["BackgroundColor"] = """yellow""" self.vs[3]["Position"] = pickle.loads("""(lp1 F185 aF177 aF215 aF208 a.""") self.vs[3]["mm__"] = """Product""" self.vs[3]["GUID__"] = UUID('2116c172-b8c5-4f25-9cfb-c9a8bc23e063') self.vs[4]["Name"] = """Product3""" self.vs[4]["SampleTime"] = -1.0 self.vs[4]["BackgroundColor"] = """lightBlue""" self.vs[4]["Position"] = pickle.loads("""(lp1 F225 aF127 aF255 aF158 a.""") self.vs[4]["mm__"] = """Product""" self.vs[4]["GUID__"] = UUID('30f0a0a1-0c57-4801-8224-c52dc4871906') self.vs[5]["Name"] = """Subsystem""" self.vs[5]["BackgroundColor"] = """yellow""" self.vs[5]["Position"] = pickle.loads("""(lp1 F145 aF89 aF245 aF131 a.""") self.vs[5]["mm__"] = """SubSystem""" self.vs[5]["GUID__"] = UUID('5b78ddd3-6f58-47dd-8f61-985d21cf2e6d') 
self.vs[6]["Name"] = """Constant""" self.vs[6]["SampleTime"] = inf self.vs[6]["value"] = 134.67 self.vs[6]["BackgroundColor"] = """white""" self.vs[6]["Position"] = pickle.loads("""(lp1 F30 aF127 aF80 aF163 a.""") self.vs[6]["mm__"] = """Constant""" self.vs[6]["GUID__"] = UUID('a3b6dd66-2c10-4435-97f8-6bfd668c9675') self.vs[7]["Name"] = """Constant2""" self.vs[7]["SampleTime"] = inf self.vs[7]["value"] = 12.34 self.vs[7]["BackgroundColor"] = """yellow""" self.vs[7]["Position"] = pickle.loads("""(lp1 F175 aF120 aF220 aF150 a.""") self.vs[7]["mm__"] = """Constant""" self.vs[7]["GUID__"] = UUID('bc283ed6-240c-47ea-8c44-555e26976de9') self.vs[8]["Name"] = """Constant""" self.vs[8]["SampleTime"] = inf self.vs[8]["value"] = 66598.0 self.vs[8]["BackgroundColor"] = """lightBlue""" self.vs[8]["Position"] = pickle.loads("""(lp1 F205 aF69 aF250 aF101 a.""") self.vs[8]["mm__"] = """Constant""" self.vs[8]["GUID__"] = UUID('47141a82-efb0-40f9-b21f-bc20e042605a') self.vs[9]["Name"] = """Subsystem2""" self.vs[9]["BackgroundColor"] = """lightBlue""" self.vs[9]["Position"] = pickle.loads("""(lp1 F270 aF134 aF370 aF176 a.""") self.vs[9]["mm__"] = """SubSystem""" self.vs[9]["GUID__"] = UUID('8d319c42-24b2-4033-a93b-1769106af470') self.vs[10]["Name"] = """Flatten2""" self.vs[10]["Position"] = pickle.loads("""(lp1 .""") self.vs[10]["mm__"] = """SubSystem""" self.vs[10]["GUID__"] = UUID('3ff74440-7f12-4691-9bb4-fecc2804b8ca') self.vs[11]["Name"] = """Out1""" self.vs[11]["BackgroundColor"] = """white""" self.vs[11]["Position"] = pickle.loads("""(lp1 F355 aF98 aF385 aF112 a.""") self.vs[11]["mm__"] = """Outport""" self.vs[11]["Port"] = 1 self.vs[11]["GUID__"] = UUID('37ae989e-8191-4230-800f-c25db780344b') self.vs[12]["Name"] = """Out2""" self.vs[12]["BackgroundColor"] = """yellow""" self.vs[12]["Position"] = pickle.loads("""(lp1 F465 aF188 aF495 aF202 a.""") self.vs[12]["mm__"] = """Outport""" self.vs[12]["Port"] = 2 self.vs[12]["GUID__"] = UUID('b55605ee-5f95-43bb-bc15-517dcb5a6077') self.vs[13]["Name"] = """Out1""" self.vs[13]["BackgroundColor"] = """lightBlue""" self.vs[13]["Position"] = pickle.loads("""(lp1 F355 aF108 aF385 aF122 a.""") self.vs[13]["mm__"] = """Outport""" self.vs[13]["Port"] = 1 self.vs[13]["GUID__"] = UUID('2d73df35-44b9-4ae3-8a33-80439e9ea242') self.vs[14]["Name"] = """Out1""" self.vs[14]["BackgroundColor"] = """yellow""" self.vs[14]["Position"] = pickle.loads("""(lp1 F475 aF108 aF505 aF122 a.""") self.vs[14]["mm__"] = """Outport""" self.vs[14]["Port"] = 1 self.vs[14]["GUID__"] = UUID('cc231818-18b3-4628-b567-61cecc568877') self.vs[15]["Name"] = """In2""" self.vs[15]["BackgroundColor"] = """yellow""" self.vs[15]["Position"] = pickle.loads("""(lp1 F40 aF193 aF70 aF207 a.""") self.vs[15]["mm__"] = """Inport""" self.vs[15]["Port"] = 2 self.vs[15]["GUID__"] = UUID('48ee4de9-4f36-40a8-b9ea-91985af85c43') self.vs[16]["Name"] = """In1""" self.vs[16]["BackgroundColor"] = """white""" self.vs[16]["Position"] = pickle.loads("""(lp1 F40 aF48 aF70 aF62 a.""") self.vs[16]["mm__"] = """Inport""" self.vs[16]["Port"] = 1 self.vs[16]["GUID__"] = UUID('abbcc9b5-a037-4543-94fd-e9e07898e0fd') self.vs[17]["Name"] = """In1""" self.vs[17]["BackgroundColor"] = """yellow""" self.vs[17]["Position"] = pickle.loads("""(lp1 F40 aF133 aF70 aF147 a.""") self.vs[17]["mm__"] = """Inport""" self.vs[17]["Port"] = 1 self.vs[17]["GUID__"] = UUID('73d6aff1-3f45-45c1-9c13-8bea418fc6e0') self.vs[18]["Name"] = """In2""" self.vs[18]["BackgroundColor"] = """lightBlue""" self.vs[18]["Position"] = pickle.loads("""(lp1 F115 aF158 aF145 
aF172 a.""") self.vs[18]["mm__"] = """Inport""" self.vs[18]["Port"] = 2 self.vs[18]["GUID__"] = UUID('f910f910-3b72-4d34-ba33-b1005cba5f1e') self.vs[19]["Name"] = """In1""" self.vs[19]["BackgroundColor"] = """lightBlue""" self.vs[19]["Position"] = pickle.loads("""(lp1 F110 aF103 aF140 aF117 a.""") self.vs[19]["mm__"] = """Inport""" self.vs[19]["Port"] = 1 self.vs[19]["GUID__"] = UUID('775fc836-56be-481d-821a-ddb8ad3fcdf2') self.vs[20]["Name"] = """1""" self.vs[20]["mm__"] = """Port_Output""" self.vs[20]["GUID__"] = UUID('09c29cf7-9e1d-494b-a475-dfc2d49a1888') self.vs[21]["Name"] = """2""" self.vs[21]["mm__"] = """Port_Output""" self.vs[21]["GUID__"] = UUID('98e3375b-1e6b-4f23-a5b8-69ae5a078f66') self.vs[22]["Name"] = """1""" self.vs[22]["mm__"] = """Port_Output""" self.vs[22]["GUID__"] = UUID('d059abe7-06b2-4d42-8eb2-13ec4f2b0605') self.vs[23]["Name"] = """1""" self.vs[23]["mm__"] = """Port_Output""" self.vs[23]["GUID__"] = UUID('f9b1025f-94a8-4414-9e1f-0c8d88dfa1bb') self.vs[24]["Name"] = """1""" self.vs[24]["mm__"] = """Port_Output""" self.vs[24]["GUID__"] = UUID('e7857c2e-3c19-4c69-b716-88ec14c15e2f') self.vs[25]["Name"] = """1""" self.vs[25]["mm__"] = """Port_Output""" self.vs[25]["GUID__"] = UUID('c8c2d6da-7413-42d8-a87e-41c7a132be22') self.vs[26]["Name"] = """1""" self.vs[26]["mm__"] = """Port_Output""" self.vs[26]["GUID__"] = UUID('16517dd7-a328-44cd-beea-2ef80dcae619') self.vs[27]["Name"] = """1""" self.vs[27]["mm__"] = """Port_Output""" self.vs[27]["GUID__"] = UUID('d961915e-3cd7-4b60-80d6-8be1f5192e27') self.vs[28]["Name"] = """1""" self.vs[28]["mm__"] = """Port_Output""" self.vs[28]["GUID__"] = UUID('e90742ed-92ec-4a96-b73d-d0193458fe9a') self.vs[29]["Name"] = """1""" self.vs[29]["mm__"] = """Port_Output""" self.vs[29]["GUID__"] = UUID('9aaacc04-1328-483d-ae38-c5536bd24c00') self.vs[30]["Name"] = """1""" self.vs[30]["mm__"] = """Port_Output""" self.vs[30]["GUID__"] = UUID('8cf56cf4-bde6-47bd-a01a-98948b37cc05') self.vs[31]["Name"] = """1""" self.vs[31]["mm__"] = """Port_Output""" self.vs[31]["GUID__"] = UUID('23a56bf4-b95c-406e-a94a-9b1d95b08c95') self.vs[32]["Name"] = """1""" self.vs[32]["mm__"] = """Port_Output""" self.vs[32]["GUID__"] = UUID('01de4a4e-867b-4fa2-88ab-18138ebb83c5') self.vs[33]["Name"] = """1""" self.vs[33]["mm__"] = """Port_Output""" self.vs[33]["GUID__"] = UUID('be0b168e-5e87-4c60-b243-ae86ae4470fd') self.vs[34]["Name"] = """1""" self.vs[34]["mm__"] = """Port_Output""" self.vs[34]["GUID__"] = UUID('ba8ba12b-7ae9-42c8-bcab-59c39b7219c9') self.vs[35]["mm__"] = """__Block_Outport__""" self.vs[35]["GUID__"] = UUID('2b94a8e3-5dc8-4ef9-a369-9fa28dfa4a25') self.vs[36]["mm__"] = """__Block_Outport__""" self.vs[36]["GUID__"] = UUID('b0686df7-b969-42bc-8321-34d0785ae81f') self.vs[37]["mm__"] = """__Block_Outport__""" self.vs[37]["GUID__"] = UUID('e98f9e88-df30-44e1-a37c-585d02b58d3a') self.vs[38]["mm__"] = """__Block_Outport__""" self.vs[38]["GUID__"] = UUID('9e379931-decd-49d3-a71d-81ddb0393c9f') self.vs[39]["mm__"] = """__Block_Outport__""" self.vs[39]["GUID__"] = UUID('9e25ae89-9a4f-4d34-87a9-fdbd86781309') self.vs[40]["mm__"] = """__Block_Outport__""" self.vs[40]["GUID__"] = UUID('bc892a1a-16d0-45b1-8d24-e9e45706d26a') self.vs[41]["mm__"] = """__Block_Outport__""" self.vs[41]["GUID__"] = UUID('3880bb62-5210-410c-80e1-1658b01a8a8d') self.vs[42]["mm__"] = """__Block_Outport__""" self.vs[42]["GUID__"] = UUID('982d02b4-bb03-41fc-b77e-5fc3f575a85c') self.vs[43]["mm__"] = """__Block_Outport__""" self.vs[43]["GUID__"] = UUID('0cdd9c41-72cb-4321-bc3b-2629c260ca43') 
self.vs[44]["mm__"] = """__Block_Outport__""" self.vs[44]["GUID__"] = UUID('8871d75b-0be0-4e76-a709-eb7e61949647') self.vs[45]["mm__"] = """__Block_Outport__""" self.vs[45]["GUID__"] = UUID('b5b05072-d6a5-4d70-9b73-211a77b53684') self.vs[46]["mm__"] = """__Block_Outport__""" self.vs[46]["GUID__"] = UUID('30d22c6e-df70-49bd-96e2-abd1a927077e') self.vs[47]["mm__"] = """__Block_Outport__""" self.vs[47]["GUID__"] = UUID('a1772768-d323-45fa-b7ef-095dd4fa24aa') self.vs[48]["mm__"] = """__Block_Outport__""" self.vs[48]["GUID__"] = UUID('092ee6ee-095f-454e-b6e7-34332f8a27a0') self.vs[49]["mm__"] = """__Block_Outport__""" self.vs[49]["GUID__"] = UUID('8ef11b47-2e19-475d-b004-ff80e618ac28') self.vs[50]["Name"] = """1""" self.vs[50]["mm__"] = """Port_Input""" self.vs[50]["GUID__"] = UUID('c21cd5ea-4f2e-4c79-a7b2-b1ededf7224f') self.vs[51]["Name"] = """2""" self.vs[51]["mm__"] = """Port_Input""" self.vs[51]["GUID__"] = UUID('f2f40662-6db0-45b6-99f7-faf9d0826cb0') self.vs[52]["Name"] = """1""" self.vs[52]["mm__"] = """Port_Input""" self.vs[52]["GUID__"] = UUID('a86461b0-f516-4b01-a8b9-df002de2936c') self.vs[53]["Name"] = """1""" self.vs[53]["mm__"] = """Port_Input""" self.vs[53]["GUID__"] = UUID('d00fb4a0-24cc-43c8-a30b-2630fc5b5576') self.vs[54]["Name"] = """1""" self.vs[54]["mm__"] = """Port_Input""" self.vs[54]["GUID__"] = UUID('0a914718-ec1c-42d8-9d25-e8921e969ac1') self.vs[55]["Name"] = """1""" self.vs[55]["mm__"] = """Port_Input""" self.vs[55]["GUID__"] = UUID('0e7f61a7-ab89-4775-90ab-401bfdf9acb9') self.vs[56]["Name"] = """2""" self.vs[56]["mm__"] = """Port_Input""" self.vs[56]["GUID__"] = UUID('1b8f219a-d034-478c-8239-ae16bcfe3b24') self.vs[57]["Name"] = """1""" self.vs[57]["mm__"] = """Port_Input""" self.vs[57]["GUID__"] = UUID('5af6ee33-6a1c-4c8e-8d75-2a76393c2610') self.vs[58]["Name"] = """2""" self.vs[58]["mm__"] = """Port_Input""" self.vs[58]["GUID__"] = UUID('9d78e402-c0c7-457e-83f9-aee3dca00144') self.vs[59]["Name"] = """1""" self.vs[59]["mm__"] = """Port_Input""" self.vs[59]["GUID__"] = UUID('68269617-a0a6-4804-9a5f-ce2575dd17d9') self.vs[60]["Name"] = """2""" self.vs[60]["mm__"] = """Port_Input""" self.vs[60]["GUID__"] = UUID('bdebfbac-2308-4f82-a610-4903c6b126be') self.vs[61]["Name"] = """1""" self.vs[61]["mm__"] = """Port_Input""" self.vs[61]["GUID__"] = UUID('cb37b8bb-0d28-4954-9ade-e1c58e36deb0') self.vs[62]["Name"] = """1""" self.vs[62]["mm__"] = """Port_Input""" self.vs[62]["GUID__"] = UUID('3efb5d21-0e4a-4f35-9f13-33f5269c5d27') self.vs[63]["Name"] = """1""" self.vs[63]["mm__"] = """Port_Input""" self.vs[63]["GUID__"] = UUID('7480d4ea-e5c9-4369-8beb-44a82010a9f4') self.vs[64]["Name"] = """1""" self.vs[64]["mm__"] = """Port_Input""" self.vs[64]["GUID__"] = UUID('b8d9a531-9b5e-4ab2-a4a9-f1910367b255') self.vs[65]["Name"] = """2""" self.vs[65]["mm__"] = """Port_Input""" self.vs[65]["GUID__"] = UUID('a82e9ec6-04f3-4921-ab95-672320b1c54f') self.vs[66]["mm__"] = """__Block_Inport__""" self.vs[66]["GUID__"] = UUID('f0398ee2-f9fe-4c0f-8b07-d64be73a3c3b') self.vs[67]["mm__"] = """__Block_Inport__""" self.vs[67]["GUID__"] = UUID('f9356434-73eb-412b-a349-3b41dda3a1f9') self.vs[68]["mm__"] = """__Block_Inport__""" self.vs[68]["GUID__"] = UUID('8b93f3e8-8b35-4950-b6db-99071419c97a') self.vs[69]["mm__"] = """__Block_Inport__""" self.vs[69]["GUID__"] = UUID('580eebf0-8650-40d5-ac8c-9ebc4611d8b4') self.vs[70]["mm__"] = """__Block_Inport__""" self.vs[70]["GUID__"] = UUID('3c00ad24-ff30-49ba-8aa9-a489e92ac971') self.vs[71]["mm__"] = """__Block_Inport__""" self.vs[71]["GUID__"] = 
UUID('ad7f53ea-df4a-42dd-927c-dee91a28c68f') self.vs[72]["mm__"] = """__Block_Inport__""" self.vs[72]["GUID__"] = UUID('18e453f9-715a-4c21-810e-db6c14ea391e') self.vs[73]["mm__"] = """__Block_Inport__""" self.vs[73]["GUID__"] = UUID('d57011fb-5626-45e0-9720-dfeeec025492') self.vs[74]["mm__"] = """__Block_Inport__""" self.vs[74]["GUID__"] = UUID('329d90a2-8091-435f-a230-e66273f96ad4') self.vs[75]["mm__"] = """__Block_Inport__""" self.vs[75]["GUID__"] = UUID('85e5ff0f-bb4e-4ffc-8547-a2d3339668ad') self.vs[76]["mm__"] = """__Block_Inport__""" self.vs[76]["GUID__"] = UUID('242a9924-011c-4ca0-a14e-ff940d8470e6') self.vs[77]["mm__"] = """__Block_Inport__""" self.vs[77]["GUID__"] = UUID('25a81afa-35ec-4361-9fb2-b0fab39f0e74') self.vs[78]["mm__"] = """__Block_Inport__""" self.vs[78]["GUID__"] = UUID('72daf75d-a55c-4da8-b6fa-540ecc5890fe') self.vs[79]["mm__"] = """__Block_Inport__""" self.vs[79]["GUID__"] = UUID('85222c53-252e-481b-92cd-367af4ff2bc6') self.vs[80]["mm__"] = """__Block_Inport__""" self.vs[80]["GUID__"] = UUID('1babbcb5-911d-46e9-b491-c2db5ee4c8f2') self.vs[81]["mm__"] = """__Block_Inport__""" self.vs[81]["GUID__"] = UUID('c53cd074-98e0-4a02-804e-d36a8729174c') self.vs[82]["mm__"] = """__Relation__""" self.vs[82]["GUID__"] = UUID('3acc69e0-9e76-4e28-adc6-a0542777972c') self.vs[83]["mm__"] = """__Relation__""" self.vs[83]["GUID__"] = UUID('3ce8d214-0b7f-41a6-b852-f91c45b393ce') self.vs[84]["mm__"] = """__Relation__""" self.vs[84]["GUID__"] = UUID('472527a3-dc6c-48bf-a61c-174e136fd519') self.vs[85]["mm__"] = """__Relation__""" self.vs[85]["GUID__"] = UUID('c025134c-d29e-4a05-a487-9c34655d05c8') self.vs[86]["mm__"] = """__Relation__""" self.vs[86]["GUID__"] = UUID('177e3050-d372-4d20-8769-cf3cfc1c4f89') self.vs[87]["mm__"] = """__Relation__""" self.vs[87]["GUID__"] = UUID('b051d0ba-e75c-4e93-a75d-3fdbda8b13e6') self.vs[88]["mm__"] = """__Relation__""" self.vs[88]["GUID__"] = UUID('59f711e9-c681-42f8-99f4-fd5d5ed4e60b') self.vs[89]["mm__"] = """__Relation__""" self.vs[89]["GUID__"] = UUID('20d2d0cd-3e4a-41c1-b825-e4272a79b938') self.vs[90]["mm__"] = """__Relation__""" self.vs[90]["GUID__"] = UUID('5bae399e-9a12-4b57-a2b9-14a03192e5ed') self.vs[91]["mm__"] = """__Relation__""" self.vs[91]["GUID__"] = UUID('22db28a4-4de4-4dd4-9f32-c3e09badff15') self.vs[92]["mm__"] = """__Relation__""" self.vs[92]["GUID__"] = UUID('8f2fa4e8-ed1f-43d7-9827-8b99db4ef332') self.vs[93]["mm__"] = """__Relation__""" self.vs[93]["GUID__"] = UUID('1e048894-952a-48e8-9d84-0c4527393ca2') self.vs[94]["mm__"] = """__Relation__""" self.vs[94]["GUID__"] = UUID('be223435-891f-4466-b9a5-cdec06256b63') self.vs[95]["mm__"] = """__Relation__""" self.vs[95]["GUID__"] = UUID('6b94ff1d-1cce-4ec9-a298-c94f259741ec') self.vs[96]["mm__"] = """__Relation__""" self.vs[96]["GUID__"] = UUID('05a63986-edc0-49b1-9365-69860d0a89d4') self.vs[97]["mm__"] = """__Relation__""" self.vs[97]["GUID__"] = UUID('4a932950-2fab-4ce3-9767-484dbe084290') self.vs[98]["Name"] = """None""" self.vs[98]["mm__"] = """__Contains__""" self.vs[98]["GUID__"] = UUID('a31037cd-dace-43cf-9987-8a0610c0c07f') self.vs[99]["Name"] = """None""" self.vs[99]["mm__"] = """__Contains__""" self.vs[99]["GUID__"] = UUID('ea24b961-26eb-4c44-93d4-0f15cad67bab') self.vs[100]["Name"] = """None""" self.vs[100]["mm__"] = """__Contains__""" self.vs[100]["GUID__"] = UUID('5e671a7c-7539-41af-958c-fe48d4e31809') self.vs[101]["Name"] = """None""" self.vs[101]["mm__"] = """__Contains__""" self.vs[101]["GUID__"] = UUID('9749ed46-6409-4b18-8057-36f1d9a6ef1c') self.vs[102]["Name"] = 
"""None""" self.vs[102]["mm__"] = """__Contains__""" self.vs[102]["GUID__"] = UUID('36ab22fb-634f-47ca-b65d-e8dc064fd022') self.vs[103]["Name"] = """None""" self.vs[103]["mm__"] = """__Contains__""" self.vs[103]["GUID__"] = UUID('daed977f-8833-405c-b5a9-511c3cf7b53a') self.vs[104]["Name"] = """None""" self.vs[104]["mm__"] = """__Contains__""" self.vs[104]["GUID__"] = UUID('7ee00228-b980-4c88-8149-dc4881379102') self.vs[105]["Name"] = """None""" self.vs[105]["mm__"] = """__Contains__""" self.vs[105]["GUID__"] = UUID('d8832334-a7ee-415c-b24c-26eadc8935be') self.vs[106]["Name"] = """None""" self.vs[106]["mm__"] = """__Contains__""" self.vs[106]["GUID__"] = UUID('3b4c3970-2d19-4742-85c1-b83094b4a3b4') self.vs[107]["Name"] = """None""" self.vs[107]["mm__"] = """__Contains__""" self.vs[107]["GUID__"] = UUID('ea32d964-6098-4204-9e7a-6a62dd1184bd') self.vs[108]["Name"] = """None""" self.vs[108]["mm__"] = """__Contains__""" self.vs[108]["GUID__"] = UUID('ae5f7a4a-3ba4-449e-a8d5-453cd67010b9') self.vs[109]["Name"] = """None""" self.vs[109]["mm__"] = """__Contains__""" self.vs[109]["GUID__"] = UUID('c8b62e5b-34a8-47b4-8720-d3d25e8f5dd7') self.vs[110]["Name"] = """None""" self.vs[110]["mm__"] = """__Contains__""" self.vs[110]["GUID__"] = UUID('59e6c5dc-1412-4ee8-8faf-431f82283f4b') self.vs[111]["Name"] = """None""" self.vs[111]["mm__"] = """__Contains__""" self.vs[111]["GUID__"] = UUID('248f6796-1962-4699-ada5-0dbcbdead522') self.vs[112]["Name"] = """None""" self.vs[112]["mm__"] = """__Contains__""" self.vs[112]["GUID__"] = UUID('0da03d23-08bb-4c83-ad76-a3bc789442de') self.vs[113]["Name"] = """None""" self.vs[113]["mm__"] = """__Contains__""" self.vs[113]["GUID__"] = UUID('6bc7010c-4f9b-444d-80ea-c17bfa7b86df') self.vs[114]["Name"] = """None""" self.vs[114]["mm__"] = """__Contains__""" self.vs[114]["GUID__"] = UUID('b553d6be-a275-4e58-b106-e3a1e5294b9b') self.vs[115]["Name"] = """None""" self.vs[115]["mm__"] = """__Contains__""" self.vs[115]["GUID__"] = UUID('9a4025bf-92c3-4602-a0e5-75d273769abd') self.vs[116]["Name"] = """None""" self.vs[116]["mm__"] = """__Contains__""" self.vs[116]["GUID__"] = UUID('0d38375f-caf8-42a6-a4db-a5d72cd034c6')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def gen_graph(self):", "def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))", "def build_graph(self):\n pass", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def _build_graph(self):\n pass", "def create_ising_wishbone(h, w, **kwargs):\n assert h == 2 # Only works for 2 branches\n G = nx.empty_graph(h * w)\n n = w\n G.add_edges_from([(v, v+1) for v in range(n-1)])\n G.add_edges_from([(v, v+1) for v in range(n,2*n-1)])\n G.add_edges_from([(v, v+n) for v in range(n // 2)]) # Connect first half of nodes\n return nx.to_numpy_matrix(G)", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def build_graph(self):\n raise NotImplementedError", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n .minimize(self.loss))", "def add_edges_from_swmm_inp(G, inp):\n\n inp = _validate_hymo_inp(inp)\n\n df_edge_list = pandas_edgelist_from_swmm_inp(inp=inp)\n\n edge_list = pandas_edgelist_to_edgelist(df_edge_list,\n source='inlet_node',\n target='outlet_node')\n\n 
G.add_edges_from(edge_list)\n\n df_node_attrs = pandas_node_attrs_from_swmm_inp(inp=inp).to_dict('index')\n set_node_attributes(G, values=df_node_attrs)", "def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def _construct_graph(self):\n raise NotImplementedError", "def build_graph(self):\n if self.model == 'dense':\n # ForecastNet with two densely connected hidden layers in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv':\n # ForecastNet with a convlutional neural network in a cell and Mixture Density Network outputs\n self.outputs, self.mu, self.sigma, self.cost = forecastnet_conv_graph(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'dense2':\n # ForecastNet with two densely connected hidden layers in a cell and linear outputs\n self.outputs, self.cost = forecastnet_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)\n elif self.model == 'conv2':\n # ForecastNet with a convolutional neural network in a cell and linear outputs\n self.outputs, self.cost = forecastnet_conv_graph2(self.X,\n self.Y,\n self.hidden_dim,\n self.out_seq_length,\n self.is_training)", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def populate_graph(self):", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", [self.action_size], initializer = tf.constant_initializer(0.1))", "def get_line_graph(H, collapse=True):\n D = nx.DiGraph()\n\n V = {edge: set(nodes) for edge, nodes in 
H.edges.elements.items()}\n\n D.add_nodes_from(V)\n\n for u, v in combinations(V, 2):\n if V[u] != V[v] or not collapse:\n if V[u].issubset(V[v]):\n D.add_edge(u, v)\n elif V[v].issubset(V[u]):\n D.add_edge(v, u)\n\n return D", "def CreateCurve1DMeshfrom2DMesh(self):\n\n self.__do_memebers_exist__()\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n unique_edges, inv_edges = np.unique(self.edges,return_inverse=True)\n mm.points = self.points[unique_edges,:]\n mm.nnode = mm.points.shape[0]\n aranger = np.arange(mm.nnode)\n mm.elements = aranger[inv_edges].reshape(self.edges.shape)\n mm.nelem = mm.elements.shape[0]\n mm.element_type = \"line\"\n\n return mm", "def build_inference_graph(self):\n self.build_train_graph()", "def H_layer(nqubits):\n for idx in range(nqubits):\n qml.Hadamard(wires=idx)", "def _build_graph1(self):\n g1 = nx.DiGraph()\n for source, target, weight, timestamp in self._edges:\n if timestamp <= self._median_timestamp:\n if weight == 1:\n g1.add_edge(source, target)\n else:\n if g1.has_edge(source, target):\n g1.remove_edge(source, target)\n else:\n self._remaining_edges.append((source, target, weight))\n return g1", "def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))", "def hypergraph_homology_basis(h, k, shortest=False, log=None):\n max_dim = np.max([len(e) for e in h.edges()]) - 1\n\n if k > max_dim or k < 1:\n return 'wrong dim'\n C = dict()\n for i in range(k - 1, k + 2):\n C[i] = kchainbasis(h, i)\n bd = dict()\n for i in range(k, k + 2):\n bd[i] = bkMatrix(C[i - 1], C[i])\n if log:\n try:\n logdict = pickle.load(open(log, 'rb'))\n except:\n logdict = dict()\n logdict.update({'maxdim': max_dim,\n 'kchains': C,\n 'bd': bd, })\n pickle.dump(logdict, open(log, 'wb'))\n return homology_basis(bd, k, C=C[k], shortest=shortest, log=log)", "def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def homothick():\n return se2hmt(binary([[1,1,1],\n [0,0,0],\n [0,0,0]]),\n binary([[0,0,0],\n [0,1,0],\n [1,1,1]]))" ]
[ "0.6022183", "0.58268195", "0.56974345", "0.5671017", "0.5617992", "0.5606539", "0.5554137", "0.5550622", "0.5527271", "0.5515618", "0.54920125", "0.5475375", "0.5474933", "0.5449014", "0.54002106", "0.5384529", "0.5359415", "0.5316242", "0.5313028", "0.53064716", "0.5270311", "0.5254161", "0.5224649", "0.5216979", "0.5214591", "0.518202", "0.5174107", "0.51719815", "0.5165076", "0.5161966" ]
0.66377443
0
Runs parameter checks. This includes a determination that the value is equal to or greater than zero, and a check that all required keywords for a given target function are supplied.
def _check_parameters(self, target_function, **kwargs):
    # Ensure all arguments are >= 0 where relevant
    for keyword, value in kwargs.items():
        # Two conditions
        value_is_less_than_zero = value < 0
        keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']
        # Test conditions
        if keyword_is_relevant and value_is_less_than_zero:
            raise FairException('"{}" is less than zero.'.format(keyword))
    # Check that all required keywords are provided
    required_keywords = self._required_keywords[target_function]
    for required_keyword in required_keywords:
        if required_keyword in kwargs.keys():
            pass
        else:
            raise FairException('"{}" is missing "{}".'.format(str(target_function), required_keyword))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Check(self, parameters):", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def param_vals_test(param_dict):\n file_msg = param_dict['Prog_msg']\n ##\n ## Testing if `wget` exists in the system\n if is_tool('wget'):\n pass\n else:\n msg = '{0} You need to have `wget` installed in your system to run '\n msg += 'this script. You can download the entire dataset at {1}.\\n\\t\\t'\n msg += 'Exiting....'\n msg = msg.format(file_msg, param_dict['url_catl'])\n raise ValueError(msg)\n ##\n ## Checking that Esmeralda is not ran when doing 'SO' halos\n if (param_dict['halotype'] == 'so') and (param_dict['sample'] == 20):\n msg = '{0} The `halotype`==`so` and `sample`==`20` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format(file_msg)\n raise ValueError(msg)\n ##\n ## Checking that `hod_model_n` is set to zero for FoF-Halos\n if (param_dict['halotype'] == 'fof') and (param_dict['hod_n'] != 0):\n msg = '{0} The `halotype`==`{1}` and `hod_n`==`{2}` are no compatible '\n msg += 'input parameters.\\n\\t\\t'\n msg += 'Exiting...'\n msg = msg.format( file_msg,\n param_dict['halotype'],\n param_dict['hod_n'])\n raise ValueError(msg)\n ##\n ## Checking input different types of `test_train_opt`\n #\n # `sample_frac`\n if (param_dict['test_train_opt'] == 'sample_frac'):\n # `sample_frac`\n if not ((param_dict['sample_frac'] > 0) and\n (param_dict['sample_frac'] <= 1.)):\n msg = '{0} `sample_frac` ({1}) must be between (0,1]'.format(\n file_msg, param_dict['sample_frac'])\n raise ValueError(msg)\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n #\n # boxes_n\n if (param_dict['test_train_opt'] == 'boxes_n'):\n box_n_arr = num.array(param_dict['box_idx'].split('_')).astype(int)\n box_n_diff = num.diff(box_n_arr)\n # Larger than zero\n if not (all(box_n_arr >= 0)):\n msg = '{0} All values in `box_idx` ({1}) must be larger than 0!'\n msg = msg.format(file_msg, box_n_arr)\n raise ValueError(msg)\n # Difference between elements\n if not (all(box_n_diff > 0)):\n msg = '{0} The value of `box_idx` ({1}) is not valid!'.format(\n file_msg, param_dict['box_idx'])\n raise ValueError(msg)\n #\n # `box_test`\n if (param_dict['test_train_opt'] == 'box_sample_frac'):\n # Value of `box_test`\n if not (param_dict['box_test'] >= 0):\n msg = '{0} `box_test` ({1}) must be larger or equal to `0`.'\n msg = msg.format(file_msg, param_dict['box_test'])\n raise ValueError(msg)\n # Testing `test_size`\n # `test_size`\n if not ((param_dict['test_size'] > 0) and\n (param_dict['test_size'] < 1)):\n msg = '{0} `test_size` ({1}) must be between (0,1)'.format(\n file_msg, param_dict['test_size'])\n raise ValueError(msg)\n ##\n ## Checking that `kf_splits` is larger than `2`\n if (param_dict['kf_splits'] < 2):\n msg = '{0} The value for `kf_splits` ({1}) must be LARGER than `2`'\n msg += 'Exiting...'\n msg = msg.format(param_dict['Prog_msg'], param_dict['kf_splits'])\n raise ValueError(msg)\n ##\n ## Checking that `n_predict` is not smaller than `1`.\n if (param_dict['n_predict'] < 1):\n msg = '{0} The value for `n_predict` ({1}) must be LARGER than `1`'\n msg += 'Exiting...'\n msg = 
msg.format(param_dict['Prog_msg'], param_dict['n_predict'])\n raise ValueError(msg)", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\",\r\n \"PSO_VELOCITY_WEIGHT\", \"PSO_INDIVIDUAL_WEIGHT\", \"PSO_GROUP_WEIGHT\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret", "def check_params(cls, **kwargs) -> None:\n\n for key, val in kwargs.items():\n cls.check_param(key, val)", "def check_required_parameters(required_params_dict=dict()):\r\n print threading.currentThread().getName(), 'Starting'\r\n is_valid = True\r\n required_params_not_set = pythontools.validate_required_parameters(required_params_dict)\r\n if len(required_params_not_set) > 0:\r\n is_valid = False\r\n msg = \"Validate all required input parameters are set failed.\"\r\n for param in required_params_not_set:\r\n steplog.error(\"Required parameter %s is not set.\" % param)\r\n else:\r\n msg = \"Validate all required input parameters are set succeeded.\"\r\n return is_valid, msg", "def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a 
mandatory parameter \"\n self.printUsage()\n sys.exit(1)", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\", \"point_count\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\", \"point_count\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('r', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Radius r must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Radius r must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n # Check piezometer depth\n elif 'z' in self.parameters:\n z = self.parameters.get('z', -1)\n if type(z) not in [int, float]:\n flag = False\n warnings += \"Depth of piezometer must be a float value\\n\"\n else:\n if z < 0:\n flag = False\n warnings += \"Depth z must be higher than 0\\n\"\n else:\n flag = False\n warnings += \"Well 
don't contain well depth attributes\\n\"\n return(flag, warnings) # End Function", "def c_test_set_inp(self, param, value):\r\n ret = 1\r\n if \"__hash__\" not in dir(param): # param must be hashable\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is not hashable. It will be unable to be set in a dict.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is hashable.\")\r\n if param in [\"population_size\", \"time_constraint\", \"generations\"]:\r\n if not ((isinstance(value, int) or\r\n isinstance(value, float) or\r\n isinstance(value, long))):\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be of a number. It is \" + str(value))\r\n ret = 0\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set to a number.\")\r\n if value < 0:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be greater than zero.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is greater than zero.\")\r\n if param in [\"population_size\", \"generations\"]:\r\n if not isinstance(value, int):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be an integer. It is \" + str(value))\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is an integer.\")\r\n if param in [\"fitness_function\", \"population_function\",\r\n \"mutate_function\", \"cross_function\", \"weighting_bias\"]:\r\n if not callable(value):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be a callable function.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is a callable function.\")\r\n if param == \"end_condition\":\r\n if value not in [\"time_constraint\", \"generations\"]:\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" needs to be 'time_constraint' or 'generations'\")\r\n else:\r\n if self.verbosity > 1:\r\n print(\"ERROR: \" + param + \" is a correct string.\")\r\n if param == \"seed\":\r\n if not (value is None or isinstance(value, int)):\r\n ret = 0\r\n if self.verbosity > 0:\r\n print(\"ERROR: \" + param + \" is incorrectly set.\")\r\n else:\r\n if self.verbosity > 1:\r\n print(param + \" is correctly set.\")\r\n return ret", "def perform_parameter_checks(self, update=False):\n for p, check_function in self.parameter_checks.items():\n try:\n val = p.snapshot(update=update)['value']\n res = check_function(val)\n if res != True: # False or a string (error message)\n log.warning(\n f'Parameter {p.full_name} has an uncommon value: '\n f'{val}.' 
+ (f\" ({res})\" if res is not False else ''))\n except Exception as e:\n log.warning(\n f'Could not run parameter check for {p}: {e}')", "def _check_params(self):\n pass", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")", "def _check_params_do(name, val):\n if name == 'info_hash':\n return len(val) == 20\n elif name == 'peer_id':\n return len(val) == 20 and STORAGE.check_peer(val)\n elif name == 'numwant':\n return int(val) < 250\n fail(REASON_REQUEST_ERROR)", "def _check_le_1(self, target, **kwargs):\n # For every keyword argument\n for key, value in kwargs.items():\n # Set boolean conditions\n applicable_keyword = key in self._le_1_keywords\n applicable_target = target in self._le_1_targets\n # If key is in specified list\n if applicable_keyword and applicable_target:\n # Check if value is less than or equal to 1\n if 0.0 <= value <= 1.0:\n pass\n # If not, raise error\n else:\n raise FairException('\"{}\" must have \"{}\" value between zero and one.'.format(target, key))", "def _check_pert(self, **kwargs):\n conditions = {\n 'mode >= low' : kwargs['mode'] >= kwargs['low'],\n 'high >= mode' : kwargs['high'] >= kwargs['mode'],\n }\n for condition_name, condition_value in conditions.items():\n if condition_value == False:\n err = 'Param \"{}\" fails PERT requirement \"{}\".'.format(kwargs, condition_name)\n raise FairException(err)", "def check_params(params):\n\n required = ['gtsrb_train_root', 'gtsrb_test_root', 'batch_size']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def _check_parameters(self, ep, params):\n\n any_group_satisfied = False\n for group in ep.REQUIRED:\n if all(required_param in params for required_param in group):\n any_group_satisfied = True\n\n if not any_group_satisfied:\n raise ValueError(f\"Got parameters {params}, expected one of {ep.REQUIRED}\")\n\n for key in params:\n if key not in ep.POSSIBLE:\n raise ValueError(f\"Got {key}, expected one of {ep.POSSIBLE}\")", "def check_params(self):\n raise NotImplementedError", "def parameter_check(coordinates, \n neighbors, \n bandwidth, \n convergence, \n percentage):\n # Create a boolean vector to keep track of incorrect inputs\n 
incorrect_inputs = np.zeros(5, dtype = bool)\n # Check whether two-dimensional coordinates are provided\n if not type(coordinates) == np.ndarray:\n incorrect_inputs[0] = True\n elif not coordinates.shape[1] == 2:\n incorrect_inputs[0] = True\n # Check whether neighbors is a positive integer or float\n if not ((type(neighbors) == int and neighbors > 0)\n and not ((type(neighbors) == float) \n and (neighbors > 0)\n and (neighbors.is_integer() == True))):\n incorrect_inputs[1] = True\n # Check whether bandwidth is a positive integer or float\n if not bandwidth == None:\n if not ((type(bandwidth) == int and bandwidth > 0)\n or (type(bandwidth) == float) and bandwidth > 0):\n incorrect_inputs[2] = True\n # Check whether convergence is a positive integer or float\n if not convergence == None:\n if not ((type(convergence) == int and convergence > 0)\n or (type(convergence) == float) and convergence > 0):\n incorrect_inputs[3] = True\n # Check whether percentage is a valid percentage value\n if not percentage == None:\n if not ((type(percentage) == int and percentage >= 0 \n and percentage <= 100)\n or ((type(percentage) == float) and percentage >= 0 \n and percentage <= 100)):\n incorrect_inputs[4] = True\n # Define error messages for each parameter failing the tests\n errors = ['ERROR: coordinates: Must be a 2-column numpy.ndarray',\n 'ERROR: neighbors: Must be a whole-number int or float > 0',\n 'ERROR: bandwidth: Must be an int or float > 0, or None',\n 'ERROR: convergence: Must be an int or float > 0, or None',\n 'ERROR: percentage: Must be an int or float in [0, 100], or None']\n # Print eventual error messages and terminate the code\n if any(value == True for value in incorrect_inputs):\n for i in range(0, len(errors)):\n if incorrect_inputs[i] == True:\n print(errors[i])\n sys.exit()", "def onCheckParameters(self, evt): \n \n print(\"version\", self.config.version)\n \n if isinstance(self.config.iSPV, ( int, long )): pass\n else: \n msg = (\"SPV value should be an integer!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iScanTime, ( int, float )): pass\n else: \n msg = (\"Scan time value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iStartVoltage, ( int, float )): pass\n else: \n msg = (\"Start voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iEndVoltage, ( int, float )): pass\n else: \n msg = (\"End voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iStepVoltage, ( int, float )): pass\n else: \n msg = (\"Step voltage should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if self.config.iActivationMode == \"Exponential\":\n if isinstance(self.config.iExponentPerct, ( int, float )): pass\n else: \n msg = (\"Exponential % value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n if isinstance(self.config.iExponentIncre, ( int, float )): pass\n else: \n msg = (\"Exponential increment value should be an float!\")\n 
dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return False\n \n elif self.config.iActivationMode == \"Boltzmann\":\n if isinstance(self.config.iBoltzmann, ( int, float )): pass\n else: \n msg = (\"Boltzmann offset value should be an integer or float!\")\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n return False\n \n \n if (abs(self.config.iEndVoltage) <= abs(self.config.iStartVoltage)):\n msg = ('End voltage has to be larger than starting voltage')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n return\n \n if (abs(self.config.iEndVoltage) > 200):\n msg = ('The highest possible voltage is 200 V. Set to default: 200')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iEndVoltage = 200\n self.view.panelControls.endVoltage_input.SetValue(str(self.config.iEndVoltage))\n \n if (abs(self.config.iStartVoltage) < 0):\n msg = ('The lowest possible voltage is 0 V. Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iStartVoltage = 0\n self.view.panelControls.startVoltage_input.SetValue(str(self.config.iStartVoltage))\n \n if self.config.iSPV <= 0:\n msg = ('SPV must be larger than 0! Set to default: 3')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iSPV = 3\n self.view.panelControls.spv_input.SetValue(str(self.config.iSPV))\n \n if self.config.iScanTime <= 0:\n msg = ('Scan time must be larger than 0! Set to default: 5')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iScanTime = 5\n self.view.panelControls.scanTime_input.SetValue(str(self.config.iScanTime))\n\n if self.config.iActivationMode == \"Exponential\":\n if self.config.iExponentPerct < 0:\n msg = ('Exponential % must be larger or equal to 0! Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentPerct = 0\n elif self.config.iExponentPerct >= 100:\n msg = ('Exponential % must be smaller than 100! Set to default: 0')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentPerct = 0\n self.view.panelControls.exponentialPerct_input.SetValue(str(self.config.iExponentPerct))\n \n if self.config.iExponentIncre <= 0:\n msg = ('Exponential increment must be larger than 0! Set to default: 0.01')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentIncre = 0.01\n elif self.config.iExponentIncre > 0.075:\n msg = ('Exponential increment must be smaller than 0.075! Set to default: 0.075')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg ,\n type=\"Error\")\n self.config.iExponentIncre = 0.075\n self.view.panelControls.exponentialIncrm_input.SetValue(str(self.config.iExponentIncre))\n elif self.config.iActivationMode == \"Boltzmann\":\n if self.config.iBoltzmann < 10:\n msg = ('Boltzmann offset must be larger than 10! Set to default: 10')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg,\n type=\"Error\")\n self.config.iBoltzmann = 10\n elif self.config.iBoltzmann >= 100:\n msg = ('Boltzmann offset must be smaller than 100! 
Set to default: 25')\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg=msg,\n type=\"Error\")\n self.config.iBoltzmann = 25\n self.view.panelControls.boltzmann_input.SetValue(str(self.config.iBoltzmann))\n \n # All good\n return True", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def __check_inputs__(self):\n # | - __check_inputs__\n # #####################################################################\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n # #####################################################################\n\n if stop_mode == \"num_generations\":\n mess_i = \"stop_mode='num_generations', \\\n Must pass int to 'stop_num_generations'\"\n assert type(stop_num_generations) == type(1), mess_i\n #__|", "def _check_value(self, value, name, check_function):\n if check_function is not None:\n is_good = check_function(value) #May raise an exception\n assert is_good in [0,1,True,False]\n if not is_good:\n raise ValueError(\"Invalid parameter value %r for parameter %s\" \\\n % (value, name))", "def check_param(kwargs):\n\n for key in kwargs:\n if kwargs[key] is None:\n continue\n if key == 'trunk_id':\n value = int(kwargs[key])\n # maximal value is 1024,although the value is limit by command 'assign forward eth-trunk mode '\n if value < 0 or value > 1024:\n return 'Error: Wrong Value of Eth-Trunk interface number'\n elif key == 'system_id':\n # X-X-X ,X is hex(4 bit)\n if not re.match(r'[0-9a-f]{1,4}\\-[0-9a-f]{1,4}\\-[0-9a-f]{1,4}', kwargs[key], re.IGNORECASE):\n return 'Error: The system-id is invalid.'\n values = kwargs[key].split('-')\n flag = 0\n # all 'X' is 0,that is invalid value\n for v in values:\n if len(v.strip('0')) < 1:\n flag += 1\n if flag == 3:\n return 'Error: The system-id is invalid.'\n elif key == 'timeout_type':\n # select a value from choices, choices=['Slow','Fast'],it's checked by AnsibleModule\n pass\n elif key == 'fast_timeout':\n value = int(kwargs[key])\n if value < 3 or value > 90:\n return 'Error: Wrong Value of timeout,fast user-defined value<3-90>'\n rtype = str(kwargs.get('timeout_type'))\n if rtype == 'Slow':\n return 'Error: Short timeout period for receiving packets is need,when user define the time.'\n elif key == 'preempt_delay':\n value = int(kwargs[key])\n if value < 0 or value > 180:\n return 'Error: Value of preemption delay time is from 0 to 180'\n elif key == 'collector_delay':\n value = int(kwargs[key])\n if value < 0 or value > 65535:\n return 'Error: Value of collector delay time is from 0 to 65535'\n elif key == 'max_active_linknumber':\n value = int(kwargs[key])\n if value < 0 or value > 64:\n return 'Error: Value of collector delay time is from 0 to 64'\n elif key == 'priority' or key == 'global_priority':\n value = int(kwargs[key])\n if value < 0 or value > 65535:\n return 'Error: Value of priority is from 0 to 65535'\n return 'ok'", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def check_params(params):\n\n required = ['initlandmarks']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def check_all_parameters(self):\n global dtParameterDesc\n self.message = ''\n ok = True\n for par in self.parameters:\n if par in dtParameterDesc:\n pardata = dtParameterDesc[par]\n check = self.check_parameter(par, self.parameters[par])\n ok = ok and check\n if not check:\n self.message += ('\\n' if self.message != '' else '') + pardata[dtg.LANG] +\\\n (' вне диапазона' 
if dtg.LANG == 'ru' else ' out of range')\n\n return ok" ]
[ "0.7083333", "0.6897127", "0.6845546", "0.6732831", "0.65487105", "0.6494509", "0.6490343", "0.6471558", "0.6422716", "0.64224803", "0.64110047", "0.63605785", "0.63543475", "0.6344284", "0.6249611", "0.62482", "0.6248133", "0.6237131", "0.6220662", "0.62176985", "0.62074405", "0.61965513", "0.619617", "0.61833537", "0.61680967", "0.61634296", "0.6163329", "0.61613744", "0.6157301", "0.6148548" ]
0.74509966
0
Supply raw data to the model. This takes an arbitrary array, runs some quick checks, and returns the array if appropriate.
def supply_raw(self, target, array):
    # Ensure numeric
    clean_array = pd.to_numeric(array)
    # Coerce to series
    if type(array) == pd.Series:
        s = pd.Series(clean_array.values)
    else:
        s = pd.Series(clean_array)
    # Check numeric and not null
    if s.isnull().any():
        raise FairException('Supplied data contains null values')
    # Ensure values are appropriate
    if target in self._le_1_targets:
        if s.max() > 1 or s.min() < 0:
            raise FairException(f'{target} data greater or less than one')
    self._supplied_values[target] = {'raw': s.values.tolist()}
    return s.values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raw_data(self):\n data = self.datasource.as_array()\n if not isinstance(data, np.ndarray):\n raise TypeError(\"The data you try to load is no numpy array!\")\n if data.ndim != 2:\n raise ValueError(\"The data array you try to load does not have 2 \"\n \"dimensions!\")\n data = data.copy(order='C')\n ident = hashlib.md5(data).hexdigest()\n if self._dataident is None:\n self._dataident = ident\n elif self._dataident != ident:\n raise ValueError(\"The data you try to load from '%s' has changed \"\n \"since the last time. Please, check the \"\n \"datasource of the record '%s'.\"\n % (self.datasource.name, self.name))\n\n return data", "def __array__(self, *args, **kwargs):\n\n return self.data", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def __prepare(self, data):\n #print(\"Running Prepare data\")\n #print(data)\n #print(type(data))\n if len(data) > 1:\n if type(data[0]) == np.ndarray:\n return np.concatenate(data)\n else:\n return torch.cat(data).cpu().numpy()\n else:\n return data[0].cpu().numpy()", "def fromarray(self, array):\n\n raise NotImplementedError", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data", "def run_validation(self, data=fields.empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n value = self.to_internal_value(data)\n self.run_validators(value)\n if isinstance(value, list):\n return value\n try:\n value = data.replace(\"u'\", \"\\\"\")\n value = value.replace(\"'\", \"\\\"\")\n json.loads(value)\n except:\n raise serializers.ValidationError('Invalid Array')\n return value", "def __init__(self, arr=None):\n self.data = arr.copy() if arr else []", "def _check_and_transform_input(self, data):\n if isinstance(data, list):\n if np.array(data).shape == (len(data),):\n if len(data) == 1:\n data = np.array(data).reshape(1, 1)\n data = np.array(data).reshape(len(data), 1)\n else:\n data = np.concatenate(data).reshape(len(data), -1)\n else:\n raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))\n\n return data", "def __init__(self, data_array):\n self._data_array = data_array\n self._units = self._data_array.attrs.get('units', 'dimensionless')", "def prepare_full_data(raw_data):\n users_id = np.asarray(raw_data[0], dtype='int32')\n items_id = np.asarray(raw_data[1], dtype='int32')\n ratings = np.asarray(raw_data[3], dtype=theano.config.floatX)\n return [users_id, items_id, ratings]", "def __init__(\n self,\n field_path_raw,\n is_array_raw,\n type_raw,\n ):\n self.field_path_raw = field_path_raw\n self.is_array_raw = is_array_raw\n self.type_raw = type_raw", "def arrayreader(array, *args, **kwargs):\n yield array", "def sanitise_array(data):\n array = np.array(data)\n\n if array.ndim == 0:\n array = array[np.newaxis, np.newaxis]\n elif array.ndim == 1:\n array = array[:, np.newaxis]\n elif array.ndim != 2:\n raise ValueError(f'Only 1/2 dimensional data can be saved to text files, data.shape = {array.shape}')\n\n return array", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def parseArray(self, data):\n self.title = 
data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def _prep_data(self, data, func_input_dtype):\n if func_input_dtype in (None, 'DataArray'):\n return data\n if func_input_dtype == 'Dataset':\n # TODO: add logic that creates a single Dataset comprising all of\n # the DataArray objects in `data`.\n raise NotImplementedError(\"func_input_dtype of `Dataset` not yet \"\n \"implemented.\")\n if func_input_dtype == 'numpy':\n self.coords = data[0].coords\n return [d.values for d in data]", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n\n self._n_shots = len(data[0])\n self._n_circuits = len(data)\n\n if self._validate:\n if data.shape[:2] != (self._n_circuits, self._n_shots):\n raise DataProcessorError(\n f\"The datum given to {self.__class__.__name__} does not convert \"\n \"of an array with dimension (number of circuit, number of shots).\"\n )\n\n return data", "def data(self, arr):\n self.bitmap(arr, 1)", "def convert_raw_arrays(x, f):\n try:\n # Tensor, TensorNetwork...\n x = x.copy()\n x.apply_to_arrays(f)\n return x\n except AttributeError:\n pass\n\n try:\n # raw structured arrays that provide the {get|set}_params interface\n x = x.copy()\n x.set_params(tree_map(f, x.get_params()))\n return x\n except AttributeError:\n pass\n\n # other raw arrays\n return f(x)", "def test_read_data_processed(model_data):\n assert len(model_data) == 6 and type(model_data) is tuple", "def check_input_and_return_data(self, input_name):\n\n #Check to see if data is given by a file\n if isinstance(self.input_data['reservoir'][input_name], str):\n #Get filename\n filename = self.input_data['reservoir'][input_name]\n #Load data \n data = np.loadtxt(filename, dtype=np.double)\n \n #Check to see if data is given by a list\n elif isinstance(self.input_data['reservoir'][input_name], (list, tuple)):\n #Turn the list into numpy array\n data = np.array(self.input_data['reservoir'][input_name], \n dtype=np.double)\n\n #data is a constant array (homogeneous)\n else:\n ngrids = self.input_data['numerical']['number of grids']\n data = (self.input_data['reservoir'][input_name] * \n np.ones(ngrids))\n return data", "def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "def _get_to_actual_data(raw):\n raise NotImplemented", "def function():\n\tinputData = request.get_json(silent=True)\n\tprint(inputData)\n\t\n\ttry :\n\t\toriginArray = inputData['items']\n\texcept :\n\t\treturn jsonify({\"status\": \"failed\", \"msg\": 'Missing arguments'}),400\n\n\ttry :\n\t\tflattenArray = dataController.flatten(originArray)\n\t\tisSaved = dataController.saveArray(flattenArray)\n\t\tif isSaved:\n\t\t\treturn jsonify({\"status\": \"success\", \"msg\": \"The array was saved in database\", \"flattenArray\": flattenArray}), 200\n\t\telse:\n\t\t\treturn jsonify({\"status\": \"success\", \"msg\": \"The array was not saved in database\",\"flattenArray\": flattenArray}), 200\n\texcept Exception as error:\n\t\tprint(error)\n\t\treturn jsonify({'status': 'failed', 'msg': 'Something bad happened'}), 500", "def 
prefill_inputs(orm):\n # Determine the \"height\" and \"width\" of the array\n # by asking the database\n count = session.query(orm).count() # \"Height\" of array\n a_vector, = session.query(orm.vector).limit(1).one()\n dim = len(a_vector) # \"Width\" of array\n # Preallocate space\n data = np.empty((count, 768), dtype=FLOAT_TYPE)\n ids = np.empty((count, ), dtype=STR_TYPE)\n return data, ids", "def _validate_input(self, data: Union[np.ndarray, pd.DataFrame, pd.Series],\n expected_dim: int, inference: bool = False) -> np.ndarray:\n allowed_types = (\n np.ndarray,\n pd.core.frame.DataFrame,\n pd.core.frame.Series\n )\n\n if type(data) not in allowed_types:\n raise TypeError('Supported input types: np.ndarray, '\n 'pd.core.frame.DataFrame, pd.core.frame.Series got'\n ' {}'.format(type(data)))\n\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n data = data.values\n\n if data.size == 0:\n raise ValueError('Empty array passed to fit() or predict()')\n\n if data.ndim > expected_dim:\n raise ValueError('Data with incorrect number of dimensions '\n 'passed to fit() or predict(). Max dim is '\n '{}, got {}'.format(expected_dim, data.ndim))\n\n if not np.issubdtype(data.dtype, np.number):\n raise ValueError('Non numeric value found in data')\n\n if not np.isfinite(data).all():\n raise ValueError('Data contains nan or inf')\n\n if inference:\n # additional checks on prediction time\n if not self._fitted:\n raise ValueError('Fit the model first.')\n\n if self._ndim == 2 and data.shape[-1] != self._shape[-1]:\n raise ValueError('Number of features does not match'\n ' data model was trained on. Expected'\n ' {}, got {}'\n .format(self._shape[-1], data.shape[-1]))\n\n return data", "def _prepare_data(self, coords):\n return np.array([coords])" ]
[ "0.60833967", "0.5892189", "0.5854844", "0.5854844", "0.57446194", "0.5713086", "0.5632602", "0.5599413", "0.5592324", "0.5489122", "0.54690903", "0.54367536", "0.54291075", "0.53866744", "0.53799325", "0.53581697", "0.5353129", "0.5336951", "0.53313243", "0.5308267", "0.52908635", "0.52831054", "0.5277293", "0.5268992", "0.52489024", "0.52314156", "0.5209376", "0.5186471", "0.5182271", "0.51605254" ]
0.6199926
0
Checks keywords and returns the appropriate function object.
def _determine_func(self, **kwargs):
    # Check whether keys are recognized
    for key in kwargs.keys():
        if key not in self._parameter_map.keys():
            raise FairException('"{}" is not a recognized keyword'.format(key))
    # Check whether all keys go to same function via set comprehension
    functions = list(set([
        self._parameter_map[key]
        for key in kwargs.keys()
    ]))
    if len(functions) > 1:
        raise FairException('"{}" mixes incompatible keywords.'.format(str(kwargs.keys())))
    else:
        function = functions[0]
    return function
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def func(input, keyword=None):\r\n pass", "def execute_function_for_keyword(self):\n kwargs, kw_status = self.get_argument_as_keywords()\n\n print_info(\"The Arguments passed for the current Step is: '{0}'\".format(kwargs))\n if kw_status:\n # Execute the corresponding function\n try:\n keyword_result = self.exec_obj(**kwargs)\n except Exception as exception:\n trcback = print_exception(exception)\n keyword_result = (\"EXCEPTION\", trcback)\n\n self.data_repository = self.update_data_repository(self.keyword,\n keyword_result,\n self.data_repository)\n\n return self.data_repository", "def get_func(self, class_name, arg, stored_objects):\n find_func = re.match('([a-z]+)', arg) # returns a matching object\n func_name = find_func.group()\n args = re.findall('\"([^\"]+)\",?', arg) # return a list of arguments\n\n if len(args) == 0:\n if func_name == \"all\":\n self.all(class_name, stored_objects)\n elif func_name == \"count\":\n self.count(class_name, stored_objects)\n else:\n print(\"** instance id missing **\")\n\n elif len(args) == 1:\n if self.check_instance(class_name, args[0], stored_objects):\n if func_name == \"show\":\n self.show(class_name, args[0], stored_objects)\n elif func_name == \"destroy\":\n self.destroy(class_name, args[0], stored_objects)\n elif func_name == \"update\":\n print(\"** attribute name missing **\")\n\n elif len(args) == 2 and func_name == \"update\":\n print(\"** value missing **\")\n\n elif len(args) == 3 and func_name == \"update\":\n if self.check_instance(class_name, args[0], stored_objects):\n self.update(class_name, args, stored_objects)", "def __call__(self, *ar, **kw):\n\t\tkw = {**self.default_kw, **kw} # add any default keywords\n\t\tkw = {k:v for k,v in kw.items() if self.is_kwarg_valid(k)} # remove non valid keywords (keywords that are not in base func)\n\n\t\t# selectively get the kwargs according to the user\n\t\tif self.ignore_kw == \"ALL\":\n\t\t\tkw = {}\n\t\telif type(self.ignore_kw) == list:\n\t\t\tkw = {k:v for k,v in kw.items() if not k in self.ignore_kw}\n\t\telse:\n\t\t\traise Exception(\"self.ignore_kw must be list or ALL, but is:\", self.ignore_kw)\n\t\t\n\n\t\tassert self.check(ar, is_check_verbose=True), \"Checks have failed on given parameters %s for %s\"%(ar, self.__class__.__name__)\n\t\treturn self.base_func(*self.additional_check(ar), **kw)", "def _parse_functions(self, locals: dict):\n functions_dict = dict(filter(self._isfunction, locals.items()))\n functions = []\n if not self.args:\n functions.append(next(iter(functions_dict.values())))\n else:\n for i in range(len(self.args)):\n if functions_dict.get(self.args[0]):\n functions.append(functions_dict[self.args.pop(0)])\n else:\n if not functions:\n msg = f'ezmake command args: {self.args} did not ' + \\\n 'match any functions defined in Makefile.py: %s' %\\\n list(functions_dict.keys())\n raise TypeError(msg)\n break\n self.functions = functions", "def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')", "def __init__(self, recipes, decode_param_from=None, custom_handlers=None):\n\n if not recipes or not isinstance(recipes, list):\n logger.error('Unsupported _functions type! 
Something went wrong!')\n\n # Get required functions\n self.functions = [] # {func: func_obj, func_params: (params), fields=[]}\n\n for _func in recipes:\n # Check the syntax of provided function\n\n # Case: handler_name\n if match(r'(^[a-zA-Z0-9_-]{3,20}$)', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ<field>AbC<rfield>YzX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>XyZ\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<param>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<param>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<field>AbC<rfield>XXX\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<field>.{1,512}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: handler_name<rfield>ABCD\n elif match(r'(^[a-zA-Z0-9_-]{3,20}<rfield>.{1,512})$', _func, IGNORECASE):\n pass\n\n # Case: Syntax error\n else:\n logger.error('Syntax Error. Function: %s' % _func)\n logger.error(\n 'The example syntax of registry handler function shuld be: \\n\"-rh function_name<param>param1<param>param2<field>field_name_to_process<rfield>output_field_name\" (<param>,<field> and <rfield> are optional and depends on given function)\\nUse -prh for more details')\n exit(ERR_PROVIDER_INCORRECT_FUNCTION_SYNTAX)\n\n\n _func_name = ''\n _func_params = None\n _func_fields = None\n _func_output_fields = None\n\n # Get function, parameter(s) and fields (if specified)\n # Get _func_name\n _func_name, separator, _ = _func.partition('<')\n _func_name = _func_name.lower()\n\n if '<rfield>' in _func:\n _func, _, _func_output_fields = _func.partition('<rfield>')\n _func_output_fields = _func_output_fields.split(';')\n map(str.strip, _func_output_fields)\n\n if '<field>' in _func:\n _func, _, _func_fields = _func.partition('<field>')\n _func_fields = _func_fields.split(';')\n map(str.strip, _func_fields)\n\n if '<param>' in _func:\n _func, _, _func_params = _func.partition('<param>')\n _func_params = _func_params.split(';')\n map(str.strip, _func_params)\n\n if decode_param_from:\n if decode_param_from.lower() == 'base64':\n _func_params = list(map(base64.b64decode, _func_params))\n _func_params = list(map(bytes.decode, _func_params))\n else:\n logger.error('Unable to create a registry handler: \"%s\"\\n'\n 'Function: \"%s\"\\n'\n 'Unsupported param encoding: \"%s\"' %\n (_func_name, _func, decode_param_from))\n return None\n\n _func_params = tuple(_func_params)\n\n try:\n if not custom_handlers:\n func_class = getattr(handlers, _func_name)\n else:\n try:\n func_class = getattr(handlers, _func_name)\n except AttributeError:\n func_class = getattr(custom_handlers, _func_name)\n\n func_obj = getattr(func_class, _func_name)\n\n # if _func_output_fields is None:\n # _func_output_fields = _func_fields\n # pass\n\n self.functions.append({'func': func_obj, 'func_params': _func_params, 'func_fields': _func_fields,\n 'result_fields': _func_output_fields})\n\n except Exception as msg:\n logger.warning('Unable to get function object for: %s. 
Error: %s' % (_func_name, msg))\n logger.error('Unsupported Registry Handler: \"%s\"' % _func_name)\n\n self.default_fields = [registry_provider.registry_value.attributes.value_content]", "def this_is_a_keyword(arg1):\r\n print(arg1)\r\n return 'Whatever'", "def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argtypes\n if restype is not c_int:\n func.restype = restype\n logger.debug(\"NewWordFinder API function '{}' retrieved.\".format(name))\n return func", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }", "def detect_user_function(argtext):\r\n arglist=argtext.split(Comma) if Comma in argtext else [argtext] \r\n if is_primed(arglist[0]): # a user-defined function\r\n for x in arglist:\r\n if not is_primed(x):\r\n print(\"\\n*** Error in user-defined function: all args must be prime ***\")\r\n print(name)\r\n raise ReferenceError\r\n else: return False # a dict \r\n return True", "def validate_func(func, source=None):\n\n if type(func) == str:\n full_name = func\n short_name = full_name.split('_')[0]\n source = source or globals()\n try:\n func = source[short_name]\n except KeyError:\n raise KeyError('Unrecognized function name \"%s\"' % full_name)\n\n if type(func).__name__ != 'function':\n raise ValueError('Not a function: \"%s\"' % full_name)\n\n if 'SIGNATURE' not in func.__dict__:\n raise ValueError('Not a cspyce function: \"%s\"' % func.__name__)\n\n return func", "def run_keyword_and_ignore_keyword_definitions(name, *args):\n try:\n status, _ = BuiltIn().run_keyword_and_ignore_error(name, *args)\n except HandlerExecutionFailed:\n LOGGER.log_message(Message(\"Keyword {} not implemented\", \"ERROR\"))\n return \"FAIL\", \"\"\n return status, _", "def ask(self):\n keyword = input(self.foretext)\n\n self.input_asked = True\n\n if keyword in self.keywords:\n self.retrieved_input = keyword\n if keyword in self.functions:\n function, args, kwargs = self.functions[keyword]\n return function(*args, **kwargs)\n\n else:\n return keyword\n else:\n return self.fallback(keyword)", "def _is_function(self, words):\n if words[0] == 'function':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_FUNCTION command.\".format(self._file_line))\n return True\n else:\n return False", "def allkeywords(f):\n @_fntools.wraps(f)\n def wrapper(*a, **k):\n a = list(a)\n for idx, arg in enumerate(_inspect.getargspec(f).args, -_inspect.ismethod(f)): # or [0] in 2.5\n if arg in k:\n if idx < len(a):\n a.insert(idx, k.pop(arg))\n else:\n break\n return f(*a, **k)\n return wrapper", "def fcheck(*args, **kwargs)->None:\n pass", "def _validate_kw(obj, fn, trait_types, kw):\n\n actual = {}\n for name, value in kw.items():\n trait_type = trait_types.get(name)\n if trait_type is not None:\n value = trait_type.validate_method_argument(obj, 
fn, name, value)\n\n actual[name] = value\n \n return actual", "def __call__(fun_name):", "def _kwargs_check(feature_extraction, kwargs):\n # When using policy_kwargs parameter on model creation,\n # all keywords arguments must be consumed by the policy constructor except\n # the ones for the cnn_extractor network (cf nature_cnn()), where the keywords arguments\n # are not passed explicitly (using **kwargs to forward the arguments)\n # that's why there should be not kwargs left when using the mlp_extractor\n # (in that case the keywords arguments are passed explicitly)\n if feature_extraction == 'mlp' and len(kwargs) > 0:\n raise ValueError(\"Unknown keywords for policy: {}\".format(kwargs))", "def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def _validate_builtin(_):\n pass", "def keyword_search(keywords):\n try:\n return itunespy.search(keywords)[0]\n except LookupError:\n return None", "def test_kw_validation_with_trait_type_instances(self):\n\n @function(x=Int(10), y=Int(20), _returns_=Int(30))\n def add(**kw):\n return kw['x'] + kw['y']\n\n self.assertEqual(add(x=8, y=2), 10)\n self.failUnlessRaises(TraitError, add, x=2, y='xxx')\n\n return", "def parseFunction(tokens: List[LEX_Type], last_token: LEX_Type, ast_main: AST_Program) -> (AST_Function, List[LEX_Type], LEX_Type, AST_Program):\n if last_token.value == \"recipe\":\n if tokens[0].type == \"Identifier\":\n ast_main.Functions[tokens[0].value] = AST_Function()\n func: AST_Function\n rest_tokens: List[LEX_Type]\n final_token: LEX_Type\n func, rest_tokens, final_token, ast_main = parseFunction(tokens[1:], tokens[0], ast_main)\n func.name = tokens[0].value\n if func.CodeSequence is None:\n print(\"no code in function, expected code after Bake:\")\n exit()\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error_with_token(\"MissingIdentifier\", tokens[0])\n elif last_token.type == \"Identifier\":\n if tokens[0].value == \"->\":\n if tokens[1].type == \"Type\":\n func: AST_Function\n rest_tokens: List[LEX_Type]\n final_token: LEX_Type\n func, rest_tokens, final_token, ast_main = parseFunction(tokens[2:], tokens[1], ast_main)\n func.ReturnType = tokens[1].value\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error_with_token(\"ExpectedReturnType\", tokens[1])\n else:\n throw_error_with_token(\"ExpectedArrow\", tokens[0])\n elif tokens[0].type == \"Keyword\":\n if tokens[0].value == \"prepare\":\n if last_token.type == \"LineEnd\":\n if tokens[1].value == \":\":\n arguments: List[AST_FunctionArgument]\n rest_tokens: List[LEX_Type]\n arguments, rest_tokens = getFunctionArguments(tokens[2:], last_token)\n if len(arguments) == 0:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \"FunctionArguments\", tokens[0].value)\n func, rest_tokens, final_token, ast_main = parseFunction(rest_tokens[1:], rest_tokens[0], ast_main)\n func.argumentList = arguments\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \":\", tokens[0].value)\n else:\n throw_error(\"ExpectedBefore\", tokens[0].file, tokens[0].line, \"LineEnd\", tokens[0].value)\n elif tokens[0].value == \"bake\":\n if last_token.type == \"LineEnd\":\n if tokens[1].value == \":\":\n func: AST_Function = 
AST_Function()\n code: List[AST_Node]\n rest_tokens: List[LEX_Type]\n code, rest_tokens = createCodeBlock(tokens[2:], tokens[1], ast_main)\n func.CodeSequence = code\n if len(code) == 0:\n throw_error(\"CodeBlockEmpty\", tokens[1].file, tokens[1].line, tokens[1].value)\n return func, rest_tokens[1:], rest_tokens[0], ast_main\n else:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \":\", tokens[0].value)\n else:\n throw_error(\"ExpectedBefore\", tokens[0].file, tokens[0].line, \"LineEnd\", tokens[0].value)\n else:\n return parseFunction(tokens[1:], tokens[0], ast_main)", "def recognize_function_definition(self, a, text):\n logging.debug(\"in function recognize\")\n self.produce(KEYWORD, text)\n logging.debug(\"start def_func state\")\n self.begin('def_func')", "def get_function(name, store=None, error=True, **kwargs):\n fun = SqlFunction._definitions.get(SqlFunction.normalize_name(name))\n if fun is None:\n if error:\n raise KGTKException(f'undefind SQL function: {name}')\n return None\n elif isinstance(fun, str):\n # we have a forward-declaration to a defining module, import it\n # (any errors here are real and not subject to the 'error' flag):\n fun = SqlFunction.import_declared_function(name)\n # create a copy of the definition with some additional values filled in:\n fun = copy.copy(fun)\n fun.store = store\n for key, value in kwargs.items():\n setattr(fun, key, value)\n return fun", "def find_func(self, params):\n match = self.funcs.get(params, self.funcs[(otherwise,)])\n return match", "def parse(cls, data):\r\n try:\r\n # Parse the function here\r\n result = cls.FuncDefn.parseString(data)\r\n result_list = result.asList()\r\n args = result_list[3:result_list.index(')')]\r\n # Return single line or multi-line function body\r\n fn_body = re.sub(r'[^\\{]+\\{', '', data, count=1)\r\n parts = fn_body.strip().split('\\n')\r\n fn_body = '\\n'.join(parts[0:-1])\r\n return cls.GroovyFunction(result[1], args, fn_body, data)\r\n except Exception, ex:\r\n return {}" ]
[ "0.5854664", "0.56806916", "0.5641", "0.5623809", "0.55649495", "0.5476309", "0.54629546", "0.5438626", "0.541577", "0.5365577", "0.5363465", "0.5266843", "0.5264946", "0.5263023", "0.52230936", "0.5172818", "0.51589596", "0.51542425", "0.5113567", "0.5110534", "0.51024556", "0.5085226", "0.50839895", "0.50208515", "0.50052786", "0.49969798", "0.49941742", "0.49939832", "0.49853668", "0.49846557" ]
0.6410414
0
Generates constant array of size `count`
def _gen_constant(self, count, **kwargs): return np.full(count, kwargs['constant'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_array( n ):", "def default_array(self, count: int) -> List[Any]:\n return [self.default() for _ in range(count)]", "def create_array(n, bound):\n array = [np.random.randint(0, bound) for x in range(n)]\n return array", "def init_naive_array(n):\n result = list()\n for i in range(1, n+1):\n result.append(i)\n return result", "def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch", "def counts_to_vector(counts):\n\n return np.hstack([np.repeat(idx, count) for idx, count in enumerate(counts)])", "def gen_multi_v0(self, namespace, count):\n conn = self.pick_conn()\n retries = self.max_retries\n url = \"/v0/gen?ns=%s&count=%d\" % (namespace, count)\n while 1:\n try:\n r = conn.request(\"GET\", url)\n content = r.data\n assert r.status == 200, \"http status(%d) != 200 : %s\" % (\n r.status, content\n )\n return [int(i) for i in content.split(\",\")]\n except Exception as e:\n logger.warn(\"%s %s %s\", conn, url, e)\n conn = self.pick_conn(new=True)\n retries -= 1\n if retries < 0:\n raise", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def generate(count):\n return unpack_random_animals(generate_animals_randomly(count))", "def create_n(N):\n\n all_n = np.array([])\n max_bin_len = len(bin(2 ** N - 1)[2:]) # need this for 2 -> 010 instead of 10\n for i in range(2**N):\n all_n = np.append(all_n, bin(i)[2:].zfill(max_bin_len))\n\n return all_n", "def create_variable_array(times):\n R=np.empty(np.sum(times))\n return R", "def gen_multi(self, namespace, countspace, count):\n conn = self.pick_conn()\n retries = self.max_retries\n url = \"/gen?ns=%s&cs=%s&count=%d\" % (namespace, countspace, count)\n while 1:\n try:\n r = conn.request(\"GET\", url)\n content = r.data\n assert r.status == 200, \"http status(%d) != 200 : %s\" % (\n r.status, content\n )\n return [int(i) for i in content.split(\",\")]\n except Exception as e:\n logger.warn(\"%s %s %s\", conn, url, e)\n conn = self.pick_conn(new=True)\n retries -= 1\n if retries < 0:\n raise", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def expand_counts(counts):\n result = []\n for i, c in enumerate(counts):\n result.append(zeros(c, int) + i)\n return concatenate(result)", "def generate_read_list(num_files, world_size):\n return np.array_split(np.arange(num_files), world_size)", "def GenerateField(N):\n field = list(range(N * N))\n for i in range(N * N):\n field[i] = 0\n\n return field", "def _create_array(capacity):\n return (ctypes.py_object * capacity)()", "def data_gen(size, p):\n #print(np.random.get_state()[1][0])\n random_table = np.random.binomial(size = size, p = p, n = 1)\n test_array = np.zeros((size, 2), dtype = int)\n for i in range(size):\n test_array[i,0] = i\n test_array[i,1] = random_table[i]\n return test_array", "def generate_index(size=20):\n return hexlify(np.random.rand(100))[:size].decode()", "def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)", "def generateSDR(n, w):\n sdr = np.zeros((n, ))\n randomOrder = np.random.permutation(np.arange(n))\n activeBits = randomOrder[:w]\n sdr[activeBits] = 1\n return sdr", "def create_K_u(n: int) -> Array:\n return Array([K(i) for i in range(n ** 2 - 1)])", "def _make_array(self, capacity):\n return (capacity * ctypes.py_object)()", "def random_subset(array, count):\n indices = 
np.random.permutation(len(array))[:count]\n return array[indices]", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def _generate_constant_array(self, dtype, value):\n dtype = np.dtype(dtype)\n # here `key` is used to cache the constant array\n # has nothing to do with column name\n key = (dtype.str, value)\n if key not in self._constant_arrays:\n self._constant_arrays[key] = np.asarray(np.repeat(value, len(self)), dtype=dtype)\n self._constant_arrays[key].setflags(write=False)\n return self._constant_arrays[key]", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def generate_sparse(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = 1\n return x", "def generate_array_ints(n: int = 1024, max_int: int = 256, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.randint(0, max_int, n).astype(int)", "def generate_assignments(record_count: int) -> List[Dict]:\n assert record_count > 0, \"Number of assignments to generate must be greater than zero\"\n\n logging.info(f\"Generating {record_count} assignments\")\n assignments = []\n for i in range(1, record_count + 1):\n if i % 1000 == 0:\n logging.info(f\"{i} assignments...\")\n assignments.append(\n {\n \"name\": fake.catch_phrase(),\n }\n )\n return assignments" ]
[ "0.69629025", "0.6237902", "0.59859437", "0.5976607", "0.590662", "0.5860379", "0.5796555", "0.5791605", "0.57907313", "0.5790046", "0.57897764", "0.57081926", "0.56950295", "0.56916034", "0.5662179", "0.56293595", "0.5592688", "0.55898625", "0.5583231", "0.5581325", "0.55608493", "0.5524939", "0.55217165", "0.55058444", "0.54871124", "0.548313", "0.54825234", "0.5449732", "0.5383166", "0.53817344" ]
0.7421939
0
Checks parameters, creates BetaPert, returns random values
def _gen_pert(self, count, **kwargs): self._check_pert(**kwargs) pert = FairBetaPert(**kwargs) rvs = pert.random_variates(count) return rvs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def generate_data(sample_size, noise_variance):\n \n # generate true beta\n A = np.array([[1]*15, [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]]).T\n B = np.array([[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1]*15]).T\n x_shape = A.shape[0]\n y_shape = B.shape[0]\n \n X_train = np.random.randn(sample_size, x_shape, y_shape) \n X_train_vec = np.reshape(X_train, (sample_size, x_shape*y_shape))\n \n cross_beta = A @ B.T\n vec_cross_beta = np.reshape(cross_beta, (x_shape*y_shape, 1))\n cross_norm = np.linalg.norm(cross_beta, 'fro')\n cross_beta = cross_beta / cross_norm\n Y_soft = np.zeros((sample_size, 1))\n \n for i in range(sample_size):\n epsilon = noise_variance * np.random.randn(1, 1)\n x_i = X_train_vec[i, :]\n y_i = (x_i @ vec_cross_beta) + epsilon\n Y_soft[i, :] = y_i\n \n Y_hard = np.sign(Y_soft)\n \n return cross_beta, X_train, Y_hard, Y_soft", "def _beta_choice ( time_total, CurrentTime, dist, a = 0, b = 0 ):\n \n # compute the parameters b and a\n if not b: b = ( time_total - CurrentTime ) / ( time_total )\n if not a: a = ( 1 - b )\n\n if b == 0 : b = 0.01\n \n rv = beta(a, b)\n \n # get a random number\n variate = beta.rvs( a, b )\n \n # get the key of bin in which the random variate falls\n for key in dist:\n if dist[key][0] <= variate < dist[key][1]:\n return key", "def ts_rewards_sample(self):\n exp_rewards_list = [np.random.beta(a, b) for a, b in zip(self.alpha_params, self.beta_params)]\n return np.array(exp_rewards_list)", "def test_Bernoulli_NB_estimators():", "def randomParam(trajA):\n lenCBFs = len(trajA[\"CBFCons\"][-1])\n CBFs = CBFSets[CBFlens.index(lenCBFs)]\n Balpha, Kp, Kd = trajA[\"param\"]\n Balpha = Balpha + 0.02*(0.5-np.random.random(size = Balpha.shape))\n Kp = Kp + 50*(0.5-np.random.random())\n Kd = Kd + 10*(0.5-np.random.random())\n return SimuTest(CBFs,Balpha,Kp,Kd)", "def estimate_sample_beta(sample):\n x_s, y_s = zip(*sample)\n reg.fit(x_s, y_s)\n betas = reg.weights_\n return betas", "def beta_gen_nonneg(p):\n return np.clip(np.random.normal(0, 3, p), 0, None)", "def generate_data(params, N, rng=(-7, 7)):\n hp = np.array(params)\n print(\"parameters for data generated from gp are : {0}\".format(hp))\n print(\"using a ExpSquared kernel\")\n gp = george.GP(hp[0] * kernels.ExpSquaredKernel(hp[1]))\n t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))\n\n #y = model(params, t)\n y = gp.sample(t)\n yerr = 1.e-5 #1 + 0.1 * np.random.randn(N)\n y += yerr\n\n return t, y, yerr", "def learning_proposal(self):\n\n n, s = self.X_select.shape\n\n beta_hat = self.observed_MLE\n\n perturbed_beta = beta_hat.copy()\n nidx = np.random.choice(np.arange(s), min(3, s), replace=False)\n for idx in nidx:\n scale = np.random.choice(self.scales, 1)\n perturbed_beta[idx] += (scale * np.random.standard_normal() *\n np.sqrt(self._beta_cov[idx, idx]))\n \n linpred = self.X_select.dot(perturbed_beta)\n prob = normal_dbn.cdf(linpred)\n perturbed_Y = np.random.binomial(1, prob)\n\n perturbed_MLE = probit_MLE(self.X, perturbed_Y, self.observed_outcome)[0]\n return perturbed_MLE, perturbed_Y", "def sample_bernoulli(params):\n assert False, 'tfp not available on cluster gpu yet'\n \"\"\"\n shape = tf.shape(params)\n bernoulli_dist = tfp.distributions.Bernoulli(logits=params, dtype=tf.float32)\n return bernoulli_dist.sample()\n \"\"\"", "def _tstat_beta(self):\n return 
_handle_ab(self._tstat_all, self.use_const)[1]", "def _pvalue_beta(self):\n return _handle_ab(self._pvalues_all, self.use_const)[1]", "def diffy_hellman(field, a_value, b_value, point):\n a_comb, b_comb = int(), int()\n while a_comb == b_comb:\n a_comb = randint(1, sqrt(field) // 2)\n b_comb = randint(1, sqrt(field) // 2)\n print(\"Next factors have been generated:\")\n print(\"alhpha: \", a_comb)\n print(\"beta: \", b_comb)\n try:\n a_point = multiply_point(point, a_comb, field, a_value, b_value)\n b_point = multiply_point(point, b_comb, field, a_value, b_value)\n a_secret = multiply_point(b_point, a_comb, field, a_value, b_value)\n b_secret = multiply_point(a_point, b_comb, field, a_value, b_value)\n except ValueError:\n print(\"Got a point an eternity... Please, repeat DF-algorythm\")\n return\n if a_secret != b_secret:\n print(\"Something has terribly gone wrong...\")\n return\n else:\n print(\"Common secret key has been succesfully generated\")\n return a_secret", "def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)", "def beta_distribution(mu, sigma=10 ** -5):\n alpha = mu * mu * ((1 - mu) / (sigma * sigma) - 1 / mu)\n beta = alpha * (1 / mu - 1)\n\n return np.random.beta(alpha, beta)", "def mod_pert_random(low, likely, high, confidence=4, samples=30):\n # Check minimum & maximum confidence levels to allow:\n confidence = min(8, confidence)\n confidence = max(2, confidence)\n\n mean = (low + confidence * likely + high) / (confidence + 2)\n\n a = (mean - low) / (high - low) * (confidence + 2)\n b = ((confidence + 1) * high - low - confidence * likely) / (high - low)\n\n beta = np.random.beta(a, b, samples)\n beta = beta * (high - low) + low\n return beta", "def test_bayes_factor_a(self):\n model_1 = 2\n model_2 = 4\n expected_result = -4\n actual_result = utils.bayes_factor(model_1, model_2)\n assert_almost_equal(actual_result, expected_result)", "def get_elbo(votes,\n bill_indices,\n senator_indices,\n ideal_point_distribution,\n polarity_distribution,\n popularity_distribution,\n dataset_size,\n num_samples):\n ideal_point_samples = 
ideal_point_distribution.sample(num_samples)\n polarity_samples = polarity_distribution.sample(num_samples)\n popularity_samples = popularity_distribution.sample(num_samples)\n \n ideal_point_log_prior = get_log_prior(ideal_point_samples)\n polarity_log_prior = get_log_prior(polarity_samples)\n popularity_log_prior = get_log_prior(popularity_samples)\n log_prior = ideal_point_log_prior + polarity_log_prior + popularity_log_prior\n\n ideal_point_entropy = get_entropy(ideal_point_distribution, \n ideal_point_samples)\n polarity_entropy = get_entropy(polarity_distribution, polarity_samples)\n popularity_entropy = get_entropy(popularity_distribution, popularity_samples)\n entropy = ideal_point_entropy + polarity_entropy + popularity_entropy\n\n selected_ideal_points = tf.gather(ideal_point_samples, \n senator_indices, \n axis=1)\n selected_polarities = tf.gather(polarity_samples, bill_indices, axis=1) \n selected_popularities = tf.gather(popularity_samples, bill_indices, axis=1) \n vote_logits = (selected_ideal_points * \n selected_polarities + \n selected_popularities)\n\n vote_distribution = tfp.distributions.Bernoulli(logits=vote_logits)\n vote_log_likelihood = vote_distribution.log_prob(votes)\n vote_log_likelihood = tf.reduce_sum(vote_log_likelihood, axis=1)\n \n elbo = log_prior + vote_log_likelihood + entropy\n elbo = tf.reduce_mean(elbo)\n\n tf.summary.scalar(\"elbo/elbo\", elbo)\n tf.summary.scalar(\"elbo/log_prior\", tf.reduce_mean(log_prior))\n tf.summary.scalar(\"elbo/vote_log_likelihood\", \n tf.reduce_mean(vote_log_likelihood))\n tf.summary.scalar(\"elbo/entropy\", tf.reduce_mean(entropy))\n return elbo", "def test_bart(self):\n valid, _ = testing_utils.eval_model(\n dict(task='integration_tests', model='bart', num_examples=10)\n )\n self.assertAlmostEqual(valid['ppl'].value(), 1.0, places=1)", "def random_valid(self):\n if random_exp > 0:\n args.exp = random.sample(exp_choices, random_exp)\n elif random_exp < 0:\n args.exp = random.sample(exp_choices, random.randint(0, -random_exp))\n btypes_str = 'T'*8+'S'*4+'U'*(5 - len(args.exp))+'P'*3+'G'*2+'F'*2+'A'*3+'1'*3+'2'*2+'3'*1+'4'*1+'5'*1+'O'*8+'M'*(-args.monuments if args.monuments < 0 else 0)\n btypes_min_str = 'T'*0+'S'*0+'U'*len(args.exp)+'P'*0+'G'*0+'F'*0+'A'*0+'1'*0+'2'*0+'3'*0+'4'*0+'5'*0+'O'*0+'M'*(args.monuments if args.monuments > 0 else 0)\n len_min = len(btypes_min_str)\n while 1:\n ## TSU_PG_FA_12345_OM\n ## tot845_32_23_32111_81\n ## min00E_00_00_00000_00\n bpos = list(range(20))\n self.b = ['_'] * 20\n self.f = [1] * 20\n cnt_b = 0\n btypes_min = list(btypes_min_str)\n random.shuffle(btypes_min)\n while cnt_b < len_min:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes_min:\n btypes_min.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes_min.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n btypes = list(btypes_str)\n random.shuffle(btypes)\n while cnt_b < 20:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes:\n btypes.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n self.calc_resources()\n if self.popula_used <= self.popula and self.energy_used <= 
self.energy:\n break", "def test_prop_beta(self):\n # reproducible arbitrariness\n np.random.seed(1321)\n\n self.rule.alpha = 0\n self.rule.beta = 0.5\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n factor = 1.5\n tmax = 7*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(tmax)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.beta *= factor\n sim.run(tmax)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, factor*change1))", "def test_prop_beta(self):\n # reproducible arbitrariness\n np.random.seed(1321)\n\n self.rule.alpha = 0\n self.rule.beta = 0.5\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n factor = 1.5\n tmax = 7*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(tmax)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.rule.beta *= factor\n sim.run(tmax)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, factor*change1))", "def jarque_bera(self,alpha=0.05):\n self._finalize()\n JB = self.vcount/6*(self.vskewness**2 + 1/4*((self.vkurtosis-3)**2))\n if chi2 is None:\n p = \"scipy missing\"\n else:\n p = 1 - chi2.cdf(JB,2)\n return JB,p", "def test_bayes_factor_b(self):\n model_1 = ufloat(2, 1)\n model_2 = ufloat(4, 1)\n expected_result = ufloat(-4, 2.82842712474619032)\n actual_result = utils.bayes_factor(model_1, model_2)\n assert_almost_equal(actual_result.n, expected_result.n)\n assert_almost_equal(actual_result.s, expected_result.s)", "def test_generategaussians(self):\n ret1 = {\"message\": \"No Random.org api key or api version found.\", \"res\": False}\n self.assertDictEqual(random_org.generateGaussians(), ret1)\n\n ret2 = {\"message\": \"Required argument, number is missing.\", \"res\": False}\n self.assertDictEqual(\n random_org.generateGaussians(api_key=\"peW\", api_version=\"1\"), ret2\n )\n\n ret3 = {\n \"message\": \"Number of decimal fractions must be between 1 and 10000\",\n \"res\": False,\n }\n self.assertDictEqual(\n random_org.generateGaussians(\n api_key=\"peW\",\n api_version=\"1\",\n number=\"5\",\n mean=\"0.0\",\n standardDeviation=\"1.0\",\n significantDigits=\"8\",\n ),\n ret3,\n )\n\n ret4 = {\n \"message\": \"The distribution's mean must be between -1000000 and 1000000\",\n \"res\": False,\n }\n self.assertDictEqual(\n random_org.generateGaussians(\n api_key=\"peW\",\n api_version=\"1\",\n number=5,\n mean=\"0.0\",\n standardDeviation=\"1.0\",\n significantDigits=\"8\",\n ),\n ret4,\n )\n\n ret5 = {\n \"message\": (\n \"The distribution's standard deviation must be\"\n \" between -1000000 and 1000000\"\n ),\n \"res\": False,\n }\n self.assertDictEqual(\n random_org.generateGaussians(\n api_key=\"peW\",\n api_version=\"1\",\n number=5,\n mean=0.0,\n standardDeviation=\"1.0\",\n significantDigits=\"8\",\n ),\n ret5,\n )\n\n ret6 = {\n \"message\": \"The number of significant digits must be between 2 and 20\",\n \"res\": False,\n }\n self.assertDictEqual(\n random_org.generateGaussians(\n api_key=\"peW\",\n api_version=\"1\",\n number=5,\n mean=0.0,\n standardDeviation=1.0,\n significantDigits=\"8\",\n ),\n ret6,\n )\n\n ret7 = {\"message\": \"Parameter 'apiKey' is malformed\", \"res\": False}\n self.assertDictEqual(\n random_org.generateGaussians(\n 
api_key=\"peW\",\n api_version=\"1\",\n number=5,\n mean=0.0,\n standardDeviation=1.0,\n significantDigits=8,\n ),\n ret7,\n )", "def batch_sample_beta(self):\n c_contexts = self.context[self.iter]\n\n old_beta = self.beta[self.iter]\n new_beta = -1\n proposal_sd = .1\n while new_beta <= 0:\n new_beta = random.gauss(mu = old_beta, sigma = proposal_sd)\n \n # set up to calculate the g densities for both the old and new beta values\n log_g_old = -1 * old_beta # which is np.log(np.exp(-1 * old_beta))\n log_g_new = -1 * new_beta # similar as above\n\n # derive contexts from breakpoints arrangement\n context_dict = self.make_context_dict(c_contexts)\n for context in context_dict.keys():\n log_g_old += math.lgamma(self.support_size * old_beta) \\\n - math.lgamma(self.support_size * old_beta + len(context_dict[context]))\n log_g_new += math.lgamma(self.support_size * new_beta) \\\n - math.lgamma(self.support_size * new_beta + len(context_dict[context]))\n \n for y in self.support:\n log_g_old += math.lgamma(context_dict[context].count(y) + old_beta) - math.lgamma(old_beta)\n log_g_new += math.lgamma(context_dict[context].count(y) + new_beta) - math.lgamma(new_beta)\n\n # compute candidate densities q for old and new beta\n # since the proposal distribution is normal this step is not needed\n log_q_old = 0#np.log(dnorm(old_beta, loc = new_beta, scale = proposal_sd))\n log_q_new = 0#np.log(dnorm(new_beta, loc = old_beta, scale = proposal_sd)) \n \n # compute the moving probability\n moving_prob = min(1, np.exp((log_g_new + log_q_old) - (log_g_old + log_q_new)))\n \n u = random.uniform(0,1)\n if u < moving_prob: self.beta[self.iter] = new_beta\n return self.beta[self.iter]", "def betabinom_artifact_model_probability(data,\n prior_p=betabinom_p_prior[0],\n prior_beta=betabinom_beta_prior[0],\n mutation_object=True):\n\n if mutation_object is True:\n trajectories = data.data\n else:\n trajectories = data\n\n # initialise list of samples for p integration\n int_p = []\n # integral over betabinom p parameter\n for p_sample in prior_p[0, :]:\n # initialise list of samples for beta integration\n int_beta = []\n # integrate over beta parameter\n for beta_sample in prior_beta[0, :]:\n # For each combination of p and beta parameters compute\n # lieklihood of observing a given time-series conditional\n # on initial time-point.\n alpha_sample = beta_sample*p_sample/(1-p_sample)\n # compute likelihood for each individual omiting first time point\n ind_likelihoods = []\n for ind_traj in trajectories:\n likelihood = betabinom.pmf(k=ind_traj[1:].AO,\n n=ind_traj[1:].DP,\n a=alpha_sample,\n b=beta_sample)\n ind_likelihoods.append(np.product(likelihood))\n # for each beta append the total likelihood (product individuals).\n int_beta.append(np.product(ind_likelihoods))\n # For each p, compute the likelihood marginalised over beta\n int_p.append(\n np.trapz(x=prior_beta[0, :],\n y=int_beta*prior_beta[1, :]))\n \n # marginalise likelihood over p\n mutation_prob = np.trapz(x=prior_p[0, :],\n y=int_p*prior_p[1, :])\n\n if mutation_object is True:\n # return updated model_comparison object \n data.betabinom_artifact_prob = mutation_prob\n return data\n else:\n # return marginalised likelihood.\n return mutation_prob", "def sample(self, beta):\n experiences = random.sample(self.memory, k=self.batch_size)\n\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)\n 
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n \n # return 2 extra values as dummy replacements for the indices and weights returned by NaivePrioritizedBuffer\n indices = np.ones(len(actions))\n weights = torch.from_numpy(np.ones(len(actions))).float().to(device)\n\n return (states, actions, rewards, next_states, dones, indices, weights)", "def beta(G, responses = [], covariates = [], conditionants = []):\n variables = covariates + conditionants \n \n means = mean(G, responses, variables)\n \n def collect(index):\n return sp.collect(expr = sp.expand(means[index]), \n syms = variables)\n \n collections = [collect(index) for index in range(len(means))]\n \n betas = sp.Matrix([collection.coeff(variables) \n for collection, variables \n in product(collections, variables)])\n \n betas.reshape(len(collections), len(variables)).T\n \n indices = variable_indices(G, values = covariates, \n restrictions = variables, \n sort = True)\n \n return betas[indices, :]" ]
[ "0.5960632", "0.5894815", "0.58648175", "0.58480936", "0.5828943", "0.57892954", "0.57710135", "0.5668323", "0.56302905", "0.5577978", "0.55545354", "0.5546763", "0.5536753", "0.55121404", "0.55006164", "0.5491941", "0.5485144", "0.54799646", "0.54481506", "0.54462004", "0.54321235", "0.5420652", "0.5420652", "0.54107434", "0.53948045", "0.5388472", "0.5388318", "0.53874195", "0.5384054", "0.53795713" ]
0.6816109
0
Uses pandas to load an edgelist file and returns it as a list of tuples with pairs of connected nodes.
def load_edgl(fname): # Reads edges df = pd.read_csv(fname, sep=" ", header=None, usecols=[0, 1]) # Convert to list of tuples return list(df.itertuples(index=False, name=None))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)", "def _read_data(filename):\n file = open(filename, \"r\")\n timestamps = []\n edges = []\n for line in file:\n # source target weight timestamp\n if line.startswith(\"%\"):\n continue\n spl = line.split()\n if len(spl) == 4:\n # store that stuff in triples (source, target, weight, timestamp)\n edges.append((int(spl[0]), int(spl[1]), int(spl[2]), int(spl[3])))\n timestamps.append(int(spl[3]))\n return edges, sorted(timestamps)", "def ParseGraph(filename):\n vertices = []\n edges = set([])\n\n for l in open(filename):\n fields = [int(f) for f in l.split()]\n vertex = fields.pop(0)\n incident = [tuple(sorted([vertex, f])) for f in fields]\n vertices.append(vertex)\n edges.update(incident)\n\n return vertices, list(edges)", "def get_label_list(connection_fn):\n label_list = []\n with open(connection_fn, 'r') as adj_file:\n for line in adj_file:\n # Get adjacencies\n node, edges = line.strip().split(':')\n node = node.upper()\n edges = edges.upper()\n label_list.append(node)\n return label_list", "def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]", "def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph", "def read_graph(filename):\n\n print(\"\\n\\n========== Loading graph: \" + filename + '==================')\n edges = {}\n\n inFile = open(filename)\n for line in inFile:\n roadInfo = line.split()\n\n # Skip blank lines, read in contents from non-empty lines.\n if (len(roadInfo) > 0):\n srcCity = roadInfo[0]\n destCity = roadInfo[1]\n\n if srcCity in edges:\n edges[srcCity] = edges[srcCity] + [destCity]\n else:\n edges[srcCity] = [destCity]\n\n if destCity in edges:\n edges[destCity] = edges[destCity] + [srcCity]\n else:\n edges[destCity] = [srcCity]\n\n print(\" done.\\n\")\n return edges", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def read (path):\n\n with open(path) as f:\n reader = csv.DictReader(f)\n edges, nodes = [], {}\n for row in reader:\n edges.append((row[\"NODE1\"], row[\"NODE2\"]))\n nodes[row[\"NODE1\"]] = [eval(row[\"LONG1\"]), eval(row[\"LAT1\"])]\n nodes[row[\"NODE2\"]] = [eval(row[\"LONG2\"]), eval(row[\"LAT2\"])]\n\n return nodes, edges", "def edge_list_build(input_path, output_path):\n\n start_time = time.time()\n\n df = pd.read_csv(input_path, sep='\\t', header=None)\n\n for col in range(1, len(df.columns)):\n df.iloc[:, col] = df.iloc[:, col-1] + '_' + df.iloc[:, col]\n\n n_divs = len(df.columns) - 1\n\n\n dict_node_names = {}\n\n for id, node_name in enumerate(np.unique(df.values.flatten())):\n dict_node_names[node_name] = id + 1\n\n tmp_df = pd.DataFrame.from_dict(dict_node_names, orient='index')\n tmp_df.reset_index(inplace=True)\n tmp_df.rename({'index': 'nodes', 0: 'hash'}, inplace=True, axis=1)\n\n hash_df = tmp_df['nodes'].str.split('_', n=n_divs, expand=True)\n hash_df = pd.concat([hash_df, tmp_df['hash']], axis=1)\n\n for col_name in df.columns:\n df[col_name] = df[col_name].map(dict_node_names)\n\n df['root'] = 0\n colnames = df.columns.values\n colnames = list(colnames[-1:]) + list(colnames[:-1])\n df = 
df[colnames]\n\n df_tuples = pd.DataFrame()\n\n for i in range(len(df.columns) - 1):\n df_tuples[i] = list(df[df.columns[i:i + 2]].itertuples(index=False, name=None))\n del df\n gc.collect()\n\n nodes_list = []\n\n for col_id in range(0, df_tuples.shape[1]):\n father_child = df_tuples.iloc[:, col_id].drop_duplicates().values\n nodes_list.extend(father_child)\n\n graph = nx.DiGraph(nodes_list)\n graph_bfs = nx.bfs_tree(graph, 0)\n \n path = output_path + '.hashmap'\n hash_df.to_csv(path, index=False, sep='\\t')\n end_time = time.time()\n print(\"Time spent creating tree from csv file:\", end_time - start_time)\n return graph_bfs", "def edge_list_df_to_igraph(edge_list_df, node_id_mapper):\n nodes = list(set(edge_list_df.from_id.values.tolist() + edge_list_df.to_id.values.tolist()))\n #node_names = list(set(edge_list_df.from_name.values.tolist() + edge_list_df.to_name.values.tolist()))\n edges = list(zip(edge_list_df.from_id, edge_list_df.to_id))\n weights = list(edge_list_df.weight.values)\n g = Graph()\n g.add_vertices(len(nodes))\n g.add_edges(edges)\n g.es['weight'] = weights\n g.vs['label'] = list(node_id_mapper.inverse_transform(np.array(range(len(g.vs)))))\n g.vs['community'] = 0 # Set original community the same for all nodes\n return g, edges", "def read_graph(args):\n dataset = pd.read_csv(args.features_path).values.tolist()\n edges = {}\n edges[\"positive_edges\"] = [edge[0:2] for edge in dataset if edge[2] == 1]\n edges[\"negative_edges\"] = [edge[0:2] for edge in dataset if edge[2] == -1]\n edges[\"ecount\"] = len(dataset)\n edges[\"ncount\"] = len(set([edge[0] for edge in dataset]+[edge[1] for edge in dataset]))\n return edges", "def process_edges(edges_string_list):\n edge_list = []\n for line in edges_string_list:\n pair = line.split(',')\n edge_list.append([int(pair[0]), int(pair[1]), float(pair[2])])\n return edge_list", "def read_graph_file(filename):\n nodes, edges = [], []\n with open(filename) as f1:\n numNodes = int(f1.readline())\n numEdges = int(f1.readline())\n nodes = np.zeros([numNodes,3], dtype=\"float32\")\n edges = np.zeros([numEdges,2], dtype=\"int32\")\n nodeCount = 0\n edgeCount = 0\n for line in f1:\n parts = line.split(\" \")\n if len(parts) == 4:\n # node line\n nodes[nodeCount] = (float(parts[0]), float(parts[1]), int(parts[3])) \n nodeCount += 1\n elif len(parts) == 3:\n # edge line\n edges[edgeCount] = (int(parts[0]), int(parts[1])) \n edgeCount += 1\n return nodes, edges", "def get_edge_ids(self):\n node_ids = self.node_ids\n return [(node_ids[0], node_ids[1])]", "def build_graph(filepath):\n graph = defaultdict(list)\n with open(filepath, 'r') as file:\n for edge in file:\n head, tail = edge.split()\n graph[head].append(tail)\n return graph", "def edges(self, node):\n nID = self.n2ID[node]\n return [(self.ID2n[n1ID], self.ID2n[n2ID]) for (n1ID, n2ID) in self.G.edges(nID)]", "def read_dot_file(dot_file_path):\n nodes = []\n edges = []\n with open(dot_file_path) as f:\n in_lines = f.readlines()\n for line in in_lines:\n # ignore arrow attributes\n line = line.split(sep=\"[\")[0]\n if \"->\" in line:\n split_list = line.split(sep=\"->\")\n # print(\"ffgg\", split_list)\n pa = split_list[0].strip()\n if pa not in nodes:\n nodes.append(pa)\n ch_list = split_list[1].split(\",\")\n ch_list = [x.strip().strip(\";\").strip() for x in ch_list]\n # print(\"ffgg\", pa)\n # print(\"ffgg\", ch_list)\n for ch in ch_list:\n edges.append((pa, ch))\n if ch not in nodes:\n nodes.append(ch)\n\n return nodes, edges", "def read_graph_g2o(filename):\n Edge = namedtuple(\n 
'Edge', ['Type', 'fromNode', 'toNode', 'measurement', 'information'])\n edges = []\n nodes = {}\n with open(filename, 'r') as file:\n for line in file:\n data = line.split()\n\n if data[0] == 'VERTEX_SE2':\n nodeId = int(data[1])\n pose = np.array(data[2:5], dtype=np.float32)\n nodes[nodeId] = pose\n\n elif data[0] == 'VERTEX_XY':\n nodeId = int(data[1])\n loc = np.array(data[2:4], dtype=np.float32)\n nodes[nodeId] = loc\n\n elif data[0] == 'EDGE_SE2':\n Type = 'P'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:6], dtype=np.float32)\n uppertri = np.array(data[6:12], dtype=np.float32)\n information = np.array(\n [[uppertri[0], uppertri[1], uppertri[2]],\n [uppertri[1], uppertri[3], uppertri[4]],\n [uppertri[2], uppertri[4], uppertri[5]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_XY':\n Type = 'L'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:5], dtype=np.float32)\n uppertri = np.array(data[5:8], dtype=np.float32)\n information = np.array([[uppertri[0], uppertri[1]],\n [uppertri[1], uppertri[2]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n else:\n print('VERTEX/EDGE type not defined')\n\n # compute state vector and lookup table\n lut = {}\n x = []\n offset = 0\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n # collect nodes, edges and lookup in graph structure\n graph = Graph(x, nodes, edges, lut)\n print('Loaded graph with {} nodes and {} edges'.format(\n len(graph.nodes), len(graph.edges)))\n\n return graph", "def read_qrels(qrelsfile):\n qrels = pd.read_csv(qrelsfile, sep='\\t', header=None, names=['qid', 'unused', 'id', 'label'])\n positive_pairs = []\n for item in qrels.itertuples():\n query_id = str(item.qid)\n id = str(item.id)\n label = item.label\n if label > 0:\n positive_pairs.append((query_id, id))\n return positive_pairs", "def load_training_data(self, file_, slf_loop=True, symmetry_edge=True):\n logging.info('loading data from %s' % file_)\n edge_data_by_type = dict()\n all_edges = list()\n all_nodes = list()\n\n with open(file_, 'r') as reader:\n for line in reader:\n words = line.strip().split(' ')\n if words[0] not in edge_data_by_type:\n edge_data_by_type[words[0]] = []\n src, dst = words[1], words[2]\n edge_data_by_type[words[0]].append((src, dst))\n all_edges.append((src, dst))\n all_nodes.append(src)\n all_nodes.append(dst)\n\n if symmetry_edge:\n edge_data_by_type[words[0]].append((dst, src))\n all_edges.append((dst, src))\n\n all_nodes = list(set(all_nodes))\n all_edges = list(set(all_edges))\n # edge_data_by_type['Base'] = all_edges\n\n if slf_loop:\n for e_type in edge_data_by_type.keys():\n for n in all_nodes:\n edge_data_by_type[e_type].append((n, n))\n\n # remapping to index\n edges_by_type = {}\n for edge_type, edges in edge_data_by_type.items():\n res_edges = []\n for edge in edges:\n res_edges.append(\n (self.word2index[edge[0]], self.word2index[edge[1]]))\n edges_by_type[edge_type] = res_edges\n\n return edges_by_type, all_edges, all_nodes", "def load_edgelist(in_fname, out_fname, tm_size):\n graph = nx.Graph()\n \n rf = open(in_fname, \"r\")\n reader = csv.reader(rf, delimiter=\" \")\n count = 0\n t = 0\n \n for row in reader:\n src = int(row[0])\n dst = int(row[1])\n \n graph.add_edge(src, dst, label=\"yes\", add=t)\n \n count += 1\n if count % tm_size == 0:\n t += 1 
# next timestamp\n \n rf.close()\n \n nx.set_node_attributes(graph, \"cyan\", \"label\")\n \n with open(out_fname, \"w\") as wf:\n data = json_graph.node_link_data(graph)\n json.dump(data, wf, indent=2)", "def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)", "def gml_to_node_edge_list(infile, outfile=None, routing=False, write_to_disk=True):\n\n if outfile is None:\n outfile = infile + '.csv'\n if os.path.exists(outfile):\n raise ValueError(\"Output file %s already exists\", outfile)\n\n # read gml\n data = itn.read_gml(infile)\n net = itn.ITNStreetNet.from_data_structure(data)\n\n g = net.g_routing if routing else net.g\n out_data = []\n\n edges_seen = set()\n for start_node in g.edge.iterkeys():\n for end_node, edges in g.edge[start_node].iteritems():\n for edge_id, attr in edges.items():\n if routing:\n if (start_node, end_node, edge_id) in edges_seen:\n continue\n else:\n if (start_node, end_node, edge_id) in edges_seen or (end_node, start_node, edge_id) in edges_seen:\n continue\n\n edges_seen.add((start_node, end_node, edge_id))\n t = (start_node, end_node, edge_id, attr['length'])\n out_data.append(t)\n\n fields = (\n 'start_node',\n 'end_node',\n 'edge_id',\n 'edge_length',\n )\n\n if write_to_disk:\n with open(outfile, 'wb') as f:\n c = csv.writer(f)\n c.writerow(fields)\n c.writerows(out_data)\n print \"Saved CSV to %s\" % outfile\n\n return out_data", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def graph_reader(path):\n graph = nx.from_edgelist(pd.read_csv(path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def read_connections(file_name, point_names):\r\n\r\n connections = []\r\n fid = open(file_name, 'r')\r\n line=fid.readline()\r\n while(line):\r\n connections.append(np.array(line.split(',')))\r\n connections[-1][0] = connections[-1][0].strip()\r\n connections[-1][1] = connections[-1][1].strip()\r\n line = fid.readline()\r\n connect = np.zeros((len(point_names), len(point_names)),dtype=bool)\r\n for i in range(len(point_names)):\r\n for j in range(len(point_names)):\r\n for k in range(len(connections)):\r\n if connections[k][0] == point_names[i] and connections[k][1] == point_names[j]:\r\n \r\n connect[i,j]=True\r\n connect[j,i]=True\r\n break\r\n \r\n return connect", "def readAdjacencyGraph(self,filename):\n try:\n for line in open(filename,'r'):\n incoming,outgoing=line.strip().split(\":\")\n no_outgoing=outgoing.split(\",\")\n self.adjacencyMetadata[incoming]=dict(zip(no_outgoing,range(len(no_outgoing))))\n if incoming not in self.adjacency.keys():\n self.adjacency[incoming]=None\n for item in no_outgoing:\n if item not in self.adjacency.keys():\n self.adjacency[item]=None\n except Exception as e:\n raise" ]
[ "0.6817114", "0.67747307", "0.65344524", "0.6419645", "0.6242016", "0.6231345", "0.6211329", "0.61715627", "0.6137391", "0.6045606", "0.6036997", "0.60356927", "0.6027112", "0.59217256", "0.5908259", "0.5896088", "0.5891709", "0.58771986", "0.5860478", "0.5830904", "0.5827634", "0.57906765", "0.5786626", "0.57490546", "0.5732307", "0.57280576", "0.5725189", "0.5709903", "0.5678762", "0.56579894" ]
0.69430107
0
Returns whether or not a user can modify settings in a LocalSite. This checks that the user is either staff with the proper permissions, or that they're listed in the 'admins' field. By default, this is checking whether the LocalSite itself can be modified, but a different permission can be passed to check for another object.
def is_mutable_by(self, user, perm='site.change_localsite'): return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_modify_permissions(self, request, obj, local_site=None, *args,\n **kwargs):\n return obj.is_mutable_by(request.user, local_site=local_site)", "def is_local_administrator(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_IsLocalAdministrator', self.handle))", "def has_perm(self, user, perm, obj=None):\n if obj is not None and not isinstance(obj, LocalSite):\n logging.error('Unexpected object %r passed to has_perm. '\n 'Returning False.', obj)\n\n if settings.DEBUG:\n raise ValueError('Unexpected object %r' % obj)\n\n return False\n\n if not user.is_active:\n return False\n\n if obj is not None:\n if not hasattr(user, '_local_site_admin_for'):\n user._local_site_admin_for = {}\n\n if obj.pk not in user._local_site_admin_for:\n user._local_site_admin_for[obj.pk] = obj.is_mutable_by(user)\n\n if user._local_site_admin_for[obj.pk]:\n return perm in self._VALID_LOCAL_SITE_PERMISSIONS\n\n return super(StandardAuthBackend, self).has_perm(user, perm, obj)", "def user_can_edit_setting_type(user, model):\n return user.has_perm(\n \"{}.change_{}\".format(model._meta.app_label, model._meta.model_name)\n )", "def can_be_moderated_by(user):\n return user.is_active and user.is_staff and (\n user.has_perm('blog.change_membership') or\n user.has_perm('blog.change_blog'))", "def is_staff(self):\r\n return self.is_admin", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def is_staff(self):\n return self.is_admin", "def is_staff(self):\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def CAN_CHANGE_PERMISSIONS(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def user_is_admin(user):\n return user in admins", "def has_access_permissions(self, request, obj, local_site=None, *args,\n **kwargs):\n return obj.is_accessible_by(request.user, local_site=local_site)", "def is_staff(self):\n\t\treturn self.is_admin", "def checkIfAllowed(self, user):\n\n # Default case if mod access is not needed everyone has access\n if not self.modOnlyAccess:\n return True\n\n # Otherwise check the user's access level\n if user.modAccess == self.modOnlyAccess:\n return True\n else:\n return False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def hasPerm(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"perm_name\",\"admin_username\")\n if request.auth_name!=request[\"admin_username\"]: \n request.getAuthNameObj().canDo(\"SEE ADMIN PERMISSIONS\")\n return admin_main.getLoader().getAdminByName(request[\"admin_username\"]).hasPerm(request[\"perm_name\"])", "def _is_admin_only(self, pvm: PermissionView) -> bool:\n\n if (\n pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS\n and pvm.permission.name not in self.READ_ONLY_PERMISSION\n ):\n return True\n return (\n pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS\n or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS\n )", "def CAN_MODERATE(article, user): # pylint: disable=invalid-name\r\n 
return _is_staff_for_article(article, user)", "def get_sites_user_can_modify(user, project):\n if project.has_modify_permission(user):\n sites = Site.objects.filter(project=project)\n else:\n sites = get_objects_for_user(\n user,\n 'modify_site_data',\n klass=Site).filter(\n project=project)\n\n return sites", "def is_staff(self) -> bool:\n return self.is_admin", "def is_user_local_admin(user: str = None) -> bool:\n\n if not user:\n # Get current user\n user = whoami()\n\n local_admins = get_local_group_members(group_sid=\"S-1-5-32-544\")\n for local_admin in local_admins:\n if user.casefold() == local_admin[\"name\"].casefold():\n return True\n return False", "def edit_allowed(self):\n account = Account.current_user_account\n if account is None:\n return False\n return self.user_can_edit(account.user)", "def can_edit_user(user):\n\tu = current_user._get_current_object()\n\treturn u==user or u.is_admin()", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_user_allowed(self, user):\n return user.is_staff" ]
[ "0.69915265", "0.6767986", "0.6745964", "0.66542995", "0.6571868", "0.6538805", "0.65115726", "0.64365876", "0.64365876", "0.6423288", "0.6423288", "0.6423288", "0.6416041", "0.64117044", "0.6377946", "0.63580614", "0.6347583", "0.63408816", "0.6334341", "0.6306698", "0.63005733", "0.62996024", "0.62562263", "0.6244174", "0.62352884", "0.61995155", "0.61947393", "0.61725163", "0.61725163", "0.6151343" ]
0.7325419
0
Test user_id starts from one and increments by one
def test_user_id(self): new_user = self.app self.assertTrue(new_user.user_id, 0) new_user.create_user() self.assertTrue(new_user.user_id, 1) for key in new_user.users: self.assertEqual(new_user.user_id, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_identity(self) -> UserId:\n ...", "def increaseTimes(self, userId):\n for user in self.requestLog:\n if str(userId) == user[0]:\n user[1] += 1\n break", "def new_id(users):\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index", "def testLoginTwoUniqueUsersConsecutively(self):\n self.users.TESTAPI_resetFixture()\n\n self.users.add(\"happy\", \"birthday\")\n self.users.login(\"happy\", \"birthday\")\n self.users.login(\"happy\", \"birthday\")\n respData = self.users.login(\"happy\", \"birthday\")\n self.assertEquals(respData, 4)\n\n self.users.add(\"merry\", \"christmas\")\n respData = self.users.login(\"merry\", \"christmas\")\n self.assertEquals(respData, 2)\n respData = self.users.login(\"happy\", \"birthday\")\n self.assertEquals(respData, 5)", "def set_val_user_id():\n\n # Get the Max user_id in the database\n result = db.session.query(func.max(User.user_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next user_id to be max_id + 1\n query = \"SELECT setval('users_user_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id + 1})\n db.session.commit()", "def test_user_id_in_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def update_next_id(cls):\n cls.next_id += 1", "def test_user_id_get(self):\n pass", "def next_invoice_number(cls, user):\n cur_max = cls.query.filter_by(user_id=user.id).count()\n cur_max += 1\n\n return str(cur_max)", "def test_user_id_put(self):\n pass", "def test_count__same_user_id(collection, user_id, media_item, repo):\n insert1 = collection.insert_one(media_item | {\"id\": \"id1\", \"userId\": user_id})\n insert2 = collection.insert_one(\n media_item | {\"id\": \"id2\", \"userId\": \"test-other-user-id\"}\n )\n\n count = repo.count()\n\n assert count == 1", "def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)", "def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1", "def test_all__different_user_id(collection, user_id, media_item, repo):\n insert1 = collection.insert_one(media_item | {\"id\": \"id1\", \"userId\": user_id})\n insert2 = collection.insert_one(\n media_item | {\"id\": \"id2\", \"userId\": \"test-other-user-id\"}\n )\n\n documents = repo.all()\n\n ids = {doc[\"id\"] for doc in documents}\n assert \"id2\" not in ids", "def test_uidnext(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDNEXT 4392] 
Predicted next UID')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'UIDNEXT': 4392})", "def user(self, user):\n self.user_id = user.get_id()", "def create_id_number(self):\n id_number = get_random_string(8).lower()\n if User.objects.filter(id_number=id_number).first():\n self.create_id_number()\n\n return id_number", "def update_collection_num(user_id, another_user_id, is_add):\n\n user = db_session.query(User).filter_by(user_id=user_id).scalar()\n another_user = db_session.query(User).filter_by(\n user_id=another_user_id).scalar()\n if is_add:\n user.follow_num += 1\n another_user.be_followed_num += 1\n else:\n user.follow_num -= 1\n another_user.be_followed_num -= 1\n db_session.commit()", "def test_id_uniqueness(self):\n user_2 = User()\n self.assertNotEqual(self.user_1.id, user_2.id)", "def __getNewUserID(self):\n return db_main.getHandle().seqNextVal(\"users_user_id_seq\")", "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def set_val_user_id():\n\n # Get the max user_id in the database\n result = db.session.query(func.max(User.user_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next user_id to be max_id\n query = \"SELECT setval('users_user_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id})\n db.session.commit()", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(id='1', name='google',\r\n email='[email protected]')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id='10', name=self.name,\r\n email=self.email_addr)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"User should be the same\"\r\n print user.google_user_id\r\n assert user.google_user_id == '10', err_msg", "def same_user(user_id):\n return user_id == login_session['user_id']", "def test_enrol_user_invalid_userId(self):\r\n print(\"Enrol user invalid user id\")\r\n u_id = 9999\r\n c_id = 1\r\n\r\n prev_noEnrolments = len(Enrolment.query.all())\r\n self.assertEqual(self.system.create_enrolment(u_id, c_id), 0)\r\n curr_noEnrolments = len(Enrolment.query.all())\r\n self.assertEqual(prev_noEnrolments, curr_noEnrolments)", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def increment_login_attempts(self, increment):\r\n increment == self.login_attempts\r\n self.login_attempts += 1", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']" ]
[ "0.68791264", "0.6670912", "0.6601971", "0.6345259", "0.623204", "0.61628485", "0.6151919", "0.6070988", "0.59979314", "0.5988349", "0.59776485", "0.59571034", "0.59226906", "0.5910287", "0.5886617", "0.58862144", "0.58774656", "0.58749765", "0.5874914", "0.58122075", "0.58013636", "0.5777201", "0.57686114", "0.5763747", "0.57604474", "0.5751432", "0.57024246", "0.5692792", "0.5680527", "0.5673453" ]
0.68441284
1
Test Shoppinglist's dict is empty at first
def test_shoplists_dictionary(self):
        new_shoplist = self.app
        self.assertEqual(len(new_shoplist.shoplists), 0)
        new_shoplist.create_shoplist()
        self.assertIsInstance(new_shoplist, Shoppinglist)
        self.assertEqual(len(new_shoplist.shoplists), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def test_shopping_cart_is_empty(self):\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your shopping cart is empty.\")\n self.assertQuerysetEqual(response.context['contents'], [])", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_initially_empty(self):\n self.assertEqual(0, len(self.redis.redis[LIST1]))", "def test_get_list_empty(self):\r\n result = self.get_json(self.LIST_URI)\r\n self.assertEqual(result[\"count\"], 0)\r\n self.assertIsNone(result[\"next\"])\r\n self.assertIsNone(result[\"previous\"])\r\n self.assertEqual(result[\"results\"], [])", "def empty(self):\n if len(self.List_store) == 0:\n return True\n return False", "def empty(self):\n return False if self.items else True", "def empty(self):\n return False if self.items else True", "def empty(self):\n return False if self.items else True", "def test_shopping_cart_not_empty(self):\n expected_contents = self.fill_session_cart()\n response = self.client.get(self.SHOP_CART_URL)\n self.assertEqual(response.context['contents'], expected_contents)", "def test_listEmpty(self):\n store = Store()\n self.assertSuccessStatus(self._makeConfig(store), [\"list\"])\n self.assertIn(\"There are no ports configured.\", sys.stdout.getvalue())", "def test_empty_list(self):\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 200)\n\n response_json = response.json()\n self.assertEqual(response_json['count'], 0)", "def test_create_shoplist_without_user_fails(self):\n User.users = {}\n result = self.app.create_shoplist()\n expected = {1: {'user_id': 1, 'name': 'Apple', 'description': 'Fresh Green Apples'}}\n self.assertNotEqual(expected, result)", "def its_empty(self) -> bool:\n return self.items == []", "def helper_test_vessel_non_empty_list(self):\n url = reverse('vessel-list')\n response = self.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json_data = json.loads(response.content)\n is_empty = True\n if type(json_data) == list:\n is_empty = len(json_data) == 0\n\n self.assertEqual(is_empty, False)", "def test_query_with_no_matches_returns_nothing(test_store):\n items = list(test_store.get_by(name=\"Sugar\"))\n\n assert len(items) == 0", "def is_empty(self):\n return self.items == []", "def test_get_empty_product_list(self):\n response = self.client().get('/api/v1/products')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"There are no books\")\n self.assertEqual(response.status_code, 404)", "def is_empty(self):\n return self.id is None or self.nb_cart_items == 0", "def test_list_none(self):\n self.model.objects.all().delete()\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 0)", "def 
test_empty(self):\n pass", "def test_empty_referencelibrary():\n dict = {}\n reference_library = ReferenceLibrary(**dict)\n assert len(reference_library.books) == 0\n\n dict = {\n \"books\": []\n }\n reference_library = ReferenceLibrary(**dict)\n assert len(reference_library.books) == 0", "def test_empty_list(self):\n self.assertEqual(self.obj.to_json_string([]), '[]')", "def is_empty(self):\n if len(self.items) == 0:\n return True\n else:\n return False", "def empty(self):\n if len(self.store) == 0:\n return True", "def test_set_empty_field(self):\n self._p.fields = {}\n received = self._p.fields\n expected = {}\n msg = 'Setting field with empty list should not produce error.'\n self.assertDictEqual(received, expected, msg)", "def is_empty(self):\n\n return self.items == []", "def is_empty(self):\n\n return self.items == []", "def test_empty_list_of_dict_arg(self):\n self.assertEqual(self.obj.to_json_string([{}, {}]), '[{}, {}]')", "def test_successful_shoplist_creation(self):\n result = self.app.create_shoplist()\n expected = {5: {'user_id': 0, 'name': 'apples', 'description': 'Fresh Green Apples'}}\n self.assertEqual(expected, result)" ]
[ "0.6769545", "0.67441076", "0.66115814", "0.65915734", "0.6533823", "0.6522", "0.6508257", "0.6508257", "0.6508257", "0.6459706", "0.6428222", "0.6398528", "0.6377994", "0.62983876", "0.6295201", "0.6289959", "0.6289575", "0.6281986", "0.6245945", "0.6245707", "0.62340784", "0.6217479", "0.62023383", "0.61859936", "0.616393", "0.6137648", "0.6127936", "0.6127936", "0.6105945", "0.609291" ]
0.70351964
0
Test shoplist_id starts from one and increments by one
def test_shoplist_id(self):
        new_shoplist = self.app
        self.assertTrue(new_shoplist.shop_id, 0)
        new_shoplist.create_shoplist()
        self.assertTrue(new_shoplist.shop_id, 1)
        for key in new_shoplist.shoplists:
            self.assertEqual(new_shoplist.shop_id, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_id_in_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def test_create_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_list_increment_with_missing_index(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_if_app_gets_shoppinglists(self):\n li = self.client.get('/shoppinglists/?each_page=1&page_number=1',\n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(li.status_code, 200)", "def test_successful_shoplist_creation(self):\n result = self.app.create_shoplist()\n expected = {5: {'user_id': 0, 'name': 'apples', 'description': 'Fresh Green Apples'}}\n self.assertEqual(expected, result)", "def test_list_increment_with_valid_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"bin\": \"int_bin\", \"index\": 2, \"val\": 20}]\n\n _, _, bins = self.as_connection.operate(key, list)\n\n assert bins == {\"int_bin\": 23}\n _, _, bins = self.as_connection.get(key)\n\n assert bins[\"int_bin\"] == [1, 2, 23, 4]", "def testspecincrement(self):\n global idct\n a = Base()\n idct += 1\n self.assertEqual(a.id, idct)\n b = Base(19)\n self.assertEqual(b.id, 19)\n c = Base()\n idct += 1\n self.assertEqual(c.id, idct)\n d = Base()\n idct += 1\n self.assertEqual(d.id, idct)", "def test_list_identity(self):\n pass", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_list_increment_with_missing_bin(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"val\": 20}]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def test_shoplists_dictionary(self):\n new_shoplist = self.app\n self.assertEqual(len(new_shoplist.shoplists), 0)\n new_shoplist.create_shoplist()\n self.assertIsInstance(new_shoplist, Shoppinglist)\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def 
test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"[email protected]\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"[email protected]\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def incr_proof_item(item, start, n):\n item.id = incr_id_after(item.id, start, n)\n item.prevs = [incr_id_after(id, start, n) for id in item.prevs]\n if item.subproof:\n for subitem in item.subproof.items:\n incr_proof_item(subitem, start, n)", "def add_shopping_list(title, store_name):\n shopping_list = ShoppingList.query.filter_by(title=title, store_name=store_name).first()\n if shopping_list is None:\n shopping_list = ShoppingList(title=title, store_name=store_name)\n db.session.add(shopping_list)\n db.session.commit()\n else:\n return \"\"\n return shopping_list.id", "def incr_id_after(id, start, n):\n k = len(start)\n if len(id) >= k and id[:k-1] == start[:k-1] and id[k-1] >= start[k-1]:\n return id[:k-1] + (id[k-1] + n,) + id[k:]\n else:\n return id", "def index_already_there(index, given_list):\n\n # check if ID already exists\n already_there = False\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n already_there = True\n break\n else:\n print(\"Issue with list: item is not AutoBaseObject\")\n print(\" index=\\n\",index)\n sys.exit()\n return already_there", "def test_shelflist_firstitemperlocation_list(test_data, search, expected,\n api_settings, redis_obj,\n assemble_custom_shelflist,\n api_client, get_found_ids,\n do_filter_search):\n test_data_by_location = {}\n for test_id, _, rec in test_data:\n lcode = rec['location_code']\n recs = test_data_by_location.get(lcode, []) + [(test_id, rec)]\n test_data_by_location[lcode] = recs\n\n index = ShelflistItemIndex()\n for test_lcode, data in test_data_by_location.items():\n 
assemble_custom_shelflist(test_lcode, data, id_field='record_number')\n manifest = index.get_location_manifest(test_lcode)\n redis_key = '{}:{}'.format(REDIS_SHELFLIST_PREFIX, test_lcode)\n redis_obj(redis_key).set(manifest)\n\n resource_url = '{}firstitemperlocation/'.format(API_ROOT)\n rsp = do_filter_search(resource_url, search, api_client)\n rsp_items = rsp.data['_embedded']['items']\n\n if expected is None:\n for item in rsp_items:\n assert item['locationCode'] not in test_data_by_location.keys()\n else:\n for exp_id in expected:\n exp_row = [i[1] for i in test_data if i[0] == exp_id][0]\n item = [i for i in rsp_items if i['recordNumber'] == exp_id][0]\n exp_sli = '{}/shelflistitems/{}'.format(item['locationCode'],\n item['id'])\n assert item['rowNumber'] == exp_row\n assert item['_links']['shelflistItem']['href'].endswith(exp_sli)", "def increment_counter(self) -> None:", "def test_duplicate_ids():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001)['type'] == 'mp'\n assert query_row(db_conf, 'osm_buildings', 51011)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'", "def test_id_inc(self):\n keys = [\"a\", \"b\", \"c\"]\n trie = marisa_trie.Trie(keys)\n\n for i in range(len(keys)):\n self.assertEqual(i, trie[keys[i]])", "def test_creation_of_duplicate_service_in_store(self):\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n response3 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response3.status, \"409 CONFLICT\")\n self.assertIn(\"Sorry. 
Live at the yard already exists in this store.\", str(response3.data))", "def test_list(self):\n product_3 = Product.objects.create(\n name='Third product',\n sku='99999999',\n category=self.category_1,\n description='Displayed in FIRST page',\n price=129.99\n )\n Product.objects.create(\n name='Fourth product',\n sku='88888888',\n category=self.category_1,\n description='Displayed in SECOND page',\n price=129.99\n )\n expected = {\n 'count': 4,\n 'next': 'http://testserver/api/products/?page=2',\n 'previous': None,\n 'results': [\n {\n 'id': self.product_1.id,\n 'name': self.product_1.name,\n 'category': self.product_1.category.id,\n 'created': '2018-12-20T10:15:30Z',\n 'description': self.product_1.description,\n 'featured': self.product_1.featured,\n 'price': str(self.product_1.price),\n 'sku': self.product_1.sku\n },\n {\n 'id': self.product_2.id,\n 'name': self.product_2.name,\n 'category': self.product_2.category.id,\n 'created': '2018-12-20T10:15:30Z',\n 'description': self.product_2.description,\n 'featured': self.product_2.featured,\n 'price': str(self.product_2.price),\n 'sku': self.product_2.sku\n },\n {\n 'id': product_3.id,\n 'name': product_3.name,\n 'category': product_3.category.id,\n 'created': '2018-12-20T10:15:30Z',\n 'description': product_3.description,\n 'featured': product_3.featured,\n 'price': str(product_3.price),\n 'sku': product_3.sku\n }\n ]\n }\n token = Token.objects.create(user=self.user_1)\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(token)\n }\n response = self.client.get('/api/products/', **headers)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(response.json(), expected)", "def new_id(self):\n self.next += 1\n return self.next", "def test_update_shopping_cart(self):\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n item_price = self.expected_contents[index]['price']\n old_cost = self.expected_contents[index]['cost']\n\n increase_by = randint(5, 10)\n directions = [\n {\n 'action': 'increase',\n 'range': range(1, increase_by + 1)\n },\n {\n 'action': 'decrease',\n 'range': range(increase_by - 1, - 1, -1)\n }\n ]\n for direction in directions:\n for i in direction['range']:\n list_item[direction['action']].click()\n sleep(0.1)\n new_cost = int(list_item['cost'].text)\n new_food_cost = int(food_cost.text)\n self.assertTrue(new_food_cost - old_food_cost ==\n new_cost - old_cost == item_price * i)", "def update_next_id(cls):\n cls.next_id += 1", "def test_start_ids_1(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details, len(details))\n\t\tactual_num_layers = len(details)\n\t\texpected_num_layers = 11\n\t\texpected_ids = details.layer_id.to_numpy().tolist()\n\t\texpected_ids = [x+1 for x in expected_ids]\n\n\t\tself.assertEqual(actual_num_layers, expected_num_layers)\n\t\tself.assertEqual(len(expected_ids), expected_num_layers)\n\n\n\t\t# test decribe\n\t\tdetails = self.watcher.describe(start_ids=1)\n\t\tprint(details)\n\t\tactual_ids = details.layer_id.to_numpy().tolist()\n\t\tself.assertEqual(actual_ids,expected_ids)\n\n\t\t# test analyze: very slow\n\t\t# details = self.watcher.analyze(start_ids=1)\n\t\t# actual_ids = details.layer_id.to_numpy().tolist()\n\t\t# self.assertEqual(actual_ids,expected_ids)\n\n\t\tparams = 
DEFAULT_PARAMS.copy()\n\t\tparams[START_IDS]=1\n\t\tparams[MIN_EVALS]=1 # there may be a side effect that resets this\n\t\t\n\t\t# test iterator\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tnum = 0\n\t\tactual_ids = []\n\t\tfor ww_layer in iterator:\n\t\t\tself.assertGreater(ww_layer.layer_id,0)\n\t\t\tactual_ids.append(ww_layer.layer_id)\n\t\t\tnum += 1\n\t\t\tprint(num, ww_layer.layer_id)\n\t\tself.assertEqual(num,11)\n\t\tself.assertEqual(actual_ids,expected_ids)" ]
[ "0.71740144", "0.6463204", "0.60883856", "0.56410897", "0.56002337", "0.55714893", "0.54950434", "0.5471539", "0.54486376", "0.53863215", "0.53844637", "0.53601474", "0.535629", "0.52940756", "0.5292217", "0.52636516", "0.5251364", "0.5215265", "0.52150106", "0.52086705", "0.52076536", "0.51776075", "0.5155719", "0.51513535", "0.51475734", "0.51441336", "0.51439404", "0.50868547", "0.50862795", "0.50855225" ]
0.74459314
0
Test shoplist can be created
def test_create_shoplist(self):
        new_shoplist = self.app
        new_shoplist.create_shoplist()
        self.assertEqual(len(new_shoplist.shoplists), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_successful_shoplist_creation(self):\n result = self.app.create_shoplist()\n expected = {5: {'user_id': 0, 'name': 'apples', 'description': 'Fresh Green Apples'}}\n self.assertEqual(expected, result)", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_shoppinglist_creation(self):\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n res = self.app.post(\n '/shoppinglist', data={'list-name': 'Easter'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n self.assertIsInstance(response, list)\n self.assertIn(\"Easter\", str(res.data))", "def test_shoplists_dictionary(self):\n new_shoplist = self.app\n self.assertEqual(len(new_shoplist.shoplists), 0)\n new_shoplist.create_shoplist()\n self.assertIsInstance(new_shoplist, Shoppinglist)\n self.assertEqual(len(new_shoplist.shoplists), 1)", "def test_shoplist_id(self):\n new_shoplist = self.app\n self.assertTrue(new_shoplist.shop_id, 0)\n new_shoplist.create_shoplist()\n self.assertTrue(new_shoplist.shop_id, 1)\n for key in new_shoplist.shoplists:\n self.assertEqual(new_shoplist.shop_id, key)", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_create_new_shopping_list(create_shopping_list):\n shopping_list = create_shopping_list\n assert shopping_list.items.values_list().count() == 0\n assert shopping_list.budget == 0", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_if_app_can_search_for_existing_lists_with_products(self):\n product_to_add = {'product':'nikes', 'Quantity':3, 'Amountspent':5000}\n jsonproduct_to_add = json.dumps(product_to_add)\n add_list = self.client.post('/shoppinglists/',\n data = self.shopllist, \n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n add_product=self.client.post('/shoppinglist/shoes/items/',\n data=jsonproduct_to_add,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforproducts=self.client.get('/searchProduct/?q=nike',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n 
self.assertIn(\"Success\",str(searchforlists.data))\n self.assertIn(\"Success\",str(searchforproducts.data))\n self.assertEqual(searchforproducts.status_code,200)\n self.assertEqual(searchforlists.status_code,200)", "def test_create_shoplist_without_user_fails(self):\n User.users = {}\n result = self.app.create_shoplist()\n expected = {1: {'user_id': 1, 'name': 'Apple', 'description': 'Fresh Green Apples'}}\n self.assertNotEqual(expected, result)", "def test_shoppinglist_creation_with_error(self):\n res = self.app.post(\n '/shoppinglist', data={'name': 'Easter!'})\n self.assertEqual(res.status_code, 200)\n response = self.shopping_class_obj.create_list(\n 'Easter!', '[email protected]')\n self.assertIn(\"No special characters\", response)", "def test_create_new_shopping_list_correct_user(create_user, create_shopping_list): # noqa\n shopping_list = create_shopping_list\n owner = create_user\n assert shopping_list.owner == owner", "def test_wish_list(self):\n data = {\"name\": \"test list 1\"}\n response = self.client.post(\"/wish_list/\", data, format='json')\n self.assertEqual(response.status_code, 200)\n response = self.client.get(\"/wish_list/\")\n self.assertEqual(response.status_code, 200)\n # item = Item.objects.get(name=\"New Item\")\n # self.assertEqual(item.name(), \"New Item\")", "def test_shoppingcart_list(self):\n self.url = reverse(\"shoppingcart-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_shoppingcart_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n self._create_model(\"shoppingcart\", data, [ \"quantity\", \"discount_value\", \"is_closed\" ])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_create_order_list(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)", "def test_create(self):\n pass", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def test_user_id_in_shoplist(self):\n new_shoplist = self.app\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)\n new_shoplist.create_shoplist()\n for value in Shoppinglist.shoplists.values():\n for key in User.users:\n self.assertEqual(value['user_id']+1, key)", "def test_if_app_gets_shoppinglists(self):\n li = self.client.get('/shoppinglists/?each_page=1&page_number=1',\n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(li.status_code, 200)", "def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_shoppingitems_creation_with_error(self):\n # register 
and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread-'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread-', '[email protected]')\n # test response from shoppingitems class\n self.assertIn(\"No special characters\", response)\n # check if item was successfully created\n self.assertIn(\"No special characters\", str(res.data))", "def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')", "def test_user_create_list(self):\n c = Client()\n c.login(username= 'testuser', password = 'password')\n c.post('/inventory/list/add/', {'name': 'testname'})\n\n self.assertEqual(List.objects.get(name = 'testname'), List.objects.get(users__username = 'testuser'))", "def test_add_product(self):\n view = ProductCreateListView.as_view({'post': 'create'})\n uri = reverse('products:create/list-products')\n data = {\n \"name\": \"Iphone 7\",\n \"description\": \"Mobile phone\",\n \"price\": 200,\n \"is_available\": True\n }\n request = self.factory.post(uri, data, HTTP_AUTHORIZATION='Token {}'.format(self.token_admin.key))\n request.user = self.user['admin']\n response = view(request)\n self.assertEqual(response.status_code, 201,\n f'Expected Response Code 201, received {response.status_code} instead.')", "def testcreatelist(self):\n rv = self.app.get('/createcategory')\n self.assertEqual(rv.status_code, 302, \"createlist page should not load unless signed in\")", "def test_create_shopping_cart(self):\n client = APIClient()\n # First create a user\n Customer.objects.create_user(name=\"kevin\", email=\"[email protected]\", password=\"secret_pass\",\n shipping_region_id=1)\n\n # Then force login with that user\n url = reverse('login')\n data = {'email': \"[email protected]\", 'password': \"secret_pass\"}\n response = client.post(url, data, format='json')\n access_token = response.data['access']\n\n # Then add products to the shopping cart\n url = reverse('shopping_cart_add_product')\n data = {'cart_id': \"\", 'product_id': 1, 'attributes': \"Blue, XL\"}\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)\n response = client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0]['item_id'], 1)\n self.assertEqual(ShoppingCart.objects.count(), 1)", "def test_create_collection(self):\n pass" ]
[ "0.8193725", "0.7992437", "0.790068", "0.780376", "0.76251024", "0.7561878", "0.7440926", "0.7405313", "0.73396647", "0.69817495", "0.6914466", "0.68958485", "0.68450737", "0.6805334", "0.6804944", "0.6802431", "0.6802431", "0.676612", "0.6746457", "0.67143774", "0.66929317", "0.6676378", "0.6665702", "0.6633545", "0.66198134", "0.6606369", "0.6586576", "0.65748304", "0.6478081", "0.64518774" ]
0.84382606
0