Columns:
  query            string (length 9 to 9.05k)
  document         string (length 10 to 222k)
  metadata         dict
  negatives        sequence of 30 items
  negative_scores  sequence of 30 items
  document_score   string (length 4 to 10)
  document_rank    string (2 classes)
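For orientation, rows like the ones previewed below can be inspected with the Hugging Face datasets library. This is a minimal sketch only: the repo id and the "train" split name are placeholders, not taken from this card.

from datasets import load_dataset

# Placeholder repo id and split; substitute the actual dataset path.
ds = load_dataset("your-org/your-dataset-name", split="train")

row = ds[0]
print(row["query"])                 # e.g. "Gets the current features_selection"
print(row["document"][:80], "...")  # the matching code snippet
print(len(row["negatives"]))        # 30 mined negatives per row
print(row["negative_scores"][:3])   # scores stored as strings
print(row["document_score"], row["document_rank"])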
Gets the current features_selection
def _get_features_selection(self):
    self._validate_features_selection()
    if self.features_selection == "auto":
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return True
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.features_selection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSelection(self):\n return self.selection", "def GetSelection(self):\r\n\r\n return self.selection", "def GetSelection(self):\r\n\r\n return self._current", "def GetSelection(self):\n \n return self.selected", "def get_selected_nodes(self):\n return self._selection", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def currentSelection():\n sel = bpy.context.selected_objects\n if sel:\n return sel\n else:\n col = activeCollection()\n if col is not None:\n # Filter only mesh objects.\n return collectionMeshes(col)", "def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)", "def _getAsSelection(self):\n return self._asSelection", "def get_selected(self):\n return self.selected", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def current_choice(self):\n\t\treturn self.choice_data_list[self.select_index]", "def get_active(cls) -> FeatureSet:\n if cls._active_feature_set is None:\n raise RuntimeError('No FeatureSet being actively defined.')\n return cls._active_feature_set", "def get_features(self):\n return self._features", "def selection(self) -> str:\n return self._selection", "def GetSelection(self):\n return self.__selected_item", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def selected(self):\n return self._selected", "def GetSelection(self):\r\n \r\n return self._curpage", "def benchmark_selection(self):\n return self._benchmark_selection", "def get_selection(self, selection_name, format=None):", "def _feature_selection(self , x ,y):\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]", "def get_selected_features(dataset_features, model):\r\n model = SelectFromModel(model, prefit=True)\r\n feature_bool_mask = model.get_support()\r\n selected_features = dataset_features.columns[feature_bool_mask]\r\n transformed_dataset = pd.DataFrame(model.transform(dataset_features), columns=dataset_features.columns[feature_bool_mask], index=dataset_features.index)\r\n return selected_features, transformed_dataset", "def features(self):\n return self._features", "def 
__window_getSelection(self):\n return None", "def selected(self):\n return self._choices[self._selected][0]", "def select_features(\n self, x_train, y_train, estimator, selection_type, selection_params\n ):\n x_train = _check_X(x_train)\n if selection_type == \"regularization\":\n fe_sel_ = SelectFromModel(estimator)\n fe_sel_.fit(x_train, y_train)\n selected_feat = x_train.columns[(fe_sel_.get_support())]\n # get_support returns list of Bool values where a column is important or not\n return fe_sel_, selected_feat\n else:\n try:\n from mlxtend.feature_selection import (\n SequentialFeatureSelector as sfs,\n ) # noqa\n except ImportError as e:\n raise ImportError(\n \"{} using recursion requires {} from {}. \"\n \"You can install with `pip install {}`\".format(\n \"select_features\",\n \"SequentialFeatureSelector\",\n \"mlxtend\",\n \"mlxtend\",\n )\n ) from e\n fe_sel_ = sfs(estimator, **selection_params)\n fe_sel_.fit(x_train, y_train)\n return fe_sel_, fe_sel_.k_feature_names_", "def GetOldSelection(self):\r\n\r\n return self.old_selection", "def get_selected(self):\n return [sel.internalPointer().obj for sel in self.view.selectedIndexes()]" ]
[ "0.72452754", "0.7188117", "0.71176845", "0.6940054", "0.67970186", "0.6783767", "0.67610663", "0.6721341", "0.66002655", "0.6589361", "0.6549051", "0.6549051", "0.6516982", "0.64433926", "0.6428101", "0.64220285", "0.6418494", "0.6352769", "0.6308269", "0.62836784", "0.6261021", "0.6135232", "0.61306363", "0.6128573", "0.6118927", "0.61087346", "0.6099219", "0.60949004", "0.6089752", "0.6058946" ]
0.7440437
0
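Each row's metadata marks it as usable for a triplet objective over (query, document, negatives). The helper below is a sketch of how one row could be expanded into training triplets; the margin filter and all names are assumptions of this sketch, not part of the dataset.

def row_to_triplets(row, margin=0.05, max_negatives=5):
    # Build (anchor, positive, negative) triplets from one row.
    # Assumed heuristic: skip negatives whose score is within `margin`
    # of document_score, treating them as possible false negatives.
    pos_score = float(row["document_score"])
    triplets = []
    for neg, score in zip(row["negatives"], row["negative_scores"]):
        if pos_score - float(score) < margin:
            continue
        triplets.append((row["query"], row["document"], neg))
        if len(triplets) >= max_negatives:
            break
    return triplets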
Gets the current start_random_models
def _get_start_random_models(self):
    self._validate_start_random_models()
    if self.start_random_models == "auto":
        if self._get_mode() == "Explain":
            return 1
        if self._get_mode() == "Perform":
            return 5
        if self._get_mode() == "Compete":
            return 10
        if self._get_mode() == "Optuna":
            return 1  # just 1, because it will be tuned by Optuna
    else:
        return deepcopy(self.start_random_models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def get_random_start(self):\n arr = np.zeros(self.dimension)\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n arr[:n_fit_p] = self.get_random_fit_parameters\n arr[n_fit_p:n_fit_p+n_nui_p] = self.get_random_nuisance_parameters\n arr[n_fit_p+n_nui_p:] = self.get_random_wilson_coeffs_start\n return arr", "def getRandom(self):\n return random.choice(self.ls)", "def getStartState(self):\n return self._bot", "def getStartState(self):\n return self._bot", "def getStartState(self):\n \n pass", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def scenelist(self):\n return self.caller.player_ob.db.random_scenelist or []", "def getStartState(self):\n\t\tutil.raiseNotDefined()", "def getStartState(self):\n\t\tutil.raiseNotDefined()", "def readFirst(self):\n return self.models[0].time_next", "def get_seed(self):\n return self.solver.get_model_trues(start=0, end=self.n)\n\n # slower:\n # model = self.solver.get_model()\n # return [i for i in range(self.n) if model[i]]\n\n # slowest:\n # seed = []\n # for i in range(self.n):\n # if self.solver.model_value(i+1):\n # seed.add(i)\n # return seed", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def getStartState(self):\r\n\t\tutil.raiseNotDefined()", "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def generate_random_start_state(self) -> State:\n part_states = []\n random.shuffle(self.blocks)\n placed = []\n t = 0\n\n for block in self.blocks:\n if 1 / (t + 1) >= random.random():\n part_states.append(PartState(f'on({block.arguments[0]},table)'))\n else:\n rand = random.randint(0, len(placed) - 1)\n part_states.append(PartState(f'on({block.arguments[0]},{placed[rand]})'))\n\n placed.append(block.arguments[0])\n t += 1\n\n return State(set(part_states))", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def get_model_count(self):\n return len(self._model_start_i)", "def get_starting_node(self, graph):\n return random.choice(list(graph.nodes))", "def get_random(self):\n return self._get_random()", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def initLocalBestChoice(self):\n random.seed()\n return", "def availablemodels(self):\n return self.__models.keys()", "def random_start_probs(self) -> np.ndarray:\n return self.random_state.dirichlet(np.ones(self.n_states), size=1).flatten()", "def create_models(self):\n model_list = 
[]\n for i in range(0, len(self.X.cluster.unique())):\n foo_model = self.model\n foo_model.set_params(**self.best_params_list[i])\n model_list.append(foo_model)\n return model_list", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()" ]
[ "0.61098045", "0.5519804", "0.5495816", "0.5439313", "0.5439313", "0.54216105", "0.53990346", "0.53893465", "0.53497064", "0.53497064", "0.5334331", "0.53277075", "0.5320885", "0.5319373", "0.53144366", "0.5306886", "0.52948534", "0.5272874", "0.52695197", "0.5261325", "0.52470714", "0.5232264", "0.5231898", "0.5218057", "0.5217299", "0.52081203", "0.51753676", "0.51753676", "0.51753676", "0.51753676" ]
0.80309975
0
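The exact meaning of document_score and document_rank is not spelled out in this preview; a plausible but unconfirmed reading is that document_rank counts how many negatives score above the positive, which is 0 in the rows shown here. The check below makes that assumption explicit so it can be verified against the full data.

def positive_rank(row):
    # Assumed interpretation: number of negatives whose retrieval score
    # beats the positive document's score (0 = positive ranks first).
    pos = float(row["document_score"])
    return sum(float(s) > pos for s in row["negative_scores"])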
Gets the current hill_climbing_steps
def _get_hill_climbing_steps(self):
    self._validate_hill_climbing_steps()
    if self.hill_climbing_steps == "auto":
        if self._get_mode() == "Explain":
            return 0
        if self._get_mode() == "Perform":
            return 2
        if self._get_mode() == "Compete":
            return 2
        if self._get_mode() == "Optuna":
            return 0  # all tuning is done in Optuna
    else:
        return deepcopy(self.hill_climbing_steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_steps(self):\n return self.steps", "def get_steps(self):\n return self.steps", "def get_steps(self):\n return self.steps", "def getSteps( self ):\n\n return self.adb.get( 'steps' )", "def getSteps():", "def get_step(self):\n return self.step", "def get_step(self):\n return self.step", "def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]", "def getCurrentStep():", "def step(self):\n return self._step", "def raw_steps(self):\n return self.obj_payload[\"steps\"]", "def expansion_steps(self):\n return self._p", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def get_step(self) -> int:\n return self.step", "def get_view_steps(self):\n return self._data_dict[self.KEY_VIEW_STEPS]", "def getWorkflowSteps(self):\n\n return self.dbase.getProcessSteps(self.scene)", "def getStep():\n # TODO: can there be non-Step logs?", "def get_steps_num():\n return 0", "def step ( self ) :\n return self.__step", "def instruction_steps(self) -> Sequence['outputs.CodelessUiConnectorConfigPropertiesResponseInstructionSteps']:\n return pulumi.get(self, \"instruction_steps\")", "def cur_step(self):\n return self._cur_step", "def get_steps(self):\n return len(self.trajectory)", "def get_time_step(self):\n return self._time_step", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def current_step(self) -> FlowNode:\n return self._current_step", "def steps(self) -> pulumi.Output[Sequence['outputs.StepResponse']]:\n return pulumi.get(self, \"steps\")", "def timeStep(self):\n return self.params['h']" ]
[ "0.72193843", "0.71010685", "0.70357335", "0.66529846", "0.652234", "0.6352011", "0.6352011", "0.6341539", "0.6029986", "0.600624", "0.59786874", "0.594206", "0.59252524", "0.59252524", "0.59252524", "0.59252524", "0.58734", "0.58724695", "0.58696485", "0.5851444", "0.5811696", "0.5811048", "0.5738428", "0.57373434", "0.5737148", "0.56261575", "0.55965716", "0.557876", "0.55633664", "0.5551416" ]
0.7683898
0
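The positive documents in these rows all follow the same pattern: resolve an "auto" setting from the current mode, otherwise deep-copy the user-supplied value. The snippet below is a self-contained harness with assumed scaffolding (class name, validation stub) that exercises the hill-climbing getter shown above for each mode.

from copy import deepcopy

class _ModeConfig:  # assumed minimal stand-in for the original class
    def __init__(self, mode, hill_climbing_steps="auto"):
        self.mode = mode
        self.hill_climbing_steps = hill_climbing_steps

    def _get_mode(self):
        return self.mode

    def _validate_hill_climbing_steps(self):
        pass  # validation omitted in this sketch

    def _get_hill_climbing_steps(self):
        self._validate_hill_climbing_steps()
        if self.hill_climbing_steps == "auto":
            if self._get_mode() == "Explain":
                return 0
            if self._get_mode() == "Perform":
                return 2
            if self._get_mode() == "Compete":
                return 2
            if self._get_mode() == "Optuna":
                return 0
        else:
            return deepcopy(self.hill_climbing_steps)

for mode in ("Explain", "Perform", "Compete", "Optuna"):
    print(mode, _ModeConfig(mode)._get_hill_climbing_steps())
print("explicit:", _ModeConfig("Compete", hill_climbing_steps=5)._get_hill_climbing_steps())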
Gets the current top_models_to_improve
def _get_top_models_to_improve(self):
    self._validate_top_models_to_improve()
    if self.top_models_to_improve == "auto":
        if self._get_mode() == "Explain":
            return 0
        if self._get_mode() == "Perform":
            return 2
        if self._get_mode() == "Compete":
            return 3
        if self._get_mode() == "Optuna":
            return 0
    else:
        return deepcopy(self.top_models_to_improve)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top_models(self, return_scores=True):\n self.greater_score_is_better = is_greater_better(self.scoring_function)\n model_names = list(set([key.split('(')[0] for key in\n self.evaluated_individuals_.keys()]))\n models = OrderedDict({model: [] for model in model_names})\n for k in self.evaluated_individuals_:\n models[k.split('(')[0]].append(self.evaluated_individuals_[k])\n for model_name in model_names:\n models[model_name]=sorted(models[model_name],\n key=lambda x: x['internal_cv_score'],\n reverse=self.greater_score_is_better)\n self.models = models\n top_models = {model: models[model][0] for model in models}\n self.top_models = OrderedDict(\n sorted(top_models.items(),\n key=lambda x:x[1]['internal_cv_score'],\n reverse=self.greater_score_is_better))\n scores = {model: self.top_models[model]['internal_cv_score']\\\n for model in self.top_models}\n self.top_models_scores = OrderedDict(sorted(\n scores.items(), key=lambda x: x[1],\n reverse=self.greater_score_is_better))\n if return_scores:\n return self.top_models_scores\n else:\n return self.top_models", "def get_top_model(self):\n model = ModelHelper.load_model(filename_weight=self.top_model_path + '.h5',\n filename_model=self.top_model_path + '.json')\n\n return model", "def top(self):\r\n return self.topele", "def top(self):", "def get_best_model(self):\n return self.best_model", "def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)", "def top(self, **kwargs):\n return self.client.api.top(self.id, **kwargs)", "def get_top(self, model, limit=10, inverted=False):\n content_type= ContentType.objects.get_for_model(model)\n\n #Get a queryset of all the objects of the model. Get their scores\n results = self.filter(content_type=content_type).values('object_id').annotate(score=Sum('vote'))\n if inverted:\n results = results.order_by('score')\n else:\n results = results.order_by('-score')\n\n #We have a iterable list of objects of the requested model and their respective scores\n # Use in_bulk() to avoid O(limit) db hits.\n class_name = content_type.model_class()\n objects = class_name.objects.in_bulk([item['object_id'] for item in results[:limit]])\n\n # Yield each object, score pair. 
Because of the lazy nature of generic\n # relations, missing objects are silently ignored.\n\n for item in results[:limit]:\n id, score = item['object_id'], item['score']\n\n if not score:\n continue\n\n if int(id) in objects:\n yield objects[int(id)], int(score)", "def top(self):\n return self[0]", "def get_best_model_configs(self):\n self.best_models = {}\n with self.database:\n cur = self.database.cursor()\n for model in self.active_models:\n if self.tuning_depth == 'minimal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results\")\n elif self.tuning_depth == 'normal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n elif self.tuning_depth == 'maximal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n # TODO not implimented, same as normal\n self.best_models[model] = list(a)[0][0]", "def get_latest_model():\n return get_models()[-1]", "def top(self):\n return self.List_store[len(self.List_store)]", "def top(self) -> Optional[FloatObject]:\n return self.get(\"/Top\", None)", "def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)", "def top(self):\n return self.q1.return_top()", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def top(self, **kwargs) -> Dict[str, Any]:", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def get_top_pages(model=None):\n return get_page_children(page=None, model=model)", "def top(self):\n return self._top", "def top(self):\n return self._top", "def get_model_topN_accuracies(self):\n\n accuracy = {}\n filename = self._get_data_filename(\"test_eval.json\")\n\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n accuracy[\"top1\"] = _format_float(100 * (1.0 - float(results[\"average_top1_error\"])))\n accuracy[\"top5\"] = _format_float(100 * (1.0 - float(results[\"average_top5_error\"])))\n\n return accuracy", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', 
keep=\"first\")[\n :top]\n\n return result", "def top( self , i , j ):\n return self._get_top( i , j )" ]
[ "0.6821835", "0.6430581", "0.629004", "0.62063706", "0.6155155", "0.6126578", "0.5943312", "0.59303916", "0.59013474", "0.5836023", "0.5798655", "0.5795692", "0.56650555", "0.565281", "0.56480753", "0.5645917", "0.56216484", "0.55647224", "0.55647224", "0.55647224", "0.55510247", "0.55452687", "0.55452687", "0.5527156", "0.5524048", "0.5524048", "0.5524048", "0.54983", "0.5487157", "0.5482859" ]
0.8301511
0
Gets the current boost_on_errors
def _get_boost_on_errors(self):
    self._validate_boost_on_errors()
    if self.boost_on_errors == "auto":
        val = self._get_validation_strategy()
        if val.get("validation_type", "") == "custom":
            return False
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return False
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.boost_on_errors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_raise_on_error():\n global raise_on_error\n return raise_on_error", "def back_err(self):\n return self._derived_properties[\"bkgd_err\"]", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def getB(self):\n return self.error", "def getErrorHandler(self):\n pass", "def check_errors(self) -> None:\n # TODO check the manual for error codes & interpert them.\n return self.send(self.cmd.GET_GLOBALSTATUS_CURRENTERROR)", "def errors(self):\n return self.__errors", "def get_error_log(self) -> Any:\n return self.err", "def getErrors(self):\n return self.errors", "def error(self):\n return self['error']", "def failed_on(self):\n return self._failed_on", "def errors(self):\n return self._errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error(self):\n return self._error", "def errorbars (self):\n return self._errorbars", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def get_error(self):\n return self.exc_info", "def getErrorLog(self):\n return _libsbml.SBMLValidator_getErrorLog(self)", "def validation_errors(self):\n return self._validation_errors", "def errors():\n return THE_LOGGER.errors", "def error(self) -> list:\n return self.__err", "def error(self):\n return self.get('error')", "def Errors(self):\n return self._get_attribute('errors')", "def on_failure(self):\n return self._on_failure", "def errors(self):\n raise NotImplementedError", "def geterr():\n return __errprof.state.copy()" ]
[ "0.6346581", "0.6290803", "0.61281234", "0.60806453", "0.59473616", "0.5876309", "0.585333", "0.5807219", "0.5801431", "0.5784405", "0.57784134", "0.576894", "0.57465225", "0.57465225", "0.57174766", "0.5715621", "0.5715621", "0.5715621", "0.5680192", "0.56616455", "0.5624751", "0.5611684", "0.55759263", "0.55685544", "0.55388784", "0.5533462", "0.55222183", "0.55069", "0.54646885", "0.54543144" ]
0.7447873
0
Gets the current kmeans_features
def _get_kmeans_features(self):
    self._validate_kmeans_features()
    if self.kmeans_features == "auto":
        if self._get_mode() == "Explain":
            return False
        if self._get_mode() == "Perform":
            return False
        if self._get_mode() == "Compete":
            return True
        if self._get_mode() == "Optuna":
            return False
    else:
        return deepcopy(self.kmeans_features)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_features(self):\n return self._features", "def features(self):\n return self._features", "def features(self):\n\n return self._features", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def findK_centroids_average(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = np.array(kmeans.labels_.tolist())\n centers = np.array(kmeans.cluster_centers_)\n\n # KeyPoint(x,y,size) -required\n\n final_kps = []\n final_des = []\n final_pts = []\n\n for cluster in range(clusters):\n indices = np.where(m_clusters == cluster)\n cluster_kps_size = np.mean(np.array([x.size for x in kps[indices]]))\n cluster_des = des[indices]\n\n average_des = np.mean(cluster_des, axis=0)\n cluster_kps = cv2.KeyPoint(x=centers[cluster][0], y=centers[cluster][1], _size=cluster_kps_size)\n\n final_kps.append(cluster_kps)\n final_des.append(average_des)\n final_pts.append([centers[cluster][0], centers[cluster][1]])\n\n final_pts = np.array(final_pts)\n final_des = np.array(final_des)\n final_kps = np.array(final_kps)\n\n result = InnerFeatures(kps=final_kps, des=final_des, pos=final_pts)\n return result", "def features(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:\n return self._features", "def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion", "def get_features(self):\n return []", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def get_cat3_features(self):\n return self.category3_features", "def features(self) -> Optional[pulumi.Input['ProvisionedClustersCommonPropertiesFeaturesArgs']]:\n return pulumi.get(self, \"features\")", "def matrix_features(self):\n return self._matrix_features", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def feature_matrix(self):\n return self._feat_matrix", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = 
KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def __kmeans(self, points):\n # Prepare initial centers using K-Means++ method.\n initial_centers = kmeans_plusplus_initializer(points, 10).initialize()\n # Create instance of K-Means algorithm with prepared centers.\n self.__kmeans_instance = kmeans(sample, initial_centers)\n # Run cluster analysis and obtain results.\n kmeans_instance.process()\n kclusters = kmeans_instance.get_clusters()\n kcenters = kmeans_instance.get_centers()\n return kclusters, kcenters", "def features(self):\n return self.shape[2]", "def get_feature_labels(self):\n return self.feature_labels", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def get_other_features(self):\n return self.other_features", "def get_cat1_features(self):\n return self.category1_features", "def kmeans(points,n_clusters):\n # create kmeans object\n kmeans = KMeans(n_clusters=n_clusters)\n # fit kmeans object to data\n kmeans.fit(points)\n # print location of clusters learned by kmeans object\n print(kmeans.cluster_centers_)\n # save new clusters for chart\n y_km = kmeans.fit_predict(points)\n\n print('Clusters partition: ', Counter(y_km))\n \n return y_km, kmeans", "def kmeans(k, descriptor_list):\r\n kmeans = KMeans(n_clusters = k, n_init=10, verbose = 1) \r\n kmeans.fit(descriptor_list)\r\n visual_words = kmeans.cluster_centers_ \r\n return visual_words", "def get_clusters(self):\r\n\r\n return self.__clusters", "def features(self) -> List[np.ndarray]:\n return None", "def read_features(self):\r\n def unpack_keypoint(data):\r\n try:\r\n kpts = data['keypoints']\r\n desc = data['descriptors']\r\n keypoints = [cv.KeyPoint(x, y, _size, _angle, _response, int(_octave), int(_class_id))\r\n for x, y, _size, _angle, _response, _octave, _class_id in list(kpts)]\r\n return keypoints, np.array(desc)\r\n except(IndexError):\r\n return np.array([]), np.array([])\r\n try:\r\n data = np.load(self.features_path + self.id + \".npz\")\r\n self.keypoints, self.descriptors = unpack_keypoint(data)\r\n logging.info(f\"Existing features for {self.name} found in features directory.\")\r\n except FileNotFoundError:\r\n logging.info(f\"Features for {self.name} not found in {self.features_path}.\")", "def get_local_features(self, img):\n kp, des = self.fe.detectAndCompute(img, None)\n return kp, des", "def get_centroids(self) -> Dict[str, np.ndarray]:\n assert self._centroids != {}\n return self._centroids", "def features(self) -> List[Feature]:\n return self._features", "def findK_centroids_closest(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = kmeans.labels_.tolist()\n centers = np.array(kmeans.cluster_centers_)\n\n closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, pts)\n\n assert len(set(closest)) == clusters\n\n result = InnerFeatures(kps[closest], des[closest], pts[closest])\n return result" ]
[ "0.6775403", "0.6481033", "0.63745344", "0.63635355", "0.62884897", "0.62546325", "0.612313", "0.6063174", "0.5997075", "0.5957198", "0.5954714", "0.5915384", "0.58808947", "0.58574826", "0.58393997", "0.58329165", "0.5800852", "0.57675624", "0.57581013", "0.57575", "0.5742533", "0.5737699", "0.5731866", "0.57173544", "0.5700815", "0.56965566", "0.5691166", "0.5670271", "0.56661594", "0.5664207" ]
0.7356534
0
Gets the current max_single_prediction_time
def _get_max_single_prediction_time(self):
    self._validate_max_single_prediction_time()
    if self.max_single_prediction_time is None:
        if self._get_mode() == "Perform":
            return 0.5  # prediction time should be under 0.5 second
        return None
    else:
        return deepcopy(self.max_single_prediction_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def max_time(self):\n return self._max_time", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def max_time(self):\n return self._ll_tree_sequence.get_max_time()", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def get_inference_time(self):\n return self._engine.get_inference_time()", "def getDefaultTime(self):\n return max(tvp[0] for tvp in self.timeValuePairs)", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def last_tick_time(self):\n return self.last_tick_", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def time_to_target_training(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target_training\"))\r\n return self._training_modes[0]", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "def getLastestTime(self):\n if not self.cache_times:\n return None\n return self.cache_times[-1]", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def max_retire_time(self):\n return self._max_retire_time", "def _get_max_t(self):\n \"\"\"\n if hasattr(self,'k_of_t'):\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n self.k_of_t[-1][0],\n ])\n else:\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])\n \"\"\"\n return self.t_max", "def get_max_end_time(self):\n max_end_time = 1.\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n end_time = timestamps_dset[-1]\n if end_time > max_end_time: max_end_time = end_time\n file.close()\n return max_end_time", "def get_time(self) -> float:\n raise NotImplementedError()", "def get_last_time(self):\n \n return self._last", "def max_delay_time(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"max_delay_time\")", "def computation_time(self) -> float:\r\n if self._computation_times is None:\r\n return None\r\n else:\r\n return self._computation_times[0]", "def max_time(self) -> str:\n return self._max_time", "def last_time(self) -> datetime:\n return self.activities[-1].timestamp", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def get_time(self):\n return self.get_timed() / 
10.0", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9" ]
[ "0.713428", "0.7000223", "0.6750113", "0.67000544", "0.66799814", "0.664459", "0.6637974", "0.65101796", "0.64204127", "0.63254064", "0.63026834", "0.62825453", "0.62509656", "0.6248696", "0.6228762", "0.6212105", "0.61944056", "0.6155345", "0.614388", "0.61273146", "0.6119531", "0.6035327", "0.60263294", "0.6002067", "0.60004765", "0.5997111", "0.59815276", "0.5949207", "0.5942173", "0.5938999" ]
0.903874
0
Gets the current optuna_time_budget
def _get_optuna_time_budget(self):
    self._validate_optuna_time_budget()
    if self.optuna_time_budget is None:
        if self._get_mode() == "Optuna":
            return 3600
        return None
    else:
        if self._get_mode() != "Optuna":
            # use only for mode Optuna
            return None
        return deepcopy(self.optuna_time_budget)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def budget(self):\n return self._budget", "def last_optime(self):\n return self._last_optime", "def adoption_time(self):\n return self._adoption_time", "def get_time_step_to_enqueue(self):\n return self.time_step_to_enqueue", "def _get_total_time_limit(self):\n self._validate_total_time_limit()\n if self._get_mode() == \"Optuna\":\n return None # there no training limit for model in the Optuna mode\n # just train and be happy with super models :)\n return deepcopy(self.total_time_limit)", "def get_time(self):\n return self.run_command('get_time')[0]", "def get_current_time_lag_min(self):\n self.current_time_lag_min = self.get_timelag()[0] // 60", "def gettime(self):\n return self.t", "def getSelectedShowtime(self):\n\n cur = self.current()\n if cur < 0:\n return None\n else:\n return self.theater.showtimes(self.showtimeIds[cur])", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def get_t(self):\n return self.t", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def free_flight_time(self):\n return self._free_flight_time", "def dep_time(self):\n return self._dep_time", "def get_current_timeout(cls):\n return cls.current().get_timeout()", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)", "def brasilia_time():\n brasilia_time = pd.Timestamp.now('UTC') - pd.Timedelta(hours=3)\n return brasilia_time", "def getTime(self):\n return self.time", "def current_period(self):\n return self._current_period", "def get_time(self):\n return self._total_time", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def time(self, k):\n \n it = Historial.__getitem__(self, k)\n if it != None:\n return it[0]\n else:\n return None", "def getTime(self) -> float:\n return self.t", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? 
AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def get_time(self):\n return self.time_param", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def solution_meeting_time(self):\n for s in self.vars_meeting_time:\n if self.vars_meeting_time[s].solution_value():\n return s\n return None" ]
[ "0.6425248", "0.6043277", "0.5918131", "0.57312304", "0.56376636", "0.55601627", "0.55194855", "0.54705125", "0.5403898", "0.53979033", "0.539061", "0.5377435", "0.53700995", "0.53586614", "0.53013504", "0.52734435", "0.52604705", "0.524631", "0.5189846", "0.5182123", "0.51480925", "0.51345056", "0.5129658", "0.51287425", "0.51148325", "0.51055187", "0.51004833", "0.5093716", "0.5093716", "0.50905025" ]
0.8460654
0
Gets the current optuna_init_params
def _get_optuna_init_params(self): self._validate_optuna_init_params() if self._get_mode() != "Optuna": # use only for mode Optuna return {} return deepcopy(self.optuna_init_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getInitParams(self):\n return {}", "def _get_current_training_params(self):\n params = {}\n params[\"lyap_relu_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.lyapunov_relu.state_dict())\n if not self.R_options.fixed_R:\n params[\"R_params\"] = self.R_options._variables.clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n params[\"controller_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.system.controller_network.\n state_dict())\n return params", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def initial_parameters(self):\n return self._initial_parameters", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def 
getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['base'] = self.base\n return paramDict", "def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def load_params(self):\n return self.params" ]
[ "0.67133015", "0.6513982", "0.64959013", "0.64958394", "0.644193", "0.644193", "0.64085335", "0.6405476", "0.6403037", "0.63784325", "0.63532674", "0.63532674", "0.63532674", "0.63532674", "0.6348272", "0.6348272", "0.6348272", "0.6348272", "0.6346435", "0.6290466", "0.62406987", "0.6235463", "0.61874795", "0.6179658", "0.6179658", "0.61690176", "0.6144498", "0.6127659", "0.6100183", "0.6099213" ]
0.87101316
0
Gets the current optuna_verbose
def _get_optuna_verbose(self): self._validate_optuna_verbose() # use only for mode Optuna if self._get_mode() != "Optuna": return True return deepcopy(self.optuna_verbose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_verbose(self):\n self._validate_verbose()\n return deepcopy(self.verbose)", "def verbose(self):\n return self.conf.get(\"verbose\")", "def verbose():\n return _verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def getVerbose(self):\n return self.__VERBOSE", "def verbose(self):\n\n return self._verbose", "def isVerbose(self):\n return self.opts.verbose", "def _verbose(self):\n return self._toBool(os.environ.get('VERBOSE', 0))", "def is_verbose():\n return g_verbose", "def verbose( self ):\n return Verbose.__level", "def verbose():\n return Verbose.level()", "def is_verbose() -> bool:\n return VERBOSE", "def verbosity(self):\n return self._get('verbosity')", "def verbose(self):\n enabled = self.lib.iperf_get_verbose(self._test)\n\n if enabled:\n self._verbose = True\n else:\n self._verbose = False\n\n return self._verbose", "def verbosity(self):\n return self._verbosity", "def verbose_str(self):\n return self.summary.verbose(self.results) or ''", "def verbose(self):\n verbose = self.__class__.__name__ + \", alpha: \" + str(self.alpha)\n return verbose", "def verbose():\n GLOBAL['VERBOSE'] = True", "def verbosePref(self):\n # If the level of the object is below the Preference level,\n # recursively calls base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(VERBOSE_PREF, self._verbose_pref.level)[0]", "def enable_verbose(self):\n self.verbose = True", "def verbose(value=None):\n global verbosity\n\n if value != None:\n verbosity = value\n \n try:\n rval = verbosity\n except NameError:\n verbosity = False\n rval = verbosity\n\n return rval", "def _set_verbose(value):\n global VERBOSE\n VERBOSE = value", "def gnupg_verbose():\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n return [\"--verbose\"]\n\n return [\"-q\"]", "def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")", "def tunnel1_log_options(self) -> pulumi.Output['outputs.VpnConnectionTunnel1LogOptions']:\n return pulumi.get(self, \"tunnel1_log_options\")", "def option(self):\r\n return conf.lib.clang_getDiagnosticOption(self, None)", "def set_verbose(self, v):\n self._verbose = bool(v)" ]
[ "0.78699535", "0.7726708", "0.7537154", "0.746297", "0.746297", "0.746297", "0.746297", "0.7356259", "0.73098314", "0.7288619", "0.71457535", "0.70966977", "0.7072135", "0.70582813", "0.66449106", "0.65642446", "0.65546435", "0.650735", "0.6440845", "0.6396217", "0.63302183", "0.6306379", "0.6233047", "0.6164169", "0.6089423", "0.607687", "0.60681003", "0.6021105", "0.6017878", "0.599446" ]
0.8742914
0
Gets the current n_jobs
def _get_n_jobs(self): self._validate_n_jobs() return deepcopy(self.n_jobs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_n_jobs(self):\n return self.n_jobs", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def get_num_jobs(self):\n return str(self.num_jobs)", "def num_worker(self):\n return self.config.get(\"jobs\", 4)", "def _get_njobs_in_queue(self, username):", "def num_jobs(self):\n return self.jobs.qsize()", "def jobs(self):\n return self.get_jobs()", "def check_n_jobs(n_jobs):\n # scikit-learn convention\n # https://scikit-learn.org/stable/glossary.html#term-n-jobs\n if n_jobs is None:\n return 1\n elif not is_int(n_jobs):\n raise ValueError(f\"`n_jobs` must be None or an integer, but found: {n_jobs}\")\n elif n_jobs < 0:\n return os.cpu_count()\n else:\n return n_jobs", "def __number_of_jobs__(self):\n # | - __number_of_jobs__\n num_jobs = 0\n\n # Regular jobs\n if self.job_var_lst is not None:\n num_jobs = len(self.job_var_lst)\n\n # Individual dir jobs\n if self.indiv_dir_lst is not None:\n num_jobs += len(self.indiv_dir_lst)\n\n\n return(num_jobs)\n # __|", "def jobs(self):\n return self._jobs", "def jobserver_running_jobs():\n\n if _MakeJobServer._singleton is None:\n return '?'\n\n try:\n buf = array.array('i', [0])\n if fcntl.ioctl(_MakeJobServer._singleton.job_pipe[0], FIONREAD, buf) == 0:\n return _MakeJobServer._singleton.num_jobs - buf[0]\n except NotImplementedError:\n pass\n except OSError:\n pass\n\n return _MakeJobServer._singleton.num_jobs", "def check_n_jobs(n_jobs):\n if n_jobs is None:\n return 1\n elif not is_int(n_jobs):\n raise ValueError(f\"`n_jobs` must be None or an integer, but found: {n_jobs}\")\n elif n_jobs < 0:\n return cpu_count() + n_jobs + 1\n else:\n return min(n_jobs,cpu_count())", "def numSubmitted(self):\n return len(self.__submittedJobs)", "def set_n_jobs(self, new_n_jobs=None):\n self.n_jobs = new_n_jobs", "def jobserver_max_jobs():\n\n if _MakeJobServer._singleton is not None:\n return _MakeJobServer._singleton.num_jobs\n else:\n return 0", "def jobs(self):\n raise NotImplementedError()", "def nworkers(self):\n return len(self._workers)", "def getnoofjobs(self):\n select_noofjobs = (\n \"select count(*) from public.jobs where latestjobversion=True \"\n \"and insertdate between Date(%s) and Date(%s) \"\n \"and (username not in (%s))\"\n )\n\n\n\n self.pgcursor.execute(select_noofjobs, (self.startdate, self.enddate, self.adminusers))\n\n noofjobs = 0\n count = self.pgcursor.fetchone()\n if count is not None:\n noofjobs = count[0]\n\n # print(str.format(\"total no of jobs: {0}\", noofjobs))\n return noofjobs", "def get_waiting_jobs(self):\n return []", "def __init__(self, n_jobs=1, verbose=True):\n self.n_jobs = n_jobs\n self.verbose = verbose", "def count(self):\n # no auth?\n return self.app.db.jobs.count()", "def get_njobs_in_queue(self, username=None):\n if username is None: username = getpass.getuser()\n njobs, process = self._get_njobs_in_queue(username=username)\n\n if process is not None and process.returncode != 0:\n # there's a problem talking to squeue server?\n err_msg = ('Error trying to get the number of jobs in the queue' +\n 'The error response reads:\\n {}'.format(process.stderr.read()))\n logger.critical(err_msg)\n\n if not isinstance(self, ShellAdapter):\n logger.info('The number of jobs currently in the queue is: {}'.format(njobs))\n\n return njobs", "def n_worker(self):\n return self.redis.pubsub_numsub(MSG)[0][-1]", "def _check_n_jobs(n_jobs):\n _check_type(n_jobs, 
(\"int\",), \"n_jobs\")\n if n_jobs <= 0:\n n_cores = mp.cpu_count()\n n_jobs_orig = n_jobs\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError(\n f\"If n_jobs has a non-positive value ({n_jobs_orig}), it must \"\n f\"not be less than the number of CPUs present ({n_cores}).\"\n )\n return n_jobs", "def get_ncores(self):\n return self._ncores", "def num_workers(self):\n return self._num_workers", "def getWorkers(self):\n return self.workers", "def get_n_workers(self):\n return self.df.worker.nunique()", "def get_num_parallel_workers():\n return _config.get_num_parallel_workers()", "def getJobID(self):\n return self.__nupicJobID" ]
[ "0.8941863", "0.80259633", "0.79868776", "0.78013074", "0.78005415", "0.7614501", "0.72230357", "0.71267", "0.70833606", "0.703076", "0.69810003", "0.69787186", "0.6843887", "0.6817876", "0.674785", "0.666808", "0.66524726", "0.6553903", "0.6538402", "0.65348506", "0.6515888", "0.651586", "0.6515622", "0.6509251", "0.64978224", "0.6491501", "0.64796466", "0.6465163", "0.64496046", "0.6389787" ]
0.84748805
1
Gets the current random_state
def _get_random_state(self): self._validate_random_state() return deepcopy(self.random_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rand(self):\n return self.State.rand()", "def getstate(self):\n return (self.baseseed, self.counter, self.randbits_remaining)", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state" ]
[ "0.8123552", "0.7858806", "0.7808915", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387" ]
0.8489528
0
Gets the fairness metric
def _get_fairness_metric(self): self._validate_fairness_metric() if self.fairness_metric == "auto": if self._get_ml_task() == BINARY_CLASSIFICATION: return "demographic_parity_ratio" if self._get_ml_task() == REGRESSION: return "group_loss_ratio" if self._get_ml_task() == MULTICLASS_CLASSIFICATION: return "demographic_parity_ratio" else: return deepcopy(self.fairness_metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_fairness_threshold(self):\n if self.fairness_threshold == \"auto\":\n if self._get_ml_task() in [\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n ]:\n thresholds = {\n \"demographic_parity_difference\": 0.1,\n \"demographic_parity_ratio\": 0.8,\n \"equalized_odds_difference\": 0.1,\n \"equalized_odds_ratio\": 0.8,\n }\n return thresholds.get(self._fairness_metric, 0.8)\n elif self._get_ml_task() == REGRESSION:\n thresholds = {\n \"group_loss_ratio\": 0.8,\n }\n if self._fairness_metric == \"group_loss_difference\":\n raise AutoMLException(\n \"We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor.\"\n )\n return thresholds.get(self._fairness_metric, 0.8)\n else:\n return deepcopy(self.fairness_threshold)", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def f(ns, k):\n ns = list(enumerate(sorted(ns)))\n n = len(ns)\n min_u = unfair(ns[:k])\n nns = ns[k:]\n def g(mu, n):\n (i, n) = n\n unfairness = n - ns[i - k + 1][1]\n # print(i, n, unfairness)\n if unfairness < mu:\n return unfairness\n return mu\n return reduce(g, nns, min_u)", "def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n 
#------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def sampling_priority(self):\n # type: () -> Optional[NumericType]\n return self._metrics.get(SAMPLING_PRIORITY_KEY)", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def get_score(self):\r\n return None", "def get_serendipity_val(dic, key):\n # The key was in the training set\n try:\n return dic[key]\n # The key wasn't in the training set, then the serendipity is 1\n except KeyError:\n return 1.0", "def get_metric(self) -> mt.Metric:\n return mt.BinaryAccuracy()", "def best_metric(self) -> float:\n return self._best_metric", "def max_staleness(self) -> str:\n return pulumi.get(self, \"max_staleness\")", "def coherence(self):\r\n return np.abs(self.coherency) ** 2", "def calc_fair_profit(self, assignment):\n fair_profit = {t:0 for t in self.tasks}\n for agent, tasks in assignment.items():\n for task in tasks:\n fair_profit[task] += self.profit(agent, task)\n return min(fair_profit.values())", "def worst_score(self):\r\n pass", "def coherency(self):\r\n coherency = tsa.cache_to_coherency(self.cache, self.ij)\r\n\r\n return coherency", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def get_performance(self):\n if self.skip_reference:\n return self.compare_sim.tps\n\n 
# Avoid divide by zero errors when the simulation is not executed.\n if self.reference_sim.tps == 0:\n return 0\n\n t0 = 1 / self.reference_sim.tps\n t1 = 1 / self.compare_sim.tps\n return 1 / (t1 - t0)", "def performance_measure(self, x):\n # \"calculate performance measure\" \n pref = x.evaluate()\n return pref", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def __evaluate(self, preds, labels, raw, fair, sort_by_unfairness=False,\n graph_prms=None):\n # Compute the distance from fair, then divide by fair to\n # compute the relative unfairness.\n diffs = (raw - fair) / fair\n if sort_by_unfairness:\n # Sort based on unfairness.\n diffs, indices = torch.sort(diffs)\n preds = preds[indices]\n labels = labels[indices]\n # Bucketize and compute bucket accuracies.\n num_samples = preds.size()[0]\n num_buckets = min(20 * (1 if sort_by_unfairness else 4), num_samples)\n num_per_bucket = math.floor(num_samples / num_buckets)\n assert num_per_bucket > 0, \\\n (\"There must be at least one sample per bucket, but there are \"\n f\"{num_samples} samples and only {num_buckets} buckets!\")\n # The resulting buckets are tuples of three values:\n # (x-axis value for bucket, number predicted correctly, total)\n buckets = [\n (x,\n self.check_output(preds_, labels_),\n preds_.size()[0])\n for x, preds_, labels_ in [\n # Each bucket is defined by a tuple of three values:\n # (x-axis value for bucket, predictions, ground truth labels).\n # The x-axis is the mean relative difference for this\n # bucket. A few values at the end may be discarded.\n (torch.mean(diffs[i:i + num_per_bucket]),\n preds[i:i + num_per_bucket],\n labels[i:i + num_per_bucket])\n for i in range(0, num_samples, num_per_bucket)]]\n if self.graph:\n assert graph_prms is not None, \\\n \"\\\"graph_prms\\\" must be a dict(), not None.\"\n assert \"flp\" in graph_prms, \"\\\"flp\\\" not in \\\"graph_prms\\\"!\"\n assert \"x_lim\" in graph_prms, \"\\\"x_lim\\\" not in \\\"graph_prms\\\"!\"\n # Plot each bucket's accuracy.\n pyplot.plot(\n ([x for x, _, _ in buckets]\n if sort_by_unfairness else list(range(len(buckets)))),\n [c / t for _, c, t in buckets], \"bo-\")\n pyplot.ylim((-0.1, 1.1))\n x_lim = graph_prms[\"x_lim\"]\n if x_lim is not None:\n pyplot.xlim(x_lim)\n pyplot.xlabel(\n \"Unfairness (fraction of fair)\"\n if sort_by_unfairness else \"Time\")\n pyplot.ylabel(\"Classification accuracy\")\n pyplot.tight_layout()\n pyplot.savefig(graph_prms[\"flp\"])\n pyplot.close()\n # Compute the overall accuracy.\n _, corrects, totals = zip(*buckets)\n acc = sum(corrects) / sum(totals)\n print(f\" Test accuracy: {acc * 100:.2f}%\")\n return acc", "def get_random_cpu_load():\n load = random.gauss(55, 10)\n if load < 0:\n return 0.0\n elif load > 100:\n return 100.0\n else:\n return round(load, 1)", "def best_value(self):\r\n return self._best_value", "def CountRandomLoadRate(self):\n\t\treturn self._get_attribute('countRandomLoadRate')", "def binary_fairness(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n task: Literal[\"demographic_parity\", \"equal_opportunity\", \"all\"] = \"all\",\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n if task not in [\"demographic_parity\", \"equal_opportunity\", \"all\"]:\n raise ValueError(\n f\"Expected argument `task` to either be ``demographic_parity``,\"\n f\"``equal_opportunity`` or ``all`` but got {task}.\"\n )\n\n if task == 
\"demographic_parity\":\n if target is not None:\n rank_zero_warn(\"The task demographic_parity does not require a target.\", UserWarning)\n target = torch.zeros(preds.shape)\n\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n if task == \"demographic_parity\":\n return _compute_binary_demographic_parity(**transformed_group_stats)\n\n if task == \"equal_opportunity\":\n return _compute_binary_equal_opportunity(**transformed_group_stats)\n\n if task == \"all\":\n return {\n **_compute_binary_demographic_parity(**transformed_group_stats),\n **_compute_binary_equal_opportunity(**transformed_group_stats),\n }\n return None", "def random_importance_function():\n return random()", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)" ]
[ "0.67016745", "0.5999203", "0.59884006", "0.58963954", "0.56670684", "0.5645477", "0.54838866", "0.5344908", "0.53396153", "0.5330606", "0.53292984", "0.5284739", "0.52752274", "0.5267313", "0.52610236", "0.5256685", "0.52457917", "0.52381456", "0.5232188", "0.52058727", "0.5198721", "0.5190431", "0.5178682", "0.517371", "0.5166543", "0.516368", "0.514149", "0.5099078", "0.5088327", "0.5085441" ]
0.68944013
0
Gets the fairness threshold
def _get_fairness_threshold(self): if self.fairness_threshold == "auto": if self._get_ml_task() in [ BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, ]: thresholds = { "demographic_parity_difference": 0.1, "demographic_parity_ratio": 0.8, "equalized_odds_difference": 0.1, "equalized_odds_ratio": 0.8, } return thresholds.get(self._fairness_metric, 0.8) elif self._get_ml_task() == REGRESSION: thresholds = { "group_loss_ratio": 0.8, } if self._fairness_metric == "group_loss_difference": raise AutoMLException( "We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor." ) return thresholds.get(self._fairness_metric, 0.8) else: return deepcopy(self.fairness_threshold)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FastConvergenceThreshold(self):\n\t\treturn self._get_attribute('fastConvergenceThreshold')", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def reward_threshold(self) -> Optional[float]:", "def get_performance_threshold(self):\n\n if Test.performance_params: return float(Test.performance_params[0])\n elif self._check_performance: return self._performance_threshold\n else: return None", "def _get_fairness_metric(self):\n self._validate_fairness_metric()\n if self.fairness_metric == \"auto\":\n if self._get_ml_task() == BINARY_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n if self._get_ml_task() == REGRESSION:\n return \"group_loss_ratio\"\n if self._get_ml_task() == MULTICLASS_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n else:\n return deepcopy(self.fairness_metric)", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def f(ns, k):\n ns = list(enumerate(sorted(ns)))\n n = len(ns)\n min_u = unfair(ns[:k])\n nns = ns[k:]\n def g(mu, n):\n (i, n) = n\n unfairness = n - ns[i - k + 1][1]\n # print(i, n, unfairness)\n if unfairness < mu:\n return unfairness\n return mu\n return reduce(g, nns, min_u)", "def threshold(self):\n return self._threshold", "def threshold(self):\n return self._threshold", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def thresholdfactor(self):\n return self.__thresholdfactor", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def get_wait_time(*args, threshold: float = 0.9, rate_limit_header: str = \"X-Shopify-Shop-Api-Call-Limit\"):\n # average load based on threshold\n mid_load = threshold / 2\n # find the requests.Response inside args list\n for arg in args:\n response = arg if isinstance(arg, requests.models.Response) else None\n # Get the rate_limits from response\n rate_limits = response.headers.get(rate_limit_header) if response else None\n # define current load from rate_limits\n if rate_limits:\n current_rate, max_rate_limit = rate_limits.split(\"/\")\n load = int(current_rate) / int(max_rate_limit)\n else:\n load = None\n # define wait_time based 
on load conditions\n if not load:\n # when there is no rate_limits from header, use the `sleep_on_unknown_load`\n wait_time = ShopifyRateLimiter.on_unknown_load\n elif load >= threshold:\n wait_time = ShopifyRateLimiter.on_high_load\n elif load >= mid_load:\n wait_time = ShopifyRateLimiter.on_mid_load\n elif load < mid_load:\n wait_time = ShopifyRateLimiter.on_low_load\n return wait_time", "def test_soft_threshold():\n assert snet.soft_threshold(10, 100) == 0\n assert snet.soft_threshold(-10, 100) == 0\n assert snet.soft_threshold(10, 3) == 7\n assert snet.soft_threshold(-10, 3) == -7", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def actualthreshold(self):\n return self._actualthreshold", "def max_staleness(self) -> str:\n return pulumi.get(self, \"max_staleness\")", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def get_threshold(self):\n confs = self.confidence[\"conf\"]\n\n return compute_minimum_kernel_density(confs)", "def get_threshold(self, cat):\n t = self.con.execute('select threshold from ct where category=\"%s\"'\n %(cat)).fetchone()\n \n if t is None:\n return 1.0\n \n return self.thresholds[cat]", "def active_thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_active_thread_priority(self)", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 
+ 14 + 16", "def _find_threshold(self, feature, y_train, num_class):\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. (0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative", "def thread_priority(self):\n return _spacegrant_swig.general_burster_2_sptr_thread_priority(self)", "def _determine_threshold(threshold, clip_min=0.1, clip_max=0.9):\n if threshold != -1:\n return threshold\n\n path = os.path.join(os.path.dirname(cfg.predictions_path), 'thresholds.p')\n\n if not os.path.isfile(path):\n print('Warning: Defaulting to threshold of 0.5')\n return 0.5\n\n with open(path, 'rb') as f:\n thresholds = pickle.load(f)\n return np.clip(thresholds, clip_min, clip_max)", "def thread_priority(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_thread_priority(self)", "def find_optimal_threshold(self, hist):\n k = 256\n threshold = int(k / 2)\n lastexpected1 = lastexpected2 = 0\n\n while True:\n expected1 = expected2 = 0\n t_exp1 = sum(hist[:threshold])\n t_exp2 = sum(hist[threshold:])\n for i in range(threshold):\n expected1 += (hist[i] / t_exp1) * i\n\n for i in range(threshold, k):\n expected2 += (hist[i] / t_exp2) * i\n\n threshold = (expected1 + expected2) / 2\n if abs(expected1 - lastexpected1) != 0 and abs(expected2 - lastexpected2) != 0:\n break\n lastexpected1 = expected1\n lastexpected2 = expected2\n # print(expected2, expected1)\n return threshold", "def threshold_selection(prevalence, CostFP_minus_CostTN, CostFN_minus_CostTP, y, y_hat):\n fpr, tpr, thresholds = roc_curve(y, y_hat)\n m = ((1 - prevalence) / prevalence) * ((CostFP_minus_CostTN) / (CostFN_minus_CostTP))\n fm_thresholds = []\n for i in range(len(fpr)):\n fm = tpr[i] - (m * fpr[i])\n fm_thresholds.append((thresholds[i], fm))\n fm_thresholds = sorted(fm_thresholds, key=lambda fm_value: fm_value[1], reverse=True)\n return fm_thresholds[0][0]" ]
[ "0.6620242", "0.62111634", "0.604016", "0.60012925", "0.59756714", "0.5927428", "0.591201", "0.5910259", "0.5881854", "0.5881854", "0.5880676", "0.58554536", "0.5805405", "0.578478", "0.5761599", "0.5743415", "0.5742673", "0.56985474", "0.5683226", "0.56465083", "0.5641896", "0.5629673", "0.56252086", "0.5603558", "0.5587334", "0.5580204", "0.55372804", "0.5537269", "0.55172735", "0.5509283" ]
0.74255294
0
Gets privileged groups for fair training
def _get_privileged_groups(self): if self.privileged_groups == "auto": return [] else: return deepcopy(self.privileged_groups)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_underprivileged_groups(self):\n if self.underprivileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.underprivileged_groups)", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def test_aws_service_api_security_groups_get(self):\n pass", "def get_pingroups(self):\n return self.groups[:]", "def getGroups():\r\n return Group.getGroups()", "def get_groups_using_malware():\n global groups_using_malware\n\n if not groups_using_malware:\n groups_using_malware = rsh.groups_using_malware(get_srcs())\n \n return groups_using_malware", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def available_groups(cls):\n raise NotImplementedError", "def get_security(self):\n users = find_root(self)['users']\n userids_and_groups = []\n for userid in self._groups:\n if userid in users:\n userids_and_groups.append({'userid':userid, 'groups':self.get_groups(userid)})\n return userids_and_groups", "def GetGroupMembers(self, group):\n return []", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def groups(self):\n return self.get_data(\"groups\")", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def get_groups_using_technique():\n global groups_using_technique\n\n if not groups_using_technique:\n groups_using_technique = rsh.groups_using_technique(get_srcs())\n \n return groups_using_technique", "def get_groups(self, username):\n return []", "def list_secgroups(self, name=None):", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm", "def _identify_groups_for_user(user):\n groups = []\n for group in user.groups.all():\n if group.name == 'WMT16' \\\n or group.name.lower().startswith('wmt') \\\n or group.name.startswith('eng2') \\\n or group.name.endswith('2eng'):\n continue\n \n if not group in groups:\n groups.append(group)\n \n return groups", "def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)", "def groups(self):\r\n return 
users.Groups(self)", "def groups(self):\n return []", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups" ]
[ "0.7512295", "0.6589516", "0.65174836", "0.6486006", "0.63847315", "0.63510704", "0.63013774", "0.63013774", "0.62872154", "0.628441", "0.6190389", "0.61468303", "0.61449814", "0.61140037", "0.61140037", "0.6090746", "0.6036777", "0.60114354", "0.6006546", "0.59940183", "0.59654236", "0.59639245", "0.593845", "0.59331065", "0.59283227", "0.5925042", "0.5922163", "0.5920583", "0.59110636", "0.59110636" ]
0.7552889
0
Gets underprivileged groups for fair training
def _get_underprivileged_groups(self):
    if self.underprivileged_groups == "auto":
        return []
    else:
        return deepcopy(self.underprivileged_groups)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_privileged_groups(self):\n if self.privileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.privileged_groups)", "def get_groups_using_malware():\n global groups_using_malware\n\n if not groups_using_malware:\n groups_using_malware = rsh.groups_using_malware(get_srcs())\n \n return groups_using_malware", "def test_aws_service_api_security_groups_get(self):\n pass", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def get_pingroups(self):\n return self.groups[:]", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def _identify_groups_for_user(user):\n groups = []\n for group in user.groups.all():\n if group.name == 'WMT16' \\\n or group.name.lower().startswith('wmt') \\\n or group.name.startswith('eng2') \\\n or group.name.endswith('2eng'):\n continue\n \n if not group in groups:\n groups.append(group)\n \n return groups", "def test_get_device_groups(self):\n pass", "def getGroups():\r\n return Group.getGroups()", "def get_groups_using_technique():\n global groups_using_technique\n\n if not groups_using_technique:\n groups_using_technique = rsh.groups_using_technique(get_srcs())\n \n return groups_using_technique", "def available_groups(cls):\n raise NotImplementedError", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_get_resource_group_by_moid(self):\n pass", "def groups(self):\n\n return ('train', 'dev', 'eval')", "def test_users_groups_get(self):\n pass", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_free_standins(group):", "def list_secgroups(self, name=None):", "def test_get_group(self):\n pass", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm", "def get_groups(self, username):\n return []", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def test_api_v1_groups_get(self):\n pass", "def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)", "def groups(self):\n return []", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o 
cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")" ]
[ "0.74663794", "0.6593482", "0.6520235", "0.65086764", "0.65086764", "0.6437184", "0.6431571", "0.6334094", "0.61589926", "0.6144109", "0.6134614", "0.6120121", "0.60976017", "0.60939455", "0.60939455", "0.6093541", "0.60102457", "0.60085875", "0.59747267", "0.5969062", "0.59649074", "0.5951997", "0.59271246", "0.5921322", "0.5905782", "0.5903327", "0.5902226", "0.5870792", "0.5867802", "0.58635926" ]
0.76907
0
Download log files from remote machines onto the local machine via ssh
def __download_via_ssh(cls, request, local_path):
    hosts = request.POST.getlist('hosts[]')
    logs = request.POST.getlist('logs[]')
    if not os.path.exists(local_path):
        os.makedirs(local_path)
    for host_name in hosts:
        host_object = Host.objects.get(host_name=host_name)
        host_path = os.path.join(local_path, host_name)
        if not os.path.exists(host_path):
            os.makedirs(host_path)
        for log_name in logs:
            log_object = Log.objects.get(log_name=log_name)
            help_methods.get_file_via_ssh(
                getattr(log_object, 'log_path'),
                host_path,
                getattr(host_object, 'host_name'),
                getattr(host_object, 'host_root_user'),
                getattr(host_object, 'host_root_password')
            )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PullLogs(ssh, log_files, download_folder):\n for log_file in log_files:\n target_file = os.path.join(download_folder, os.path.basename(log_file))\n ssh.ScpPullFile(log_file, target_file)\n _DisplayPullResult(download_folder)", "def ssh_download_files(data):\n with _ssh_connect() as ssh:\n with ssh.open_sftp() as sftp:\n with click.progressbar(data, label='downloads') as items: # noqa\n for item in items:\n _, filename = os.path.split(item)\n sftp.get(item, f'{DOWNLOAD_DIR}/{filename}')", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download(cls, host, remotepath, localpath, user=None):\n # TODO: consider a noclobber option to backup existing files\n\n if not user:\n user = cls.user\n\n # run the command\n ssh = cls._get_ssh_connection(host, user)\n if not ssh:\n print \"ERROR: No ssh connection\"\n return False\n\n # TODO: catch exceptions thrown by SshMachine.download()\n ssh.download(remotepath, localpath)", "def pull_file():\n try:\n global IP_OR_HOST\n paramiko.util.log_to_file(BASE_DIR + '/logs/amazon_os.log')\n config = _get_hvm_config()\n key = paramiko.RSAKey.from_private_key_file(config.get('amazon_hvm').get('private_key_path'))\n transport = paramiko.Transport(IP_OR_HOST, 22)\n transport.connect(username=config.get('amazon_hvm').get('username'), pkey=key)\n sftp = paramiko.SFTPClient.from_transport(transport)\n p = sftp.put(BASE_DIR + '/logs/amazon_os.log', '/etc/test/amazon_os.log')\n # sftp.get('remove_path', 'local_path')\n transport.close()\n except Exception as e:\n transport.close()\n raise e\n else:\n return transport", "def _process_logs_download(self, logfile):\r\n\r\n print 'Downloading PCU logs'\r\n command = 'robot --outputdir \"C:\\Robot Framework\\Output\\PCU_logs\" {}.robot'.format(self.name)\r\n\r\n return self._run_command(command, logfile)", "def download_file(self):\n\n self.monitor.info(f'-> Started to download log file from: {self.url}...')\n try:\n log_file = requests.get(self.url, allow_redirects=True)\n\n postfix = f'{datetime.today().strftime(\"%Y_%m_%d\")}_{str(int(time.time()))}'\n filename = f\"log_file_{postfix}.txt\"\n\n self.monitor.info(f'-> Writing file to {LOG_FILES_PATH}...')\n open(LOG_FILES_PATH + '/' + filename, 'wb').write(log_file.content)\n self.monitor.info(f'-> Finished writing file to {LOG_FILES_PATH}.')\n\n file_path = self.log_file_path + '/' + filename\n\n return file_path, postfix\n\n except requests.exceptions.SSLError as connection_error:\n self.monitor.exception(f'-> Something bad happened. 
Details:\\n {repr(connection_error)}')\n return None, None", "def sync_log(self):\r\n print('Synchronizing log files...')\r\n\r\n # Connect with SSH-PubKey and synchronize files\r\n subprocess.run(\r\n ['scp',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}:/home/robot/.bin/*_tmux.log'.format(self.settings['ip']),\r\n self.log_path\r\n ])\r\n\r\n print('Done.')", "def remote_fetch(ip_addr, username, cmd):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n ssh.connect(ip_addr, username=username)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n return stdout.readlines()", "def Logs():\n # time.sleep(100)\n params = request.get_json()\n hostname = params.get('vaultip', '164.99.91.35')\n password = params.get('boxpass', 'novell')\n username = params.get('boxusername', 'root')\n port = 22\n logType = 'download'\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password, port=port)\n\n try:\n print('configure')\n logType= 'configure'\n sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idmconfigure.log','r')\n # sftp.close();\n except Exception:\n try:\n print('install')\n logType= 'install'\n # sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idminstall.log','r')\n except Exception:\n #sftp.close()\n print('download')\n logType= 'download'\n try:\n stdin = sftp.open('/tmp/download.log','r')\n except Exception:\n sftp.close()\n return jsonify('no file'),200\n #sftp.close()\n log = stdin.readlines()\n data={'type':logType,'log':log}\n return jsonify(data),200", "def download(self, remotepath, localpath):\n sftp = self.connection.open_sftp()\n if isinstance(remotepath, str):\n sftp.get(remotepath, localpath)\n else:\n for path in remotepath:\n filename = os.path.split(path)[-1]\n sftp.get(path, localpath + \"/\" + filename)\n sftp.close()", "def sftp_download_latest_file(self, host, port, usr, pwd, remote, local=None, **kwargs):\n filefilter = kwargs.get('filter')\n with pysftp.Connection(host, username=usr, password=pwd, port=int(port)) as self.sftp:\n try:\n self.sftp.chdir(remote)\n self._log.debug('sftp walking to %s', remote)\n except (IOError, OSError):\n self._log.debug(\"sftp cd to dir '%s' failed!\", remote)\n\n sftp_curr_dir = self.sftp.getcwd()\n\n statfiles = list(\"%s/%s\" % (sftp_curr_dir, filename) for filename in self.sftp.listdir(sftp_curr_dir) if re.search(filefilter, filename))\n sorted_statfiles = list(sorted([filename for filename in statfiles], key=self.mtime))\n try:\n target_file = sorted_statfiles[-1]\n except (IndexError, NameError):\n self._log.debug(\"'%s' not found!\", filefilter)\n\n if local is None:\n local = os.getcwd()\n if '.' 
not in os.path.basename(local):\n local = os.path.join(local, target_file.split('/')[-1])\n if os.path.exists(os.path.split(local)[0]) is False:\n os.makedirs(os.path.split(local)[0])\n\n self.sftp.get(target_file, local)\n self.sftp.close()", "def download(self, output):\n self.wait()\n path = 'auditlogEntryReport/download'\n with open(output, 'w') as f:\n f.write(self._session.get(path))\n LOGGER.info('log downloaded: {}'.format(output))", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def download(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start downloading: %d' % i)\n os.system(\"scp %s:18DOWNLOAD downloads/\" % server)\n end_time = time.time()\n logging.debug('End downloading...')\n logging.debug('Time taken by downloader: %s' % (end_time - start_time))", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):\n logf = ''\n logn = self._logcnt()\n logcodei = \"%put E3969440A681A24088859985\" + logn + \";\"\n logcodeo = \"\\nE3969440A681A24088859985\" + logn\n logcodeb = logcodeo.encode()\n\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" does not exist.\"}\n\n if valid == {}:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" is a directory.\"}\n\n if os.path.isdir(localfile):\n locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]\n else:\n locf = localfile\n\n try:\n fd = open(locf, 'wb')\n fd.write(b'write can fail even if open worked, as it turns out')\n fd.close()\n fd = open(locf, 'wb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(locf)+\" could not be opened or written to. Error was: \"+str(e)}\n\n code = \"filename _sp_updn '\"+remotefile+\"' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf = ll['LOG']\n\n self.stdin[0].send(b'tom says EOL=DNLOAD \\n')\n self.stdin[0].send(b'\\n'+logcodei.encode()+b'\\n'+b'tom says EOL='+logcodeb+b'\\n')\n\n done = False\n datar = b''\n bail = False\n\n while not done:\n while True:\n if os.name == 'nt':\n try:\n rc = self.pid.wait(0)\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. RC from wait was: \"+str(rc)}\n except:\n pass\n else:\n rc = os.waitpid(self.pid, os.WNOHANG)\n if rc[1]:\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. 
RC from wait was: \"+str(rc)}\n\n if bail:\n if datar.count(logcodeb) >= 1:\n break\n try:\n data = self.stdout[0].recv(4096)\n except (BlockingIOError):\n data = b''\n\n if len(data) > 0:\n datar += data\n if len(datar) > 8300:\n fd.write(datar[:8192])\n datar = datar[8192:]\n else:\n sleep(0.1)\n try:\n log = self.stderr[0].recv(4096).decode(self.sascfg.encoding, errors='replace')\n except (BlockingIOError):\n log = b''\n\n if len(log) > 0:\n logf += log\n if logf.count(logcodeo) >= 1:\n bail = True\n done = True\n\n fd.write(datar.rpartition(logcodeb)[0])\n fd.flush()\n fd.close()\n\n self._log += logf\n final = logf.partition(logcodei)\n z = final[0].rpartition(chr(10))\n prev = '%08d' % (self._log_cnt - 1)\n zz = z[0].rpartition(\"\\nE3969440A681A24088859985\" + prev +'\\n')\n logd = zz[2].replace(\";*\\';*\\\";*/;\", '')\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logd += ll['LOG']\n \n return {'Success' : True, \n 'LOG' : logd}", "def get(host, username, remotepath, localpath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('geting file from remote:%s -> %s', remotepath, localpath)\n if not localpath:\n localpath = os.path.split(remotepath)[1]\n cmd = 'scp -P %s %s@%s:%s %s' % (port, username, host, remotepath, localpath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not retrieve %s file from %s: Error %s', remotepath, host, e)", "def download_stewicombo_from_remote(name):\n meta = set_stewicombo_meta(name, category='')\n log.info(f'attempting download of {name} from {paths.remote_path}')\n download_from_remote(meta, paths)", "def GetAllLogFilePaths(ssh):\n ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]\n log_files = []\n try:\n files_output = utils.CheckOutput(\" \".join(ssh_cmd), shell=True)\n log_files = FilterLogfiles(files_output.splitlines())\n except subprocess.CalledProcessError:\n logger.debug(\"The folder(%s) that running launch_cvd doesn't exist.\",\n constants.REMOTE_LOG_FOLDER)\n return log_files", "def collect_files_from_vm(self, hostname='localhost', username=None, password=None, src=None, dst=None):\n self._scp_get(hostname=hostname, username=username, password=password, srcfile=src, destfile=dst)", "def download_files(file_uris):\n\n if os.path.exists(LOG_FILE):\n log_file = open(LOG_FILE, \"rU+\")\n downloaded_podcasts = strip_newlines(log_file)\n else:\n log_file = open(LOG_FILE,\"w\")\n downloaded_podcasts = []\n\n for uri in file_uris:\n # if the current file URI is not found in the log, it is a new file, and\n # is thus downloaded\n if uri not in downloaded_podcasts:\n # extract filename from the URI \n uri_split = re.split(\"/\", uri)\n filename = uri_split[len(uri_split) - 1]\n \n # download the file\n if OUTPUT:\n print \"downloading \" + uri\n urllib.urlretrieve(uri, DEST_DIR + os.sep + filename)\n log_file.write(uri + os.linesep)\n\n log_file.close()", "def download_cloudtrail_logs(target_dir, bucket, cloudtrail_prefix, org_ids,\n account_ids, regions, from_date, to_date, parallelism):\n prefixes = _s3_key_prefixes(cloudtrail_prefix, org_ids, account_ids, regions, from_date, to_date)\n _s3_download_recursive(bucket, prefixes, target_dir, parallelism)", "def download_all_ftp(download_dir, file_match, ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory, max_wait=60):\r\n if max_wait < 0:\r\n max_wait = 0\r\n \r\n remove_old_ftp_downloads(download_dir)\r\n #open the file 
for writing in binary mode\r\n all_files_downloaded = []\r\n print 'Opening local file'\r\n time_start_connect_attempt = datetime.datetime.utcnow()\r\n request_incomplete = True\r\n ftp_exception = \"FTP Request Incomplete\"\r\n attempt_count = 1\r\n while ((datetime.datetime.utcnow()-time_start_connect_attempt)<datetime.timedelta(minutes=max_wait) \\\r\n or attempt_count == 1) and request_incomplete:\r\n try:\r\n #init FTPClient (moved here because of traffic issues)\r\n ftp_client = PyFTPclient(host=ftp_host,\r\n login=ftp_login,\r\n passwd=ftp_passwd,\r\n directory=ftp_directory)\r\n ftp_client.connect()\r\n file_list = ftp_client.ftp.nlst(file_match)\r\n ftp_client.ftp.quit()\r\n #if there is a file list and the request completed, it is a success\r\n if file_list:\r\n for dst_filename in file_list:\r\n local_path = os.path.join(download_dir, dst_filename)\r\n local_dir = local_path[:-1*len(FileExtension(local_path))-1]\r\n #download and unzip file\r\n try:\r\n #download from ftp site\r\n unzip_file = False\r\n if not os.path.exists(local_path) and not os.path.exists(local_dir):\r\n print \"Downloading from ftp site: \" + dst_filename\r\n unzip_file = ftp_client.download_file(dst_filename, local_path)\r\n else:\r\n print dst_filename + ' already exists. Skipping download ...'\r\n #extract from tar.gz\r\n if unzip_file:\r\n\t\t\t print \"Extracting: \" + dst_filename\r\n ExtractNested(local_path, True)\r\n #add successfully downloaded file to list\r\n all_files_downloaded.append(local_dir)\r\n #request successful when one file downloaded and extracted \r\n request_incomplete = False\r\n else:\r\n print dst_filename + ' already extracted. Skipping extraction ...'\r\n except Exception as ex:\r\n print ex\r\n if os.path.exists(local_path):\r\n os.remove(local_path)\r\n continue\r\n \r\n except Exception as ex:\r\n ftp_exception = ex\r\n pass\r\n \r\n if request_incomplete:\r\n print \"Attempt\", attempt_count, \"failed ...\"\r\n attempt_count += 1\r\n if max_wait > 0:\r\n sleep_time = 5.1\r\n if max_wait < 5.1:\r\n sleep_time = max(max_wait, 0.1)\r\n print \"Sleeping for\", (sleep_time-0.1), \"minutes and trying again ...\"\r\n time.sleep((sleep_time-0.1)*60)\r\n \r\n \r\n \r\n if request_incomplete:\r\n print \"Maximum wait time of\", max_wait, \"minutes exeeded and request still failed. 
Quitting ...\"\r\n raise Exception(ftp_exception)\r\n \r\n print \"All downloads completed!\"\r\n return all_files_downloaded", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def download_file(self, remote_file):\n remote_file.download()", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def SelectLogFileToPull(ssh, file_name=None):\n log_files = GetAllLogFilePaths(ssh)\n if file_name:\n file_path = os.path.join(constants.REMOTE_LOG_FOLDER, file_name)\n if file_path in log_files:\n return [file_path]\n raise errors.CheckPathError(\"Can't find this log file(%s) from remote \"\n \"instance.\" % file_path)\n\n if len(log_files) == 1:\n return log_files\n\n if len(log_files) > 1:\n print(\"Multiple log files detected, choose any one to proceed:\")\n return utils.GetAnswerFromList(log_files, enable_choose_all=True)\n\n raise errors.CheckPathError(\"Can't find any log file in folder(%s) from \"\n \"remote instance.\" % constants.REMOTE_LOG_FOLDER)", "def cli_copy_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp pccserver:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/pccserver_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/pccserver_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/pccserver_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/pccserver_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/pccserver_logs/error.log\")\n cmd = \"sudo rm -rf /home/ceph/; sudo docker cp pccserver:/home/jobs/ceph /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/pccserver_logs/ceph\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/ceph/cluster/\",\"output/pccserver_logs/ceph/\")\n \n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp platina-executor:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/platina_executor_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/platina_executor_logs/ansible.log\")\n 
cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/platina_executor_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/platina_executor_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/platina_executor_logs/error.log\")\n cmd = \"sudo rm -rf /home/kubernetes/; sudo docker cp platina-executor:/home/jobs/kubernetes /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/platina_executor_logs/kubernetes\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/kubernetes/cluster/\",\"output/platina_executor_logs/kubernetes/\")\n \n cmd = \"sudo rm -rf /output/logs\"\n os.system(cmd) \n \n return \"OK\"\n except Exception as e:\n return {\"Error\": str(e)}" ]
[ "0.73364204", "0.6596629", "0.6111099", "0.60842896", "0.6064838", "0.6026996", "0.60165", "0.59927064", "0.59570765", "0.59108406", "0.59068495", "0.58801275", "0.5834578", "0.57861924", "0.5749585", "0.573902", "0.5687385", "0.56273234", "0.5601198", "0.5537666", "0.55214125", "0.5501977", "0.5491566", "0.5452155", "0.5445011", "0.5423388", "0.5414817", "0.5411799", "0.54074377", "0.540065" ]
0.7495217
0
Make a hashable representation of an object for hashlib
def hashable(obj):
    return bytes(str(obj), "utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def hash_obj(self, obj):\r\n md5er = hashlib.md5()\r\n update_hash(md5er, obj)\r\n return md5er.hexdigest()", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def hash(obj, hash_name='md5', coerce_mmap=False):\n if 'numpy' in sys.modules:\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\n else:\n hasher = Hasher(hash_name=hash_name)\n return hasher.hash(obj)", "def hash(obj, hash_name='md5', coerce_mmap=False):\r\n if 'numpy' in sys.modules:\r\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\r\n else:\r\n hasher = Hasher(hash_name=hash_name)\r\n return hasher.hash(obj)", "def deep_hash(obj):\n pass", "def pickle_and_hash(obj: Any) -> str:\n try:\n s = dill.dumps(obj)\n except:\n raise UnpickleableError()\n\n return hashlib.sha512(s).hexdigest()", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()", "def hashcode(o):", "def hash_simple_obj_to_hex(obj):\n\n hash_ = sha256()\n try:\n update_hash(hash_, obj)\n except ValueError as e:\n raise ValueError(\"%s (full object was %r)\" % (e, obj))\n return hash_.hexdigest()", "def hash(self) -> bytes:", "def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()", "def hash_data(obj):\n collect = sha1()\n for text in bytes_iter(obj):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n collect.update(text)\n return collect.hexdigest()", "def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = 
structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()", "def hash(self, oid):\n data = self.family_name + self.name +\\\n self.date_of_birth + self.date_of_issue +\\\n self.date_of_expiry + self.issuing_country +\\\n self.issuing_authority + self.license_number +\\\n \"\".join(self.categories_of_vehicles) +\\\n str(self.number_of_entries)\n if oid == 'id-sha1':\n digest = hashes.Hash(hashes.SHA1(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha224':\n digest = hashes.Hash(hashes.SHA224(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha256':\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha384':\n digest = hashes.Hash(hashes.SHA384(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha512':\n digest = hashes.Hash(hashes.SHA512(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n else:\n print('ERROR: Hash algorithm not implemented.')\n sys.exit(1)", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def __hash__(self):\n hashable = tuple(self.pandas_object.values.tobytes())\n if isinstance(self.pandas_object, pd.DataFrame):\n hashable += tuple(self.pandas_object.columns)\n else:\n hashable += tuple(self.pandas_object.name)\n return hash(hashable)", "def __hash__(self):\n return self.to_hash()", "def hash(self) -> str:\r\n ...", "def hash(space, w_object):\n return space.hash(w_object)", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def object_hash(obj):\n try:\n code = obj.__code__.co_code\n except AttributeError:\n attrlist = [getattr(obj, name) for name in dir(obj)\n if not name.startswith('__')]\n codelist = [attr.__code__.co_code for attr in attrlist\n if hasattr(attr, '__code__')]\n code = b','.join(codelist)\n digest = hashlib.md5(code).hexdigest()\n return digest", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def __hash__(self):\n return hash(self.hash)", "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def hashkey(obj, salt=0):\n if isinstance(obj, str):\n return zlib.adler32(obj.encode(), salt) & 0xffffffff\n elif isinstance(obj, bytes):\n return zlib.adler32(obj, salt) & 0xffffffff\n elif isinstance(obj, datetime_type):\n return zlib.adler32(str(obj).encode(), salt) & 0xffffffff\n return hash(obj) & 0xffffffff", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return 
hash(str(self)) # use the __str__ method to obtain the hashcode" ]
[ "0.7611977", "0.73950815", "0.72174555", "0.72113204", "0.71887934", "0.7181167", "0.7116037", "0.7108791", "0.701039", "0.70093346", "0.6961172", "0.691401", "0.6912288", "0.6907212", "0.6874116", "0.683543", "0.6820438", "0.67326635", "0.67163974", "0.6707556", "0.66794527", "0.66767013", "0.6674996", "0.66376996", "0.66369027", "0.656375", "0.65088356", "0.6507814", "0.6507814", "0.6507814" ]
0.8125466
0
Ensure row data is valid. This currently just checks that 2D arrays match the variable components.
def validate_row(row):
    subkeys = [INDEP, DEP]
    for subkey in subkeys:
        for k, v in row[subkey].items():
            if v is None:
                continue
            if np.ndim(v) > 1:
                assert np.ndim(v) == 2
                if 1 not in np.shape(v):
                    assert isinstance(k, variable.Variable)
                    assert k.components is not None
                    assert len(k.components) in np.shape(v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_data(self, row, col, value):\n\n return True", "def check_row(row):\n \n if len(row) != _ncols:\n raise ValueError(\"Row contains {0} columns, expected {1}!\\n\\n{2}\\n\".format(len(row), _ncols, row))", "def validate(self, row):\n raise NotImplementedError", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))", "def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid", "def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))", "def _validate_from_plain(data: Sequence[Sequence],\n columns: Sequence[str],\n dtypes: Sequence[str],\n row_wise: bool):\n\n if row_wise:\n # assert equal number of elements across rows\n row_lenghts = {len(row) for row in data}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"row. Please check provided input data\")\n\n # assert equal number of columns and elements per row\n row_lenghts.add(len(columns))\n if len(row_lenghts) > 1:\n raise ValueError(\n \"Number of columns has to equal the number of \"\n \"values per row. 
Please check column names and \"\n \"provided input data.\")\n\n # assert equal number of dtypes and elements per row\n row_lenghts.add(len(dtypes))\n if len(row_lenghts) > 1:\n raise ValueError(\"Number of dtypes has to equal the number of \"\n \"values per row. Please check dtypes and \"\n \"provided input data.\")\n\n else:\n # assert equal number of elements across columns\n col_lengths = {len(col) for col in data}\n if len(col_lengths) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"columns. Please check provided input data\")\n\n # assert equal number of columns in data, column names and dtypes\n col_count = len(columns)\n if col_count != len(data):\n raise ValueError(\"Input data and column names have different \"\n \"amount of columns. Please check provided \"\n \"input data\")\n\n if col_count != len(dtypes):\n raise ValueError(\"Input data and dtypes have different \"\n \"amount of columns. Please check provided \"\n \"input data\")", "def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def _verify_integrity(self):\n if len(self.data.shape) != 1:\n raise ValueError(\n \"Data array must be one dimensional \"\n \"(is {})\".format(len(self.data.shape))\n )\n\n if len(self.shape.shape) != 2:\n raise ValueError(\n \"Shape array must be two dimensional \"\n \"(is {})\".format(len(self.shape.shape))\n )\n\n shape_size, data_size = self._cumsum[-1], self.data.size\n\n if not shape_size == data_size:\n raise ValueError(\n \"Size of data ({data_size}) does not match that \"\n \"of the given shapes ({shape_size}).\".format(\n data_size=data_size, shape_size=shape_size\n )\n )", "def verify_grid_row_data(self, row_data):\n return self.verify_grid_row_details(self.vendors_div_id, row_data)", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", 
"def validate_data(self):\n if self.type == 'grid':\n for layout in self.data:\n grid = layout.get('grid')\n if not grid:\n raise ChartError(\n \"Layout grid setting must be set \"\n \"if layout type is 'grid'\")\n\n if not grid.get('location'):\n raise ChartError(\n \"Layout grid location must be set \"\n \"if layout type is 'grid'\")\n\n if len(grid['location']) != 2:\n raise ChartError(\"Layout grid location length must be 2\")", "def is_rows_valid(bd):\n for row in rows:\n seen = []\n for num in nums:\n if bd[row[num]] == \" \":\n continue\n elif bd[row[num]] not in seen:\n seen += [bd[row[num]]]\n else:\n return False\n else:\n continue\n return True", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False", "def is_valid(array, index):\n row, column = index\n return 0 <= row < len(array) and 0 <= column < len(array[row])", "def __is_valid_row(self, row_number):\n return self.__is_valid((row_number, 0))", "def validate_dataset(columns, rows):\n # Ensure that all column identifier are zero or greater, unique, and smaller\n # than the column counter (if given)\n col_ids = set()\n for col in columns:\n if col.identifier < 0:\n raise ValueError('negative column identifier \\'' + str(col.identifier) + '\\'')\n elif col.identifier in col_ids:\n raise ValueError('duplicate column identifier \\'' + str(col.identifier) + '\\'')\n col_ids.add(col.identifier)\n # Ensure that all row identifier are zero or greater, unique, smaller than\n # the row counter (if given), and contain exactly one value for each column\n row_ids = set()\n for row in rows:\n if len(row.values) != len(columns):\n raise ValueError('schema violation for row \\'' + str(row.identifier) + '\\'')\n elif row.identifier < 0:\n raise ValueError('negative row identifier \\'' + str(row.identifier) + '\\'')\n elif row.identifier in row_ids:\n raise ValueError('duplicate row identifier \\'' + str(row.identifier) + '\\'')\n row_ids.add(row.identifier)\n return max(col_ids) if len(col_ids) > 0 else -1, max(row_ids) if len(row_ids) > 0 else -1", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def _validate_input_data(self):\n\n if type(self.data) in [np.ndarray, da.Array]:\n if not self.data.dtype.names:\n raise ValueError('QuadTree: numpy array provided for data, but no names were found, array must be a structured array')\n if 'x' not in self.data.dtype.names or 'y' not in self.data.dtype.names:\n raise ValueError('QuadTree: numpy structured array provided for data, but \"x\" or \"y\" not found in variable names')\n self.layernames = [self.rev_layer_lookup[var] for var in self.data.dtype.names if var in ['z', 'tvu']]\n elif type(self.data) == xr.Dataset:\n if 'x' not in 
self.data:\n raise ValueError('QuadTree: xarray Dataset provided for data, but \"x\" or \"y\" not found in variable names')\n if len(self.data.dims) > 1:\n raise ValueError('QuadTree: xarray Dataset provided for data, but found multiple dimensions, must be one dimensional: {}'.format(self.data.dims))\n self.layernames = [self.rev_layer_lookup[var] for var in self.data if var in ['z', 'tvu']]\n self._convert_dataset() # internally we just convert xarray dataset to numpy for ease of use\n else:\n raise ValueError('QuadTree: numpy structured array or dask array with \"x\" and \"y\" as variable must be provided')", "def validate_datasets(row):\n data_validator = DataJSONDataset(row)\n valid = data_validator.validate(validator_schema=row['validator_schema'])\n errors = data_validator.errors\n row['validation_errors'] = errors\n if not valid:\n logger.error(f'Error validating {row}: {errors}')", "def validate_dataset(self):\n pass", "def is_valid_row(self):\r\n return self.valid_row", "def _validate_level(self, levelText):\n if len([line for line in levelText.splitlines() if line.strip()]) != 6:\n # wrong num rows\n return False\n \n if any(len(list(line)) != 6 for line in levelText.splitlines() if line.strip()):\n # wrong num cols\n return False\n\n return True", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )" ]
[ "0.69890326", "0.6909129", "0.6653121", "0.6620799", "0.6608853", "0.6602894", "0.65480334", "0.6546582", "0.65440375", "0.6527276", "0.649017", "0.64542824", "0.6438385", "0.6417175", "0.6406108", "0.6311875", "0.6278405", "0.6260668", "0.6255064", "0.6238612", "0.62377685", "0.6222857", "0.62165743", "0.6214945", "0.62121654", "0.62018585", "0.6194205", "0.617039", "0.6169317", "0.61635673" ]
0.7375959
0
Add a dictionary of dependent data
def add_dict(self, indep, dep):
    dfull = {IND: len(self), INDEP: indep.copy(), DEP: dep}
    validate_row(dfull)
    check_objects(dfull)
    if settings.CONVERT_SCALAR_ARRAYS:
        scalarise(dfull)
    if settings.PRINT_UPDATES:
        print(self.show([dfull]))
    self.append(dfull)
    self._combine(dfull)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(\n self, indep, key=None, value=None, dep=None, keys=None, values=None, **kwargs\n ):\n if key is not None and value is not None:\n if isinstance(key, str):\n self.add_value(indep, key, value)\n elif isinstance(key, list):\n self.add_array(indep, key, value)\n elif keys is not None and values is not None:\n self.add_array(indep, keys, values)\n elif dep is not None:\n self.add_dict(indep, dep)\n elif isinstance(key, dict):\n self.add_dict(indep, key)\n elif len(kwargs) > 0:\n self.add_dict(indep, kwargs)", "def add_value(self, indep, key, value):\n self.add_dict(indep, {key: value})", "def add_data_paths(self, path_dict: dict):\n self.data_dict.update(path_dict)", "def add_array(self, indep, keys, values):\n if np.ndim(values) > 1:\n values = orient(values, keys)\n dep = {k: v for k, v in zip(keys, values)}\n self.add_dict(indep, dep)", "def add_data(self, v, m, x, pos=1):\n if x is not None:\n if v in self.variables:\n if m in self.models:\n self.data.update({self.__gen_key(m, v, pos): x})\n self.pos.update({self.__gen_key(m, v, pos): pos})\n else:\n pass\n else:\n pass\n else:\n pass", "def addData(self, d):\n self.__populateDict(self._data, d)", "def add_dependency(self, dep):\n \n if dep == OrderedDict(): return False\n dep_key, dep_dict = dep.popitem()\n graph_list = self.get_dependencies(dep_key, self.graph, list())\n if graph_list != None:\n if graph_list != list():\n for graph in graph_list:\n graph[dep_key] = dep_dict\n else:\n self.graph[dep_key] = dep_dict\n return True\n return False", "def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)", "def add_data(self,**kwargs):\n self.given_data.update([x for x in kwargs.keys() if kwargs[x]!=None ])\n for i in range(len(self.attr)):\n param=self.attr[i]\n if param in kwargs and kwargs[param]!=None:\n if i==0 and not (0 <= kwargs['angle'] <= 90) :# atribute is angle\n raise ValueError('Angle should be between 0 an 90 degrees')\n elif i==7 and not (0 <= kwargs[param] <= 1):\n raise ValueError('Coefficient (kf) should be between 0 and 1')\n else:\n self.data[param]=kwargs[param]\n print('Added data to object. 
See current data by using print(object_name) or using check_data method')", "def require_data(self, typename):\n self.required_data_products.add(typename)", "def require_data(self, typename):\r\n self.required_data_products.add(typename)", "def _add_var_dict(self, var_dict=None):\n # add variable dictionaries, supplementing anything missing\n # with the standards defined in the json\n standards = self._load_json()\n if var_dict is None:\n var_dict = standards\n else:\n for level in standards.keys():\n if level not in var_dict.keys():\n var_dict[level] = standards[level].copy()\n else:\n for key in standards[level].keys():\n if key not in var_dict[level].keys():\n var_dict[level][key] = standards[level][key]\n self._map_cols = var_dict['columns']\n self._map_codes = var_dict['key_codes']\n self._variable_dict = var_dict\n self._standards = standards", "def add(self,x,y):\n # assert that independent variable is as long as each of the\n # dependent variables\n for ch in self.chs:\n assert len(x) == len(y[ch-1])\n apply(Storage.add, (self,[x,y]))", "def add_data(self, data: List[dict]):\n raise NotImplementedError()", "def add_records(self, data: dict, execution_context: dict):", "def _add_dependency(self, dep):\n self.dependency.append(dep)", "def add_dependency(session, data, username='system_user'):\n session = validate_session(session)\n failed_count = 0\n for deps in data['data']:\n pkg_id = deps['toppatch_id']\n for dep in deps['dependencies']:\n dep_exists = session.query(LinuxPackageDependency).\\\n filter(LinuxPackageDependency.toppatch_id == pkg_id).\\\n filter(LinuxPackageDependency.dependency == dep).first()\n if not dep_exists:\n try:\n dep_add = LinuxPackageDependency(pkg_id, dep)\n session.add(dep_add)\n session.commit()\n except Exception as e:\n session.rollback()\n failed_count += 1", "def addDic(dic, elt):\n pass", "def add_terms_data(self, terms: Dict[datetime, List[dict]]):\n raise NotImplementedError()", "def addData(self,data):\n\t\tif isinstance(data,list):\n\t\t\tif isinstance(data[0],dict):\n\t\t\t\tself.data.extend(data)\n\t\t\telif isinstance(data[0],list):\t\n\t\t\t\tfor r in data:\n\t\t\t\t\tacc= dict()\n\t\t\t\t\tfor h in self.header:\n\t\t\t\t\t\tacc[h]=r[self.header.index(h)]\t\n\t\t\t\t\tself.data.append(acc) \n\t\t\telse:\n\t\t\t\tself.data.append(dict(zip(self.header,data)))\n\t\telif isinstance(data,dict):\n\t\t\tself.data.append(data)\n\t\telse:\n\t\t\traise datatools.WrongTypeError(data)", "def add(self, other):\n\n def merge_dicts(d1, d2):\n \"\"\"\n Merge two dictionaries\n\n param d1: dictionary changed in place to have combined values\n type d1: dictionary(key -> set)\n param d2: dictioanry to be merged\n type d2: dictionary(key -> set)\n \"\"\"\n for key,value in d2.items():\n if key not in d1:\n d1[key] = value\n else:\n d1[key] |= value\n \n self.num_documents += other.num_documents\n self.num_expressions += other.num_expressions\n self.global_expressions += other.global_expressions\n self.expressions_with_e += other.expressions_with_e\n self.num_keywords += other.num_keywords\n merge_dicts(self.missing_tags, other.missing_tags)\n merge_dicts(self.problem_files, other.problem_files)", "def _add_dictionary(self, current, added):\n for key in added:\n if key in current and isinstance(current[key], collections.Mapping):\n self._add_dictionary(current[key], added[key])\n else:\n current[key] = added[key]", "def add_depend(self, data):\n try:\n self._session.add(StepDependencyEntity(\n child_id=data['child_id'],\n parent_id=data['parent_id']\n ))\n 
except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def learn(primer, dependencies):\n knowledge_map = defaultdict(dict)\n for row in primer:\n for dvcol, ivcol in dependencies.items():\n # knowledge of the dependent value is mapped to the value\n # of the independent value col\n #\n # notice:\n # - if the knowledge_map has no entry for the dv col,\n # a dict is constructed automatically\n # - the value of the iv col is used\n # - overwrites the previous known relationship\n knowledge_map[dvcol][row[ivcol]] = row[dvcol]\n return knowledge_map", "def add(self, data_def_request):\n logger.debug(\"Adding a data_def request {}\"\n .format(data_def_request.to_dict()))\n self._data_defs.append(data_def_request)", "def data_dict_add_total(data_dict, sum_args, feat_name):\n for key in data_dict:\n data_dict[key][feat_name] = 0\n for feat in sum_args:\n if data_dict[key][feat] != 'NaN':\n data_dict[key][feat_name] += data_dict[key][feat]", "def set_dependencies(self,dependency_list):\n\t\tdeps = {}\n\t\tfor relation in dependency_list:\n\t\t\tself.nr_of_deps += 1\n\t\t\t# Find the type of relation\n\t\t\trel = re.match('[a-z\\_]*(?=\\()',relation).group(0)\n\t\t\t# Find head and dependent\n\t\t\thead = int(re.search('(?<=-)[0-9]*(?=, )',relation).group(0))\n\t\t\tdep = int(re.search('(?<=-)[0-9]*(?=\\)$)', relation).group(0))\n\t\t\t# Set head position and create\n\t\t\t#dictinary entries\n\t\t\tif head == 0:\n\t\t\t\tself.head_pos = dep\n\t\t\telse:\n\t\t\t\tdeps[head] = deps.get(head,[])\n\t\t\t\tdeps[head].append([dep,rel])\n\t\t#set headpos to first head in dependency list if sentence has no head\n\t\tif dependency_list and not self.head_pos:\n\t\t\tfirst_head = int(re.search('(?<=-)[0-9]*(?=, )',dependency_list[0]).group(0))\n\t\t\tself.head_pos = first_head\n\t\treturn deps", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def add_info():\r\n car_order_list = []\r\n order_info = {}\r\n\r\n order_type = get_order_type()\r\n customer_name = get_name()\r\n phone_number = get_phone_number()\r\n address = get_address()\r\n cars_on_order = get_order(car_order_list)\r\n subsidy_of_cars = total_car_subsidy(cars_on_order)\r\n order_subsidy = event_charge(order_type)\r\n total_subsidy = get_total_subsidy(subsidy_of_cars, order_subsidy)\r\n\r\n order_info[\"Name\"] = customer_name\r\n order_info[\"Phone number\"] = phone_number\r\n order_info[\"Address\"] = address\r\n order_info[\"Order type\"] = order_type\r\n order_info[\"Cars on order\"] = cars_on_order\r\n order_info[\"Subsidy of cars\"] = subsidy_of_cars\r\n order_info[\"Order subsidy\"] = order_subsidy\r\n order_info[\"Total Subsidy\"] = total_subsidy\r\n\r\n # order_dict[(len(order_dict)+1)] = order_info\r\n\r\n return order_info", "def add_features(data_dict, features_list):\n\n for name in data_dict:\n # add features for the log values of the financial data\n for feat in features_financial:\n try:\n data_dict[name][feat + '_log'] = math.log(data_dict[name][feat] + 1)\n except:\n data_dict[name][feat + '_log'] = 'NaN'\n\n # Add ratio of POI messages to total.\n try:\n total_messages = data_dict[name]['from_messages'] + 
data_dict[name]['to_messages']\n poi_related_messages = data_dict[name][\"from_poi_to_this_person\"] +\\\n data_dict[name][\"from_this_person_to_poi\"] +\\\n data_dict[name][\"shared_receipt_with_poi\"]\n poi_ratio = 1.* poi_related_messages / total_messages\n data_dict[name]['poi_ratio_messages'] = poi_ratio\n except:\n data_dict[name]['poi_ratio_messages'] = 'NaN'\n\n return data_dict" ]
[ "0.6675702", "0.64973605", "0.6014381", "0.60071164", "0.5916942", "0.58490884", "0.5743055", "0.57284117", "0.5649685", "0.5642036", "0.5622459", "0.5599008", "0.5532404", "0.55121297", "0.5506236", "0.55047566", "0.5473821", "0.5455027", "0.5451647", "0.5436743", "0.54222435", "0.53919524", "0.5380917", "0.53624856", "0.53478223", "0.5345641", "0.53385013", "0.53384936", "0.5333633", "0.53280425" ]
0.69769204
0
Return a list of dictionaries that only contain values for keys
def exclusively(self, keys, lst=None):
    minimal = self.minimal() if lst is None else lst

    def make_exclusive(d, keys):
        dct = {}
        for k in keys:
            if k in d:
                dct[k] = d[k]
            else:
                dct[k] = -999
        return dct

    lst = []
    for d in minimal:
        dct = make_exclusive(d, keys)
        if len(dct) > 0:
            lst.append(dct)
    return lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)", "def _filter_keys(item, keys):\n return dict((k, v) for k, v in item.iteritems() if k in keys)", "def exclude(m, keys):\n return {k: v for k, v in m.items() if k not in keys}", "def _filter_keys(d: dict, keys: set) -> dict:\n return {key: d[key] for key in keys if key in d}", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n return [{key: value for key, value in dic.items() if key not in redundant_keys} for dic in data]", "def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def select_keys(my_dict: Dict, keys: Sequence) -> Dict:\n keyset = set(keys)\n return {k: v for k, v in my_dict.items() if k in keyset}", "def filter_dic_by_keys(dic,allowed_keys):\n new_dic = {}\n for key in dic:\n if key in allowed_keys:\n new_dic[key] = dic[key]\n return new_dic", "def keepers(d: dict) -> dict:\n keep = {k: v for k, v in d.items() if v is not None}\n return keep", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def unique_dicts_by_value(d, key):\n return list({v[key]: v for v in d}.values())", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def subset_of_dict(dict, chosen_keys):\r\n return {key: value for key, value in dict.items() if key in chosen_keys}", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def filter_values(function, dictionary):\n return {k: v for k, v in dictionary.items() if function(v)}", "def remove_empty_values(_dict):\n return {k: v for k, v in list(_dict.items()) if v is not None}", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def select_features(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def remove_empty_list(dictionary):\n\n return {k: v for k, v in dictionary.items() if v != []}", "def clean_dict_values(d: dict, rogue_values: list) -> dict:\n return {key: value for key, value in d.items() if not value in rogue_values}", "def filter_by_keys(self, keys):\n return list(filter(lambda item: item.keyword in set(keys), self._metadata))", "def unique_dicts(d):\n return [dict(y) for y in set(tuple(x.items()) for x in d)]", "def remove_skip_values(self, data):\n return {\n key: value for key, value in data.items()\n if value not in self.__skipvalues\n }", "def filter_valid_values(dictionary):\n return ((key, value)\n for key, value in six.iteritems(dictionary)\n if value is not None)", "def dict_filter(d, keys, into=dict):\n \n if hasattr(keys, \"__call__\"):\n f = keys\n keys = filter(f, d.keys())\n return into(map(lambda k:(k,d[k]), keys))", "def exclude_keys(dictionary: Mapping, keys: 
Sequence[Hashable]) -> dict:\n return {k: v for k, v in dictionary.items() if k not in keys}", "def get_dicts(self, clean=False):\n return list(self.iter_dicts(clean=clean))" ]
[ "0.7157758", "0.6764101", "0.67270654", "0.6699573", "0.6676822", "0.6673541", "0.6655425", "0.66365665", "0.66270673", "0.6611581", "0.6571578", "0.65568775", "0.6545871", "0.6436229", "0.6432588", "0.6381771", "0.63771033", "0.6347461", "0.63027394", "0.62880474", "0.62869287", "0.6242114", "0.6226327", "0.6169943", "0.61549866", "0.61478144", "0.6137546", "0.61231184", "0.6122416", "0.61175287" ]
0.72772044
0
Merge this Box with one or more other Box instances
def merge(self, box, in_place=True):
    if in_place:
        self._merge(box)
    else:
        base = self.copy()
        base._merge(box)
        return base
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self", "def union(one, other):\n left = min(one.left, other.left)\n right = max(one.right, other.right)\n top = min(one.top, other.top)\n bottom = max(one.bottom, other.bottom)\n return BBox([[left, top], [right, bottom]])", "def append(self, other):\n for i in other.blocks:\n self.blocks.append(i)", "def mergeWith(self, others):", "def merge(self, obj):\n pass", "def combine(self, existing):\n return self", "def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def merge(self):\n elems = []\n for x in self.elems:\n if isinstance(x, self.__class__):\n elems.extend(x.merge().elems)\n else:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def merge(self, other: \"BB\") -> \"BB\":\n cp_bb = lib.cpBBMerge(self, other)\n return BB(cp_bb.l, cp_bb.b, cp_bb.r, cp_bb.t)", "def union_boxes(boxes: List[\"Box\"]) -> \"Box\":\n left, top, right, bottom = float(\"inf\"), float(\"inf\"), float(\"-inf\"), float(\"-inf\")\n for box in boxes:\n l, t, r, b = box.coordinates\n left = min(left, l)\n top = min(top, t)\n right = max(right, r)\n bottom = max(bottom, b)\n return Box(left, top, right - left, bottom - top)", "def add_box(self, box):\n mz_from = box.from_mz\n mz_to = box.to_mz\n rt_from = box.from_rt\n rt_to = box.to_rt\n self.boxes_mz.addi(mz_from, mz_to, box)\n self.boxes_rt.addi(rt_from, rt_to, box)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def Add(self, *args):\n return _Bnd.Bnd_Box_Add(self, *args)", "def mergeBboxes(bboxes, bboxes_prev):\n bboxes_merged = deepcopy(bboxes)\n for bbox in bboxes_prev:\n is_exist = False\n for bbox_merged in bboxes_merged:\n if bbox.object_id == bbox_merged.object_id:\n is_exist = True\n bbox_merged.visible = bbox.visible\n break\n if not is_exist:\n bboxes_merged.append(bbox)\n return bboxes_merged", "def extend(self, other):\n # iterate through other deque\n for item in other:\n # if the current item's data is 
None\n if item is None:\n # that deque is empty, so we're done\n break\n # if other deque has items, push back current item and loop\n self.push_back(item)", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self,other):\n if self.empty: \n self.copy(other)\n return self\n elif other.empty:\n return self\n if(other.vmin < self.vmin):\n self.vmin = other.vmin\n if(other.vmax > self.vmax):\n self.vmax = other.vmax\n\n nA = float(self.vcount)\n nB = float(other.vcount)\n nAB = nA*nB\n nAA = float(self.vcountsq)\n nBB = float(other.vcountsq)\n nX = nA+nB\n nXX = nX**2 #nAA+nBB+2*nAB #nX**2 # actually (nA+nB)^2 = (nAA+nBB+2*nAB)\n nXXX = nXX*nX\n self.vcount = nX\n self.vcountsq = nXX\n\n self.vsum += other.vsum;\n\n # merge of mean and m2\n delta = other.vmean-self.vmean;\n delta2 = delta**2\n delta3 = delta**3\n delta4 = delta**4\n self.vmean += delta*nB/nA\n self.vm2 += other.vm2 + delta2*(nAB/nX)\n self.vm3 += other.vm3 + delta3*(nAB*(nA-nB))/nXX + 3*delta*(nA*other.vm2-nB*self.vm2)/nX\n self.vm4 += other.vm4 + delta4*(nAB*(nAA-nAB+nBB))/nXXX + 6*delta2*(nAA*other.vm2+nBB*self.vm2)/nXX + 4*delta*(nA*other.vm3-nB*self.vm3)/nX\n self.dirty = True\n return self", "def _merge(self):\n raise NotImplementedError", "def union_of_bboxes(height: int, width: int, bboxes: Sequence[BoxType], erosion_rate: float = 0.0) -> BoxType:\n x1, y1 = width, height\n x2, y2 = 0, 0\n for bbox in bboxes:\n x_min, y_min, x_max, y_max = bbox[:4]\n w, h = x_max - x_min, y_max - y_min\n lim_x1, lim_y1 = x_min + erosion_rate * w, y_min + erosion_rate * h\n lim_x2, lim_y2 = x_max - erosion_rate * w, y_max - erosion_rate * h\n x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])\n x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])\n return x1, y1, x2, y2", "def union(self, other): # -> BaseGeometry:\n ...", "def merge(self, other: 'Basket') -> None:\n for item in other:\n try:\n existing = self.items.get(ref=item.ref)\n existing.quantity += item.quantity\n existing.save(update_fields=['quantity'])\n except item.DoesNotExist:\n item.basket = self\n item.save(update_fields=['basket'])\n other.delete()\n self._cached_items = None", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item" ]
[ "0.68560404", "0.6515574", "0.64632297", "0.63198626", "0.6211465", "0.62063205", "0.6064046", "0.5991924", "0.5917754", "0.5910379", "0.5859801", "0.58339703", "0.58313894", "0.57778674", "0.5742428", "0.57081246", "0.5687884", "0.56496143", "0.563442", "0.56266993", "0.5626118", "0.56121784", "0.5605912", "0.55999947", "0.558066", "0.5579901", "0.55539656", "0.55492246", "0.55463415", "0.5532292" ]
0.7087718
0
Return unique key values
def unique(self, key, lst=None):
    d = self.find(key, lst)
    vals = set(d.values())
    return sorted(list(vals))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def unique_vals(client, proj, dataset, table, col_name):\n if not client.check_table(dataset, table):\n return []\n res = run_bq_query(client, \"SELECT %s FROM [%s:%s.%s] GROUP BY %s ORDER BY %s\" % (col_name, proj, dataset, table, col_name, col_name), 120)\n return [rec[col_name] for rec in res]", "def keys(self):\n return sorted(self._local_unique_map.keys())", "def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues", "def unique(self, key: Callable[[T], Union[str, int, float]]=None) -> 'List[T]':\n return unique_values(self.array, key)", "def iunique(self, key: Callable[[T], Union[str, int, float]]=None) -> '_[T]':\n return _(unique_values(self.array, key))", "def uniq(val, key=None):\n if not isinstance(val, list):\n return val\n if key is None:\n try:\n return list(set(val))\n except TypeError:\n pass\n keys = []\n values = []\n for value in val:\n try:\n thiskey = value[key]\n except:\n thiskey = repr(value)\n if thiskey not in keys:\n keys.append(thiskey)\n values.append(value)\n return values", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def _unique(iterable):\n return list(dict.fromkeys(iterable))", "def unique_values(array: Iterable[T], key: Callable[[T], Union[str, int, float]]=None) -> List[T]:\n values = set()\n unique_array = []\n\n if key is None:\n for v in array:\n if v not in values:\n unique_array.append(v)\n values.add(v)\n else:\n for v in array:\n v_key = key(v)\n if v_key not in values:\n unique_array.append(v)\n values.add(v_key)\n\n return unique_array", "def _uniq( list ) : \r\n \r\n d = {} \r\n for e in list : \r\n d[e] = 1 \r\n \r\n return d.keys()", "def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def keySet (self) -> StringSet:\n\n Logging.trace(\">>\")\n result = set(self._keyToValueMap.keys())\n Logging.trace(\"<<: %r\", result)\n return result", "def get_key_values(self):\n return self.key_values", "def unique_ssh_results(results):\n r = {}\n for k in results:\n r[results[k][0]] = True\n return r.keys()", "def get_values(self):\n return set(self._table.keys())", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def unique(self):\n # variables for uniques \n self._currentSet = 1\n self._uniqueValue = {}\n\n pd = self._dataTable\n for col in pd:\n arr = pd[col].unique()\n for i in arr:\n unique_entry = ((col,i),)\n self._uniqueValue[unique_entry] = 0 \n\n self._sets[self._currentSet] = self._uniqueValue", "def AllKeys(self) -> _n_0_t_1[str]:", "def get_unique_hstore_keys(\n session: 'Session',\n column: 'Column[dict[str, Any]]'\n) -> set[str]:\n\n base = 
session.query(column.keys()).with_entities( # type:ignore\n sqlalchemy.func.skeys(column).label('keys'))\n\n query = sqlalchemy.select(\n [sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],\n distinct=True\n ).select_from(base.subquery())\n\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()", "def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def get_unique(self):\n return self.serie.nunique()", "def get_unique_values(local_data, attr):\n\tvalues = []\n\tfor element in local_data:\n\t\tif element[attr] not in values:\n\t\t\tvalues.extend([element[attr]])\n\treturn values", "def find_uniq_preserve_order(orig_keys, orig_values=None):\n seen = {}\n keys = []\n values = []\n for i, item in enumerate(orig_keys):\n if item in seen:\n continue\n seen[item] = 1\n keys.append(item)\n if orig_values:\n values.append(orig_values[i])\n return keys, values", "def keys():", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))" ]
[ "0.72342044", "0.7174483", "0.68495494", "0.67468446", "0.66601634", "0.6650714", "0.6622684", "0.6563308", "0.64356095", "0.64118284", "0.6411825", "0.6408245", "0.6370248", "0.6339864", "0.6293389", "0.6292799", "0.62809783", "0.6236327", "0.62303406", "0.6222865", "0.61984926", "0.6180261", "0.61780614", "0.6175361", "0.6149021", "0.6135467", "0.6114433", "0.6101749", "0.60853934", "0.60821867" ]
0.7189072
1
The set methods must raise a ComponentsErrorEx in case of wrong mode
def test_wrong_mode(self):
    self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modifyNotValuableComponents(self):\n # Nothing to do\n pass", "def set_comms_mode(self):", "def magic_xmode(self,parameter_s = ''):\n\n new_mode = parameter_s.strip().capitalize()\n try:\n self.InteractiveTB.set_mode(mode = new_mode)\n print 'Exception reporting mode:',self.InteractiveTB.mode\n except:\n warn('Error changing exception modes.\\n' + str(sys.exc_info()[1]))", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )", "async def test_set_swing_mode_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"Off\" == state.attributes.get(ATTR_SWING_MODE)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_swing_mode(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"Off\" == state.attributes.get(ATTR_SWING_MODE)", "def enable(self) -> None:", "def error(self):\n pass", "def test_mode_invalid(mode):\n # Test errors on construction\n with pytest.raises(TypeConversionError):\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5,\n mode=mode)\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5)\n gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0}\n # Test errors on setting\n with pytest.raises(TypeConversionError):\n gay_berne.mode = mode", "def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def enable(self):", "def modifyComponentsNotPreferableOnServer(self):\n # Nothing to do\n pass", "def test_handling_wrong_context(member, mode, arg, msg):\n with pytest.raises(TypeError) as excinfo:\n member.set_validate_mode(getattr(Validate, mode), arg)\n assert msg in excinfo.exconly()", "async def test_async_set_preset_mode_invalid():\n\n client = Mock()\n device_stub = Mock()\n\n logger = Mock()\n logger.debug = Mock()\n logger.warning = Mock()\n\n wrapper = WinixDeviceWrapper(client, device_stub, logger)\n\n await wrapper.async_set_preset_mode(\"INVALID_PRESET\")\n logger.warning.call_count == 1", "def validate(self, mode): # pragma: no cover\n pass", "def error(self):\n ...", "def test_component_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially 
contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n # create component_owner option\n self.env.config.set('ticket-field-config','component_owner','test')\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_component_list(), self.new['component'])", "def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )", "def test_set_mode_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_mode(None)", "def set_state( self ):", "async def test_set_operation_bad_attr_and_state(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"\n with pytest.raises(vol.Invalid):\n await common.async_set_operation_mode(opp, None, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"", "def test_tooManyModeParameters(self):\n self._sendModeChange(\"+s\", \"wrong\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Too many parameters\", errors[0].getErrorMessage())", "def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def mode (self, mode) :\r\n self.mode_ = mode", "def set_error(self, index: int) -> None:\n ...", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def xmode(self, parameter_s=''):\n\n def xmode_switch_err(name):\n warn('Error changing %s exception modes.\\n%s' %\n (name,sys.exc_info()[1]))\n\n shell = self.shell\n if parameter_s.strip() == \"--show\":\n shell.InteractiveTB.skip_hidden = False\n return\n if parameter_s.strip() == \"--hide\":\n shell.InteractiveTB.skip_hidden = True\n return\n\n new_mode = parameter_s.strip().capitalize()\n try:\n shell.InteractiveTB.set_mode(mode=new_mode)\n print('Exception reporting mode:',shell.InteractiveTB.mode)\n except:\n xmode_switch_err('user')" ]
[ "0.58727384", "0.5831983", "0.57986915", "0.5704553", "0.56496924", "0.5616517", "0.5566472", "0.55448025", "0.54887784", "0.54351026", "0.54308224", "0.54267853", "0.54206556", "0.5400841", "0.5366232", "0.5362594", "0.5343893", "0.53403306", "0.53249055", "0.5301672", "0.5286788", "0.5281175", "0.5260891", "0.5248365", "0.5246494", "0.5238808", "0.5200003", "0.5199685", "0.5176162", "0.5167902" ]
0.6587823
0
collect docker logs from servers $ command is $ log_collector.py
def main():
    global tar_file_descr
    help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'
    hosts = []
    if len(sys.argv) == 2:
        if '-h' == sys.argv[1] or '--help' == sys.argv[1]:
            print(help_msg)
            sys.exit(0)
        elif 'all' == sys.argv[1]:
            # get logs from all hosts
            hosts = []
            host_objs = CLIENT.host_get_all()
            for host_obj in host_objs:
                hosts.append(host_obj.name)
        else:
            # get logs from specified hosts
            hostnames = sys.argv[1].split(',')
            for host in hostnames:
                if host not in hosts:
                    hosts.append(host)
    else:
        print(help_msg)
        sys.exit(1)

    # open tar file for storing logs
    fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_', suffix='.tgz')
    os.close(fd)  # avoid fd leak
    with tarfile.open(tar_path, 'w:gz') as tar_file_descr:
        # clear out old logs
        if os.path.exists(LOGDIR):
            shutil.rmtree(LOGDIR)
        os.mkdir(LOGDIR)

        # gather logs from selected hosts
        try:
            for host in hosts:
                get_logs_from_host(host)
            # tar up all the container logs
            tar_file_descr.add(LOGDIR, arcname='container_logs')
        finally:
            # remove uncompressed logs
            if os.path.exists(LOGDIR):
                shutil.rmtree(LOGDIR)

    # gather dump output from kolla-cli
    dump_kolla_info()
    print('Log collection complete. Logs are at %s' % tar_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def do_logs(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['stdout'] = args.stdout\n opts['stderr'] = args.stderr\n opts['since'] = args.since\n opts['timestamps'] = args.timestamps\n opts['tail'] = args.tail\n opts = zun_utils.remove_null_parms(**opts)\n logs = cs.containers.logs(**opts)\n print(logs)", "def tail(name):\n\n try:\n container = CLIENT.containers.get(name)\n for line in container.logs(stream=True):\n click.secho(line.strip(), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def logs(self, container: Container) -> str:", "def stream_container_logs(container: Container) -> None:\n logs = container.logs(stream=True, follow=True)\n for log in logs:\n for line in log.splitlines():\n print(f'[Container {container.id[:5]}] {line.decode()}')", "def on_server_start(self):\n self._container = self._docker_client.containers.run(self.docker_image_name, detach=True, **self.docker_params)\n self.signal_ready()\n\n for log_line in self.get_lines():\n try:\n alert_dict = self.parse_line(log_line)\n if alert_dict:\n self.add_alert_to_queue(alert_dict)\n except Exception:\n self.logger.exception(None)", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def logging(containerids,filename):\n click.echo('*** LOGS CONSOLIDATION IS INITIATED')\n for x in containerids:\n click.echo(\"Got Logs for Container:\"+str(x))\n base = \"http://127.0.0.1:2376\"\n url = \"/containers/%s/logs?stderr=1&stdout=1&tail=100&stream=0\" % (str(x))\n try:\n resp = requests.get( base + url)\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n click.echo(message)\n with click.open_file(filename, 'a+') as f:\n f.write(\"\\nContainerID(%s): \\n\" %x)\n for line in resp:\n f.write(str(line)+\"\\n\")", "def logs(name):\n\n try:\n container = CLIENT.containers.get(name)\n click.secho(str(container.logs()), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def collect_logs_from_pod(namespace, pod, logs_dir, k8s_cli):\n containers = get_list_of_containers_from_pod(namespace, pod, k8s_cli)\n init_containers = get_list_of_init_containers_from_pod(namespace, pod, k8s_cli)\n containers.extend(init_containers)\n if containers is None:\n logger.warning(\"Namespace '%s' Could not get containers for pod: %s list - \"\n \"skipping pods logs collection\", namespace, pod)\n return\n for container in containers:\n cmd = \"{} logs -c {} -n {} {}\" \\\n .format(k8s_cli, container, namespace, pod)\n with open(os.path.join(logs_dir, \"{}-{}.log\".format(pod, container)),\n \"w+\", encoding='UTF-8') as file_handle:\n _, output = run_shell_command(cmd)\n file_handle.write(output)\n\n # operator and admission containers restart after changing the operator-environment-configmap\n # getting the logs of the containers before the restart can help us with debugging potential bugs\n get_logs_before_restart_cmd = \"{} logs -c {} -n {} {} -p\" \\\n .format(k8s_cli, container, namespace, pod)\n err_code, output = run_shell_command(get_logs_before_restart_cmd)\n container_log_before_restart_file = os.path.join(logs_dir,\n '{}-{}-instance-before-restart.log'.format(pod, container))\n if err_code == 0: # Previous container instance found; did restart.\n with open(container_log_before_restart_file, \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(output)\n\n 
logger.info(\"Namespace '%s': + %s-%s\", namespace, pod, container)", "def _dump_docker_log(container_name: str, dir: Path) -> None:\n destination = dir / f\"{container_name}.log\"\n with open(destination, \"wb\") as out_stream:\n popen = subprocess.Popen(\n [\n \"docker\",\n \"logs\",\n \"--timestamps\",\n container_name,\n ],\n stdout=out_stream,\n )\n popen.wait()", "def main():\n lines = read_syslog()\n if len(sys.argv) > 1:\n lines = filter_logs(sys.argv[1], lines)\n for line in lines:\n print(line)", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def getLogs():", "def getLogs():", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def mix():\n\n with open(\"output.log\", 'w') as outfile:\n log_file = [container.logs(timestamps=True).split(\",\") for container in\n CLIENT.containers.list()]\n for c_log in log_file:\n outfile.write(\" \".join(map(str, c_log)) + '\\n')\n click.secho('Log output of each container has been written to output.log.',\n bg='blue', fg='white')", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def logs(self, data):\n required = {'token', 'container_id'}\n api.validate(data, required)\n token = data['token']\n container_id = data['container_id']\n self.credentials_module.authorize_container(token,\n container_id)\n results = self.docker_module.logs_container(container_id)\n return results", "def getDockerOutput(imgName, command, client):\n cont = None\n try:\n cont = client.containers.create(image=imgName, command=command)\n cont.start()\n ret_code = cont.wait()\n if isinstance(ret_code, dict):\n ret_code = ret_code['StatusCode']\n logs = cont.logs(stdout=True, stderr=False, stream=False)\n cont.remove()\n except Exception as err:\n if cont:\n try:\n cont.remove()\n except Exception:\n pass\n logger.exception(\n 'Attempt to docker run %s %s failed', imgName, command)\n raise DockerImageError(\n 'Attempt to docker run %s %s failed ' % (\n imgName, command) + str(err), imgName)\n if ret_code != 0:\n raise DockerImageError(\n 'Attempt to docker run %s %s failed' % (imgName, command), imgName)\n return logs", "def collect_tcpdump(self, log_dir, count=10, timeout=30):\n log_type = \"tcpdump\"\n log_name = \"tcpdump.txt\"\n cmd = \"tcpdump -c {} > /tmp/{}\".format(count, log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd,\n timeout=timeout, background=False)", "def container_logs(ctx, token, container_id):\n try:\n out = ctx.obj.container_logs(token, container_id)\n print_message(out)\n except BaseException:\n m = (\"Error: No container related to %s\" %\n container_id)\n print_error(m)", "def collect_k8s_logs(cfg: ElasticBlastConfig):\n dry_run = cfg.cluster.dry_run\n k8s_ctx = cfg.appstate.k8s_ctx\n if not k8s_ctx:\n raise RuntimeError(f'kubernetes context is missing for {cfg.cluster.name}')\n # TODO use named constants for labels and containers\n # also modify corresponding YAML templates and their substitution\n get_logs(k8s_ctx, 'app=setup', [K8S_JOB_GET_BLASTDB, K8S_JOB_IMPORT_QUERY_BATCHES, K8S_JOB_SUBMIT_JOBS], dry_run)\n get_logs(k8s_ctx, 'app=blast', [K8S_JOB_BLAST, K8S_JOB_RESULTS_EXPORT], dry_run)", "def cli_copy_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp pccserver:/home/logs/ /tmp\"\n cli_run(host_ip, 
linux_user, linux_password, cmd)\n os.makedirs(\"output/pccserver_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/pccserver_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/pccserver_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/pccserver_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/pccserver_logs/error.log\")\n cmd = \"sudo rm -rf /home/ceph/; sudo docker cp pccserver:/home/jobs/ceph /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/pccserver_logs/ceph\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/ceph/cluster/\",\"output/pccserver_logs/ceph/\")\n \n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp platina-executor:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/platina_executor_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/platina_executor_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/platina_executor_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/platina_executor_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/platina_executor_logs/error.log\")\n cmd = \"sudo rm -rf /home/kubernetes/; sudo docker cp platina-executor:/home/jobs/kubernetes /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/platina_executor_logs/kubernetes\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/kubernetes/cluster/\",\"output/platina_executor_logs/kubernetes/\")\n \n cmd = \"sudo rm -rf /output/logs\"\n os.system(cmd) \n \n return \"OK\"\n except Exception as e:\n return {\"Error\": str(e)}", "def cmd_logs(args):\n\n remote.show_log(_get_current_project_name(), num=args.num, tail=args.tail)", "def get_docker_logs(container_name):\n p = subprocess.run(\n [\"docker\", \"logs\", container_name],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n return p.stdout.decode()", "def CollectLogs(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"collectLogs\", payload=payload, response_object=None)", "def docker_worker():", "def run(self):\n \n from subprocess import PIPE, Popen\n from shlex import split\n tag = 0\n old_r = ''\n location = '172.20.127.233'\n while 1:\n cmd = 'tcpdump -v -i eth1' \n p = Popen(split(cmd),stdout=PIPE) \n syslog_dict = {}\n for row in p.stdout:\n r = row\n if ('syslog' in r):\n tag = 1\n segment = old_r\n segment = segment + r\n elif tag == 1:\n tag = 2\n segment = segment + r\n elif tag == 2:\n tag = 0\n segment = segment + r\n tm = datetime.now().isoformat()\n name = '172.20.127.233'+':'+str(tm)\n type = 'syslog'\n 
syslog_dict[name]={'object-name':name,'object-type':type,'object-location':location,'location-type':'network','message-content':segment,'timestamp':datetime.now()}\n self.updates_and_deletes(syslog_dict)\n else:\n old_r =r\n #except KeyboardInterrupt:\n # p.terminate()\n\t\t\t######################\n # perform collection #\n # update and delete #\n ##################### \n # call super's function to perform updating and deleting\n #self.updates_and_deletes(parking_dict)\n #######################\n # sleep for some time #\n #######################\n #time.sleep(REFRESH_RATE)\n #time.sleep(sleep_time)", "def show_logs_for_running_containers(services, tail):\n if not check_for_docker_compose_file():\n log.info('No running containers found')\n sys.exit(1)\n\n try:\n if tail:\n run_docker_compose_command(['logs', '-f'] + services)\n else:\n run_docker_compose_command(['logs'] + services)\n except KeyboardInterrupt:\n sys.exit(0)" ]
[ "0.73023754", "0.66543895", "0.6469975", "0.6311792", "0.6195516", "0.619132", "0.6190507", "0.618653", "0.6127707", "0.6075922", "0.60741675", "0.60354525", "0.60254896", "0.59426993", "0.59426993", "0.59009355", "0.58668494", "0.58238095", "0.58153385", "0.57909554", "0.5778273", "0.57710785", "0.5757475", "0.57255214", "0.57233375", "0.5722884", "0.572274", "0.5717944", "0.57012284", "0.5699877" ]
0.6732043
1
Read in labels from digitStruct.mat file to create a dict of image file name and corresponding labels
def read_labels(digitstruct_file):
    labels = dict()
    for dsObj in tdqm(yieldNextDigitStruct(digitstruct_file), ncols=50):
        image_labels = []
        for bbox in dsObj.bboxList:
            image_labels.append(bbox.label)
        labels[dsObj.name] = image_labels
    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels 
= np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def get_pet_labels(images_dir):\r\n \r\n # Creates a list of files in directory from pet images directory\r\n in_files = listdir(images_dir)\r\n \r\n # Process each of the files such that the created dictionary would have\r\n # key = filename and the value = picture label\r\n \r\n # Create an empty dictionary to hold pet labels\r\n petlabels_dic = dict()\r\n \r\n \r\n \r\n for idx in range(0, len(in_files), 1): \r\n if in_files[idx][0] != \".\":\r\n pet_image_name = in_files[idx].split(\"_\")\r\n # Check if the first character is uppercase letter. If it is, then lowercase that first character\r\n if pet_image_name[0].isupper() : \r\n pet_image_name = pet_image_name.lower()\r\n # Create a temporary label variable to hold pet label name\r\n pet_label = \" \"\r\n \r\n # Process each of the character strings(words) split by '_' in \r\n # the list pet_image_name\r\n for word in pet_image_name: \r\n if word.isalpha():\r\n pet_label += word + \" \"\r\n pet_label = pet_label.strip()\r\n if in_files[idx] not in petlabels_dic:\r\n petlabels_dic[in_files[idx]] = [pet_label]\r\n else: \r\n print(\" Warning: Duplicate files exist in dictionary\", in_files[idx])\r\n \r\n \r\n # Return dictionary of pet lables\r\n return(petlabels_dic)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def 
get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n 
training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_label(self, file, variable_name=\"group\"):\n data = scipy.io.loadmat(file)\n self.logger.info(\"loading mat file %s\", file)\n label = data[variable_name].todense().astype(np.int)\n label = np.array(label)\n print(label.shape, type(label), label.min(), label.max())\n return label", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def create_readable_names_for_imagenet_labels():\n\n base_url = 'http://cnbj1-fds.api.xiaomi.net/ml-datasets/imagenet/' # noqa\n synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)\n synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)\n\n filename, _ = urllib.urlretrieve(synset_url)\n synset_list = [s.strip() for s in open(filename).readlines()]\n num_synsets_in_ilsvrc = len(synset_list)\n assert num_synsets_in_ilsvrc == 1000\n\n filename, _ = urllib.urlretrieve(synset_to_human_url)\n synset_to_human_list = open(filename).readlines()\n num_synsets_in_all_imagenet = len(synset_to_human_list)\n assert num_synsets_in_all_imagenet == 21842\n\n synset_to_human = {}\n for s in synset_to_human_list:\n parts = s.strip().split('\\t')\n assert len(parts) == 2\n synset = parts[0]\n human = parts[1]\n synset_to_human[synset] = human\n\n label_index = 1\n labels_to_names = {0: 'background'}\n for synset in synset_list:\n name = synset_to_human[synset]\n labels_to_names[label_index] = name\n label_index += 1\n\n return labels_to_names", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def ExtractLabel(ImgName):\n # Each img has name notation \"*****a0X*\" where X is PlasticType\n PlasticType = ImgName[7] \n return {\n '1': 0, # PET\n '2': 1, # HDPE\n '4': 2, # LDPE\n '5': 3, # PP\n '6': 4, # PS\n '7': 5, # Other\n }[PlasticType]", "def read_image_with_label(dir, file):\n assert type(file) == str, 
\"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def get_pet_labels(image_dir):\n # Create dictionary\n petlabels_dic = {}\n\n # Retrieve the filenames from folder pet_images/\n # Try to catch exceptions (folder does not exists, etc..)\n try:\n filename_list = listdir(image_dir)\n except:\n print('** Error: unable to list files in \"{}\" folder.'.format(image_dir))\n exit()\n else:\n for idx in range(0,len(filename_list)):\n #if filename_list[idx] not in petlabels_dic: # required? probably not\n # Remove extension from filename\n filename = filename_list[idx].split('.')[0]\n # Create a list of words from filename, removing digits\n filename_labels = list(filter(lambda label: label.isalpha(), filename.split('_')))\n # Create key->value item in dictonary\n petlabels_dic[filename_list[idx]] = [\" \".join(filename_labels).lower()]\n\n # Return dictionary\n return petlabels_dic" ]
[ "0.7442581", "0.67145514", "0.6680717", "0.66700083", "0.6651974", "0.6599294", "0.65706545", "0.6568262", "0.65624034", "0.65466106", "0.6527709", "0.65229243", "0.65100825", "0.6500305", "0.649048", "0.6466592", "0.6466018", "0.6442053", "0.6429563", "0.6409631", "0.63989353", "0.6398331", "0.6392108", "0.63798136", "0.63793224", "0.63647455", "0.6362771", "0.63621897", "0.6351548", "0.6340121" ]
0.8415674
0
Construct a heap from a list of elements with priorities. Each element of the list must be in the form (Item, Priority).
def construct_heap(self, elems):
    for e in elems:
        self.n += 1
        self.A.append(e)
        self.pos[e[0]] = self.n
    for i in range(self.n // 2, 0, -1):
        self.combine(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_sort(list):\n pass", "def build_heap(self, items):\n for key in items:\n self.insert(key)", "def __init__(self, list = []):\n # initialize empty heap\n self.heap = []\n\n # initialize heap with provided list\n for element in list:\n self.add(element)", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def build_heap(data):\n n = len(data) # elements 0 .. n-1\n swaps = []\n def swap(i, j):\n t = data[i]\n data[i] = data[j]\n data[j] = t\n swaps.append((i,j))\n def sift_down(i):\n # 3-way comparison to restore heap property to i\n new_i = i\n l = left(i); r = right(i)\n if l < n and data[l] < data[new_i]: new_i = l\n if r < n and data[r] < data[new_i]: new_i = r\n if not i == new_i:\n # i did not satsify heap property, swap and carry on down\n swap(i, new_i)\n sift_down(new_i)\n # starting from end, parent of n-1 is first that may break heap condition\n for i in range(parent(n - 1), -1, -1):\n sift_down(i)\n return swaps", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def heapify(x):\n pass", "def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)", "def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)", "def buildHeap(A):\n n = len(A)\n for i in range(n/2-1, -1, -1):\n heapify(A, i, n)", "def construct_max_heap(self, lst):\n self.heap_list = lst\n #start compare node\n node = (len(self.heap_list)-2)/2\n while node >= 0:\n self.sift_down(node, len(self.heap_list)-1)\n node -= 1", "def build_heap(data: List[int]) -> List[Tuple[int, int]]:\n swaps: List[Tuple[int, int]] = []\n\n n = len(data)\n start = ceil(n/2) - 1\n for i in range(start, -1, -1):\n swaps = sink_down(i, data, swaps)\n\n return swaps", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heap_sort(array):\n p = PriorityHeap(min == False)\n n = len(array)\n for i in range(n):\n p.push(array[i], array[i])\n for j in range(n - 1, -1, -1):\n item = p.pop().value\n array[j] = item\n return array", "def heap_sort(alist: list, key=None) -> list:\n newList = List()\n hp = BinaryHeap(func=key)\n\n for item in alist:\n hp.heappush(item)\n\n for _ in range(len(alist)):\n newList.append(hp.heappop())\n\n return newList", "def _create_priorities(self, pri):\n heaps = self.priorities\n heaps[pri] = MinBinaryHeap()", "def __init__(self, items=[]):\n self.set = dict((item, True) for item in items)\n self.heap = self.set.keys()\n heapq.heapify(self.heap)", "def testArbitraryItems(self):\n hd = HeapDict(size=2)\n item1 = self.PriorityItem(1.0, [None, 'Arbitrary item'])\n item2 = self.PriorityItem(2.0, {'Another item'})\n item3 = self.PriorityItem(3.0, (1, 'Third item'))\n item4 = self.PriorityItem(4.0, 0)\n hd.push(1, item1)\n hd.push(1, item3)\n hd.push(1, item2)\n hd.push(1, item4)\n 
self.assertEqual(hd.get_result(), {1: [item4, item3]})", "def sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def test_priority_que_success_priority_multiple(priority_queue):\n priority_queue.insert(20)\n priority_queue.insert(5)\n priority_queue.insert(100, 5)\n priority_queue.insert(10, 2)\n priority_queue.insert(50, 1)\n assert priority_queue._heap[0].value == 50", "def build_heap(self, arr):\n i = len(arr) // 2\n self.size = len(arr)\n self.heap_list = [-1] + arr[:]\n while i > 0:\n self.percolate_down(i)\n i = i - 1", "def build_max_heap(self, list_to_be_heap):\n self.heaplist = self.heaplist + list_to_be_heap\n self.currentsize = len(list_to_be_heap)\n\n # as it follow properties of complete binary tree, non leaf nodes will end to total size / 2\n index = self.currentsize // 2\n\n # > 0 : to ignore first element of the array which is 0..\n while index > 0:\n self.shift_item_down(index)\n index -= 1", "def heap_sort(l):\r\n h = SMinHeap()\r\n for el in l:\r\n h.push(el)\r\n sorted_list = [h.pop() for x in range(len(h.array))]\r\n return sorted_list", "def heapsort(self) -> Generator[T, None, None]:\n h = [e for e in self.priority_queue]\n while h:\n entry = heapq.heappop(h)[-1][0]\n if entry is not None:\n yield cast(T, entry)", "def make_heap(self, frequency):\n\n\n\t\t\tfor key in frequency:\n\t\t\t\tnode = self.HeapNode(key, frequency[key])#instaciamos un nodo con el valor y frecuencia\n\t\t\t\theapq.heappush(self.heap, node)#agregamos el nodo al priority queue", "def __init__(self, values=[]):\n self.priority_queue = {}\n if isinstance(values, list):\n try:\n for value, priority in values:\n self.insert(value, priority)\n except ValueError:\n raise TypeError(\"You need to tuplize your priorities\")\n else:\n raise TypeError(\"Put your items in a list\")", "def build_heap(data):\n # The following naive implementation just sorts the given sequence\n # using selection sort algorithm and saves the resulting sequence\n # of swaps. This turns the given array into a heap, but in the worst\n # case gives a quadratic number of swaps.\n #\n data_ = [0] * (len(data) + 1)\n data_[1:] = data\n n = len(data)\n swaps = []\n for i in reversed(range(n // 2 + 1)):\n if i == 0:\n break\n sift_down(data_, i, swaps)\n\n return swaps", "def build_heap(self, A: list):\n self.size = len(A)\n med = (self.size // 2) - 1 #Mid point of array\n for i in range(0, med + 1): #Reverse iteration\n self.heapify(A, med - i) #Reverse iteration" ]
[ "0.6715845", "0.6644555", "0.66232336", "0.6562983", "0.6562983", "0.6440951", "0.64229244", "0.64208883", "0.64108974", "0.63691944", "0.6351629", "0.63361603", "0.632387", "0.6306449", "0.6306449", "0.62739", "0.62633675", "0.62222654", "0.6173713", "0.6163355", "0.61413926", "0.6126718", "0.61000144", "0.6086264", "0.6077235", "0.607588", "0.60739946", "0.60594726", "0.60227156", "0.6021205" ]
0.6710219
1
Inserts the element elem with priority prio.
def insert(self, elem, prio):
    self.n += 1
    self.A.append((elem, prio))
    self.pos[elem] = self.n
    # Sift the new element up from the last position toward the root.
    i = self.n
    p = i // 2
    self.insert_loop(i, p)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None", "def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def insertElement(self, element , i ):\n\n self.heap[i] = element\n # Parent of ith position\n parenti = i // 2\n\n # Inserting element into the heap\n try:\n # Bubbling up\n if parenti != 0 and self.heap[i].dijkstraCriterion < self.heap[parenti].dijkstraCriterion:\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\n self.insertElement(element, parenti)\n # Incrementing self.i position\n else:\n self.i += 1\n return\n\n except:\n # Bubbling up\n self.heap[i] = 'NaN'\n self.insertElement(element, parenti)\n return", "def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))", "def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n priority.push(self._order)\n self._order += 1", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def push(self, elem):\n pass", "def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)", "def insert(self, node, priority=0):\n\n if node in self.entry_finder:\n self.delete(node)\n entry = [priority, node]\n self.entry_finder[node] = entry\n # logger_cagada.debug(\"el puto entry %s\" % entry)\n # logger_cagada.debug(\"l nodo q c agrega %s es %s\" % (type(node), node))\n self.heappush(self.heap, entry)\n # logger_cagada.debug(\"el finde aora es %s\" % self.entry_finder)\n # logger_cagada.debug(\"el heap aora es %s\" % self.heap)\n self.valida_caca()", "def insert(self, element):\n if self.size >= self.maxsize:\n return\n self.size += 1\n self.heap[self.size] = element\n\n current = self.size\n\n while self.heap[current] < self.heap[self.parent(current)]:\n self.swap(current, self.parent(current))\n current = self.parent(current)", "def insert(self, element):\n if self.size >= self.maxsize : \n return\n self.size+= 1\n self.Heap[self.size] = element \n \n current = self.size \n \n while self.Heap[current] < self.Heap[self.parent(current)]: \n self.swap(current, self.parent(current)) \n current = self.parent(current)", "def push(self, element, value):\n insert_pos = 0\n for index, el in enumerate(self.tops):\n if not self.find_min and el[1] >= value:\n insert_pos = index + 1\n elif self.find_min 
and el[1] <= value:\n insert_pos = index + 1\n self.tops.insert(insert_pos, [element, value])\n self.tops = self.tops[: self.n]", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def insertChildBefore(new_elem, elem):\n parent = DOM.getParent(elem)\n id = DOM.getChildIndex(parent, elem)\n DOM.insertChild(parent, new_elem, id)", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def Insert(self, val, extra=None):\n if self._size >= 0:\n if val > self.best[0]:\n idx = bisect.bisect(self.best, val)\n # insert the new element\n if idx == self._size:\n self.best.append(val)\n self.extras.append(extra)\n else:\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)\n # and pop off the head\n self.best.pop(0)\n self.extras.pop(0)\n else:\n idx = bisect.bisect(self.best, val)\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)", "def insert(self, element: Node):\r\n if self._top == None:\r\n self._top = Node(None, element)\r\n return None\r\n new_element = self._add_element(element)\r\n self._correct_tree(new_element)", "def add(self, elem):", "def add(self, elem):", "def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def add(self, item, priority):\n heappush(self.contents, (priority, item))", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def addNode(self, element):\n i = 0\n while i < len(self.nodes) and self.nodes[i].weight < element.weight:\n i += 1\n self.nodes.insert(i, element)", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def add_element(self, elem):\n self.add_element_with_id(elem, self.next_id)", "def insertElement(T,i):\r\n if not isFull(T):\r\n insertInternal(T,i)\r\n else:\r\n m, l, r = split(T)\r\n T.data = [m]\r\n T.child = [l,r]\r\n T.isLeaf = False\r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)" ]
[ "0.74379945", "0.73325443", "0.71752936", "0.69993013", "0.65939367", "0.65610933", "0.65355706", "0.6510952", "0.6450642", "0.6369028", "0.63523465", "0.62270975", "0.622298", "0.6217232", "0.6212362", "0.6154356", "0.6117256", "0.61147964", "0.6046682", "0.5996028", "0.59924835", "0.59924835", "0.59768164", "0.59750265", "0.5967423", "0.5926508", "0.5858709", "0.5819772", "0.5814046", "0.57991993" ]
0.83393705
0
Changes the priority of the element elem to prio.
def change_priority(self, elem, prio):
    pos = self.pos[elem]
    currPrio = self.A[pos][1]
    self.A[pos] = (elem, prio)
    if self.cmpFn(prio, currPrio):
        self.insert_loop(pos, pos // 2)  # Up heapify
    else:
        self.combine(pos)  # Down heapify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPriority(self, p):\n self.priority = p", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def set_priority(self, priority):\n self._priority = priority", "def priority(self, priority):\n self._priority = priority", "def setPriority(self, *args):\n return _libsbml.Event_setPriority(self, *args)", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_priority(self, priority):\n self.options[\"priority\"] = priority", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def set_priority(self, priority):\n self.options['priority'] = priority", "def _set_priority(self, args):\n if 'priority' in args:\n try:\n self._priority = float(args['priority'])\n except TypeError:\n raise InvalidPriority('Invalid priority: %s' % args['priority'])\n except ValueError:\n raise InvalidPriority()\n else:\n self._priority = None", "def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None", "def change_priority(self, priority, key):\n index = self.__position[key]\n current = self.__heap[index][0]\n self.__heap[index][0] = priority\n\n if priority > current:\n self.__bubble_down(index)\n else:\n self.__bubble_up(index)", "def SetPriorityValue(self, *args, **kwargs):\n pass", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.doaesprit_sptr_set_thread_priority(self, priority)", "def setpriority(pid=None, priority=1):\n\n #import win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = 
windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def decrease_priority(self):\n self._priority += 1", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_priority(self, job_id, priority):\n job = Job.get_job_by_id(job_id)\n self.access_handler.check_set_priority(job)\n self.master.set_priority(job, priority)", "def _priority_changed(self, priority):\n if self.next is not None:\n self.next.priority = priority", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_fabric_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"fabric_priority must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"fabric-priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__fabric_priority = t\n if hasattr(self, '_set'):\n self._set()" ]
[ "0.67719734", "0.6752105", "0.6318923", "0.6283811", "0.6121634", "0.61172235", "0.6071931", "0.6042161", "0.5969836", "0.5969836", "0.5969836", "0.5935215", "0.5908808", "0.59068906", "0.5899444", "0.5884699", "0.5880411", "0.58484524", "0.58209383", "0.5818788", "0.5729039", "0.5682143", "0.5634272", "0.5613425", "0.5603264", "0.5583622", "0.5583622", "0.5583622", "0.55167353", "0.55167353" ]
0.81994
0
Gets the priority of an element.
def get_priority(self, elem):
    pos = self.pos[elem]
    return self.A[pos][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getpriority(self, name):\n\t\tif name not in self:\n\t\t\treturn None\n\t\treturn self.attributes[name].priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def get_priority(self):\n return self._priority", "def get_priority(self):\n return self._priority", "def priority(node):\n return node.priority", "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def priority(self):\n return self._pri", "def get_priority(self):\n return self.options[\"priority\"]", "def priority(self) -> int:\n return pulumi.get(self, \"priority\")", "def getPriority(self):\n return self.priority", "def get_priority(self):\n return self.options['priority']", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> str:\n return pulumi.get(self, \"priority\")", "def getPriority(self, *args):\n return _libsbml.Event_getPriority(self, *args)", "def getPriority(self):", "def get_priority(self):\n return str(self.priority)", "def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")", "def get_priority(self):\n priority_value = (self.__priority if Priority.IMPORTANCE_RANKING == PriorityRanking.DESCENDING else Priority.MAX_PRIORITY-self.__priority)\n return 2 * priority_value", "def find_priority(x):\n pat = r\"priority\\s*(\\d*)\"\n result = re.search(pat, str(x), flags=re.IGNORECASE)\n if result:\n return int(result.group(1))" ]
[ "0.7882315", "0.7649342", "0.7649342", "0.7649342", "0.7649342", "0.7647876", "0.7647876", "0.7504323", "0.74972016", "0.7453977", "0.74093306", "0.74017596", "0.73951834", "0.73866266", "0.7357312", "0.7357312", "0.7357312", "0.72419477", "0.72419477", "0.72419477", "0.72419477", "0.7177661", "0.7177661", "0.7129124", "0.70402867", "0.7033402", "0.6959805", "0.6786755", "0.6735806", "0.6654902" ]
0.8585831
0
Transcodes a file src to a file dest.
def transcode(self, src: Path, dest: Path) -> None:
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def case_convert_file_to_file(source_path: str, dest_path: str, style: CaseStyleEnum) -> None:\n with open(source_path, 'r') as f:\n contents = f.read()\n new_contents = case_convert_stream(contents, style)\n with open(dest_path, 'w') as f:\n f.write(new_contents)", "def _copy_file ( self, source, dest ):\n return", "def unify(src, dst):\n\n # NOTE: at this point it is assumed files are unifiable\n\n # get a temp file name\n dir = os.path.split(src)[0]\n tmp_handle, tmp_path = tempfile.mkstemp(dir=dir)\n os.close(tmp_handle)\n\n # rename the destination, in case we need to back out\n os.rename(dst, tmp_path)\n\n # link source to destination\n try:\n os.link(src, dst)\n except:\n # back out\n print 'Could not link %s -> %s, backing out' % (src, dst)\n try:\n if os.path.exists(dst):\n os.unlink(dst)\n os.rename(tmp_path, dst)\n except:\n print 'Could not back out!!! the destination file is still there as', tmp_file\n raise exceptions.OSError\n\n # done, remove the temp file\n os.unlink(tmp_path)", "def compress(self, sourcefile, destinationfile):\n with open(sourcefile, 'rb') as src_file, open(destinationfile,\n 'wb') as dest_file: # Öffne die Quell- und Zieldatei\n dest_file.write(bytes(\"rl3\", 'utf-8')) # Schreibe rl3 in die neue Datei zur Algorythmuserkennung\n extension_orig = bytes(os.path.splitext(sourcefile)[1][1:], 'utf-8') # Splitte die Dateiendung\n dest_file.write(len(extension_orig).to_bytes(1, 'big')) # Schreibe die Länge der Dateiendung\n dest_file.write(extension_orig) # Schreibe die Dateiendung\n counter = 1 # Setze den Wiederhohlungszähler auf 1\n last_byte = None # Erstelle die leere Variable mit dem letzten Byte\n chunk = src_file.read(self.chunk_size) # Liest Bytes aus\n while chunk: # Solange Bytes existieren\n for byte in chunk: # Für jedes Bytes\n if last_byte is not None and last_byte == byte and counter < self.MAXBYTES: # Wenn das letzte Byte gleich dem neuen Byts ist und die Anzahl nicht überschritten worden ist\n counter += 1 # Erhöhe den Zähler\n else: # Sonst\n if last_byte is not None: # Wenn das letzte Byte existiert\n if counter > (self.MAXBYTES - 255): # Wenn es sich lohnt zu komprimieren\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe das Markierungszeichen\n dest_file.write((counter - (self.MAXBYTES - 255)).to_bytes(1,\n 'big')) # Schreibe die Anzahl der Wiederhohlungen des Zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n else: # Sonst\n for i in range(counter): # Für die Anzahl der zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n if last_byte == ord(\n self.MARKER): # Wenn das Zeichen gleich dem Markierungzeichen ist\n dest_file.write(b'\\x00') # Schreibe 0 dahinter\n counter = 1 # Setze den Zähler auf 1 zurück\n last_byte = byte # Merke das aktuelle Byte für den Vergleich\n chunk = src_file.read(self.chunk_size) # Lese die neuen Bytes aus\n if counter > (self.MAXBYTES - 255): # Wenn es sich lohnt zu komprimieren\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe das Markierungszeichen\n dest_file.write((counter - (self.MAXBYTES - 255)).to_bytes(1,\n 'big')) # Schreibe die 
Anzahl der Wiederhohlungen des Zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n else: # Sonst\n for i in range(counter): # Für die Anzahl der zeichen\n dest_file.write(last_byte.to_bytes(1, 'big')) # Schreibe das Zeichen\n if last_byte == ord(self.MARKER): # Wenn das Zeichen gleich dem Markierungzeichen ist\n dest_file.write(b'\\x00') # Schreibe 0 dahinter", "def decompressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.decompress(data)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())", "def convert(src, dst):\n with open(dst, 'w', encoding = 'utf-8') as myFile:\n records = read(src)\n for tag in sorted(records.keys()):\n myFile.write('%s %s\\n' %(tag, records[tag]))", "def handle_file(self, source_path, dest_path):\n raise NotImplemented", "def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();", "def _copy_file(src, dest):\n\n if src is None or dest is None:\n raise ValueError(\"src and dest must not be None\", src, dest)\n\n if not os.path.isfile(src):\n raise ValueError(\"src file does not appear to exist\", src)\n\n # if error on copy, subprocess will raise CalledProcessError\n try:\n subprocess.run(\n [\"/usr/bin/ditto\", src, dest], check=True, stderr=subprocess.PIPE\n )\n except subprocess.CalledProcessError as e:\n logging.critical(\n f\"ditto returned error: {e.returncode} {e.stderr.decode(sys.getfilesystemencoding()).rstrip()}\"\n )\n raise e", "def copy_file(fromf,tof, fromapp, toapp):\n f2w=open(tof,\"w\")\n with open(fromf) as f:\n for line in f:\n newline=line.replace(fromapp,toapp)\n f2w.write(newline.replace(fromapp.upper(),toapp.upper()))\n f2w.close()", "def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def process_file(src_file, dest_file):\n # read data\n with open(src_file) as fil:\n new_data = fil.read()\n # generate a chain of templates\n parent_template = None\n current_template = dest_file\n cursor = 1\n if EXTEND_FLAG in new_data:\n new_data = new_data.replace(EXTEND_FLAG, \"\")\n while exists(current_template):\n parent_template = current_template\n current_template = \"%s%s%d\" % (dest_file, CHILD_TPL_FLAG, cursor)\n cursor += 1\n # write data\n with open(current_template, \"w\") as fil:\n if parent_template:\n # in the chain of templates each has to extend one another\n new_data = \"\\n\".join([\n \"{%% extends \\\"%s\\\" %%}\" % parent_template,\n new_data\n ])\n fil.write(new_data)", "def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", 
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def process_file(cmap, source, destination):\n line = source.readline()\n while line:\n destination.write(process_line(cmap, line))\n line = source.readline()\n\n source.close()\n destination.close()", "def copy_file(source_file_name, dest_file_name):\n print(\"Copying \" + source_file_name + \" to \" + dest_file_name)\n shutil.copy2(source_file_name, dest_file_name)\n print(\"Copying done.\")", "def compress_stream(src, dst):\n with gzip.GzipFile(fileobj=dst, mode='wb') as gz:\n for block in iterfile(src):\n gz.write(block)", "def decompressFile(infile, outfile):\n decoder = Decoder(infile)\n for data in decoder.bytes():\n outfile.write(data)", "def copyFile(src_dir, dst_dir, f_name):\n\n try:\n src_file = open(osp.join(src_dir, f_name),\"rb\")\n dst_file = open(osp.join(dst_dir, f_name),\"wb\")\n dst_file.write(src_file.read())\n dst_file.close()\n src_file.close()\n except Exception, e:\n msg = \"!!! In copying files from < %s > dir to < %s > dir exception occur. Details: %s.\" % (src_dir,dst_dir, str(e))\n print >> import_out, msg\n LOG('performImportToPortal',INFO,'copyFile', msg)", "def convert_for_submission(source_dir, target_dir):\r\n files = subfiles(source_dir, suffix=\".nii.gz\", join=False)\r\n maybe_mkdir_p(target_dir)\r\n for f in files:\r\n img = sitk.ReadImage(join(source_dir, f))\r\n out_file = join(target_dir, f[:-7] + \".nii\")\r\n sitk.WriteImage(img, out_file)", "def pythonify(file_name, src_dir, dst_dir):\n src_dir = src_dir + [file_name]\n dst_dir = dst_dir + [file_name + '.py']\n src = os.path.join(template_path, *src_dir)\n dst = os.path.join(template_path, *dst_dir)\n shutil.move(src, dst)", "def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)", "def copy_file(fs, inpath, outpath):\n fs.copy(inpath, outpath)" ]
[ "0.6751052", "0.6534881", "0.61683327", "0.6163265", "0.61372477", "0.6119148", "0.60524434", "0.602882", "0.598594", "0.5982905", "0.5955934", "0.59134305", "0.5866599", "0.585062", "0.58318645", "0.5749547", "0.5694051", "0.56439036", "0.5634352", "0.5634352", "0.5634352", "0.5614407", "0.55886227", "0.5582447", "0.5545823", "0.55201465", "0.55132014", "0.5504407", "0.5501212", "0.54894537" ]
0.7879794
0
Takes an integer below 1001 and converts it into English text. Ignore spaces and hyphens, as the instructions require.
def int2text(integer):
    # Numbers 1-99 are handled by simply looking up words in the special_case
    # dictionary.
    if integer < 100:
        return digit2text(integer)
    elif integer < 1000:
        # If exactly some hundred, then just return the word for the hundred's
        # place and the word 'hundred'
        if integer % 100 == 0:
            return digit2text(integer // 100) + 'hundred'
        # Otherwise return the word for the hundred's place, the word
        # 'hundredand' and do some composition to make the rest of the words.
        else:
            return digit2text(integer // 100) + 'hundredand' + \
                digit2text(integer % 100)
    # Special case for 1000.
    elif integer == 1000:
        return "onethousand"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def number2text(integer):\n\n numbers_1_20_char = [\"one\", \"two\", \"three\", \"four\", \"five\",\n \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\",\n \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\", \"twenty\"]\n\n numbers_21_99_int = list(range(20, 100, 10))\n numbers_21_99_char = [\"twenty\", \"thirty\", \"forty\", \"fifty\",\n \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n\n numbers_100_999_int = list(range(100,1000,100))\n numbers_100_999_char = [\"one hundred\", \"two hundred\", \"three hundred\", \"four hundred\", \"five hundred\",\n \"six hundred\", \"seven hundred\", \"eight hundred\", \"nine hundred\"]\n\n number_1000_int = 1000\n number_1000_char = \"one thousand\"\n\n if integer <= 0:\n raise ValueError(\"The number must be higher than 0, and smaller than 1001\")\n elif 1 <= integer <= 19:\n word = numbers_1_20_char[integer - 1]\n elif 20 <= integer <= 99:\n if integer in numbers_21_99_int:\n word = numbers_21_99_char[int(integer/10) - 2]\n else:\n inBetween = list(str(integer))\n lastword = numbers_1_20_char[int(inBetween[1]) - 1]\n firstword = numbers_21_99_char[int(int(inBetween[0])) - 2]\n word = \"\".join([firstword, lastword])\n elif 100 <= integer <= 999:\n if integer in numbers_100_999_int:\n word = numbers_100_999_char[int(integer/100) - 1]\n else:\n inBetween = list(str(integer))\n firstword = numbers_100_999_char[int(integer / 100) - 1]\n if int(inBetween[2]) == 0:\n if int(inBetween[1]) == 1:\n word = \"\".join([firstword, \"and\", \"ten\"])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n number = (int(inBetween[1])*10) + int(inBetween[2])\n if 1 <= number <= 20:\n secondword = numbers_1_20_char[number - 1]\n word = \"\".join([firstword, \"and\", secondword])\n else:\n secondword = numbers_21_99_char[int(int(inBetween[1])) - 2]\n thirdword = numbers_1_20_char[int(int(inBetween[2])) - 1]\n word = \"\".join([firstword, \"and\", secondword, thirdword])\n elif integer == number_1000_int:\n word = number_1000_char\n\n return word", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def digit2text(integer):\n # If the integer is in the special cases dictionary, then look up the word,\n # return it, and we're done.\n if integer in special_case_dict.keys():\n return special_case_dict[integer]\n # Otherwise compose the word, by taking the number in the ten's place and\n # multiplying by 10 (i.e. 
integer/10*10 evaluates to a number in the set\n # {10, 20, 30, 40, 50, 60, 70, 80, 90} for any input integer between 10-99.\n # Then add word for the number in the one's place\n else:\n return special_case_dict[integer/10*10]+special_case_dict[integer%10]", "def translateNumber(n):\r\n if type(n) != str:\r\n return None\r\n else:\r\n translation = \"\"\r\n word = \"\"\r\n for c in n:\r\n if c != ' ':\r\n word += c\r\n elif word in Numbers:\r\n translation += Numbers[word] + \" \"\r\n else:\r\n translation += word + \" \"\r\n return translation", "def num2words(num):\n # Create a dictionary of all unique numbers from 1 to 1,000\n num2words = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven',\\\n 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen',\\\n 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty',\\\n 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',\\\n 90:'ninety', 1000:'onethousand'}\n result = ''\n while True:\n try:\n result += num2words[num]\n return result\n except:\n pass\n try:\n result += num2words[num-num%10] + num2words[num%10]\n return result\n except:\n result += num2words[(num - num%100)//100] + 'hundred'\n num = num%100\n if num == 0:\n return result\n else:\n result += 'and'", "def integer_to_english_numeral(n, activate_tts=False):\n if activate_tts is None:\n activate_tts = False\n elif not isinstance(activate_tts, bool):\n raise TypeError('Argument \"activate_tts\" is not a boolean')\n if not isinstance(n, int):\n raise TypeError('Not an integer')\n if n < 0:\n raise ValueError('Not a positive integer')\n if n > 999999999999:\n raise OverflowError('Integer greater than 999,999,999,999')\n return cardinal_numerals_eng.integer_to_english(n, activate_tts)", "def large_int_word(x):\n\n digits = [int(i) for i in str(x)]\n units = tens = hundreds = thousands = ''\n\n if len(digits) == 1:\n units = UNITS[digits[-1]]\n else:\n units = UNIT_PREFIXES[digits[-1]]\n tens = TENS[digits[-2]]\n if len(digits) >= 3:\n hundreds = HUNDREDS[digits[-3]]\n if len(digits) >= 4:\n thousands = UNITS[digits[-4]] + 'illin'\n if len(digits) >= 5:\n raise\n\n return units + tens + hundreds + thousands + 'illion'", "def hundreds_text(num):\n hundreds_digit = num // 100\n tens_digit = num % 100\n hundreds_text = singles[hundreds_digit] + ' ' + \"Hundred\"\n return hundreds_text + ' ' + tens_text(tens_digit)", "def _cardinal2word(strNumber):\n return Number.convertNumberIntoLetters(strNumber)", "def num_to_words(amount):\n digits = {\n 0: 'нуль', 1: 'одна',\n 2: 'дві', 3: 'три',\n 4: 'чотири', 5: 'п\\'ять',\n 6: 'шість', 7: 'сім',\n 8: 'вісім', 9: 'дев\\'ять',\n 10: 'десять', 11: 'одинадцять',\n 12: 'дванадцять', 13: 'тринадцять',\n 14: 'чотирнадцять', 15: 'п\\'ятнадцять',\n 16: 'шістнадцять', 17: 'сімнадцять',\n 18: 'вісімнадцять', 19: 'дев\\'ятнадцять'\n }\n\n dozens = {\n 2: 'двадцять', 3: 'тридцять',\n 4: 'сорок', 5: 'п\\'ятдесят',\n 6: 'шістдесят', 7: 'сімдесят',\n 8: 'вісімдесят', 9: 'дев\\'яносто'\n }\n\n hundreds = {\n 1: 'сто', 2: 'двісті',\n 3: 'триста', 4: 'чотириста',\n 5: 'п\\'ятсот', 6: 'шістсот',\n 7: 'сімсот', 8: 'вісімсот',\n 9: 'дев\\'ятсот'\n }\n\n strnumber = str(amount)\n if amount < 20:\n return digits[amount]\n elif amount < 100:\n if strnumber[-1] == '0':\n return dozens[int(strnumber[0])]\n else:\n return dozens[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1]))\n else:\n if strnumber[1:3] == '00':\n return hundreds[int(strnumber[0])]\n else:\n 
return hundreds[int(strnumber[0])] + \" \" + num_to_words(int(strnumber[1:3]))", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" \".join(reversed(bites))", "def int2word(n):\n # break the number into groups of 3 digits using slicing\n # each group representing hundred, thousand, million, billion, ...\n n3 = []\n r1 = \"\"\n # create numeric string\n ns = str(n)\n for k in range(3, 33, 3):\n r = ns[-k:]\n q = len(ns) - k\n # break if end of ns has been reached\n if q < -2:\n break\n else:\n if q >= 0:\n n3.append(int(r[:3]))\n elif q >= -1:\n n3.append(int(r[:2]))\n elif q >= -2:\n n3.append(int(r[:1]))\n r1 = r\n\n # print n3 # test\n\n # break each group of 3 digits into\n # ones, tens/twenties, hundreds\n # and form a string\n nw = \"\"\n for i, x in enumerate(n3):\n b1 = x % 10\n b2 = (x % 100) // 10\n b3 = (x % 1000) // 100\n # print b1, b2, b3 # test\n if x == 0:\n continue # skip\n else:\n t = thousands[i]\n if b2 == 0:\n nw = ones[b1] + t + nw\n elif b2 == 1:\n nw = tens[b1] + t + nw\n elif b2 > 1:\n nw = twenties[b2] + ones[b1] + t + nw\n if b3 > 0:\n nw = ones[b3] + \"hundred \" + nw\n return nw", "def convert_number(number):\n return ' ' + ' '.join(list(int_to_roman(number))) + ' '", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def hundreds_conversion(positive_int):\n positive_int = str(positive_int)\n if int(positive_int[-3]) < 4:\n return 'C' * int(positive_int[-3])\n if int(positive_int[-3]) == 4:\n return 'CD'\n if int(positive_int[-3]) == 5:\n return 'D'\n if int(positive_int[-3]) == 6:\n return 'DC'\n if int(positive_int[-3]) == 7:\n return 'DCC'\n if int(positive_int[-3]) == 8:\n return 'DCCC'\n if int(positive_int[-3]) == 9:\n return 'CM'", "def _to_cn(number):\n\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n ('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n\n reversed_n_string = reversed(str(number))\n\n result_lst = []\n unit = 0\n\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n\n # clean convert result, make it more natural\n if result_lst[-1] is '零':\n result_lst.pop()\n\n result_lst = list(''.join(result_lst))\n\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n\n '''\n length = len(str(number))\n if 4 < length <= 8:\n flag = result_lst.count('万')\n while flag > 1:\n result_lst.pop(result_lst.index('万'))\n flag -= 1\n elif 8 < length <= 12:\n flag = result_lst.count('亿')\n while flag > 1:\n result_lst.pop(result_lst.index('亿'))\n flag -= 1\n elif 12 < length <= 16:\n flag = result_lst.count('兆')\n while flag > 1:\n result_lst.pop(result_lst.index('兆'))\n flag -= 1\n elif 16 < length <= 20:\n flag = result_lst.count('吉')\n while flag > 1:\n result_lst.pop(result_lst.index('吉'))\n flag -= 1\n '''\n\n return ''.join(result_lst)", "def convert(number):\n out = \"\"\n if number 
% 3 == 0:\n out = \"Pling\"\n if number % 5 == 0:\n out = out + \"Plang\"\n if number % 7 == 0:\n out = out + \"Plong\"\n if out == \"\":\n out = str(number)\n return out", "def numbers2words():\n\tmy_num = None\n\twhile my_num != \"0\":\n\t\tmy_num = input(\"Please enter a number greater than 0 and less than 1 trillion: \")\n\t\tprint(name_num(int(my_num.replace(\",\",\"\"))))", "def textualize(num):\n if isinstance(num, float):\n num = int(num)\n # special case\n if num == 0:\n return 'zero'\n\n # if the number is negative, we put the word\n # 'negative' in front of it.\n is_negative = False\n if num < 0:\n is_negative = True\n num = -1 * num\n\n num = str(num)\n # pad with zeroes\n while len(num) % 3 != 0:\n num = ''.join([ '0', num ])\n\n # as groups are textualized, their strings will be\n # appended to this list\n num_string = []\n group_counter = 0\n while len(num) > 0:\n group = num[-3:]\n num = num[:-3]\n text = _textualize_group(group)\n\n # thousand, million, etc.\n if group_counter > 0 and text:\n group_name = group_names[group_counter]\n text = ' '.join([ text, group_name ])\n\n if text:\n num_string.insert(0, text)\n\n group_counter += 1\n\n if is_negative:\n num_string.insert(0, 'negative')\n\n return ' '.join(num_string)", "def convert(n):\n if n in numbersDict:\n return len(numbersDict[n]), numbersDict[n]\n # else, n is greater than 20\n\n # reverse so that n[0] is the ones place an so on\n n = list(map(int, reversed(str(n))))\n\n word = []\n\n wordHundred = \"hundred\"\n wordAnd = \"and\"\n wordThousand = \"thousand\"\n\n if (n[1]*10 + n[0]) in numbersDict:\n word.append(numbersDict[(n[1]*10 + n[0])])\n else:\n word.append(numbersDict.get(n[0], \"\"))\n word.append(numbersDict.get(n[1] * 10, \"\"))\n\n if len(n) > 2:\n if n[1] or n[0]: word.append(wordAnd)\n hundreds = numbersDict.get(n[2], \"\")\n needHundred = wordHundred if hundreds else \"\"\n word.append(needHundred)\n word.append(hundreds)\n\n if len(n) > 3:\n thousands = numbersDict.get(n[3], \"\")\n needThousand = wordThousand if thousands else \"\"\n word.append(needThousand)\n word.append(thousands)\n\n return len(\"\".join(word)), \" \".join(reversed(word))", "def indian_word_currency(value):\n if isinstance(value, int) and value < 100:\n return str(value)\n if isinstance(value, float) and value < 99:\n return str(value)\n\n try:\n if isinstance(value, str):\n if '.' 
not in value and int(value) < 99:\n return value\n if float(value) < 99:\n return value\n except (ValueError, TypeError):\n return value\n\n value_integer = str(value).split('.')[0]\n value_len = len(value_integer)\n if value_len > 7:\n crores = value_integer[:-7]\n lakhs = value_integer[-7:-5]\n if crores == '1' and lakhs == '00':\n return '1 Crore'\n if lakhs == '00':\n return '%s Crores' % crores\n return '%s.%s Crores' % (crores, lakhs)\n elif value_len > 5:\n lakhs = value_integer[:-5]\n thousands = value_integer[-5:-3]\n if lakhs == '1' and thousands == '00':\n return '1 Lakh'\n if thousands == '00':\n return '%s Lakhs' % lakhs\n return '%s.%s Lakhs' % (lakhs, thousands)\n elif value_len > 3:\n thousands = value_integer[:-3]\n hundreds = value_integer[-3:-1]\n if thousands == '1' and hundreds == '00':\n return '1 Thousand'\n if hundreds == '00':\n return '%s Thousands' % thousands\n return '%s.%s Thousands' % (thousands, hundreds)\n else:\n hundreds = value_integer[:-2]\n tens_ones = value_integer[-2:]\n if hundreds == '1' and tens_ones == '00':\n return '1 Hundred'\n if tens_ones == '00':\n return '%s Hundreds' % hundreds\n return '%s.%s Hundreds' % (hundreds, tens_ones)", "def cardinal(n, friendly=True):\n if friendly:\n n_abs = abs(n)\n\n if n_abs < 20:\n return cardinal(n, friendly=False)\n\n if n_abs < 100 and n_abs % 10 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 1000 and n_abs % 100 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 12000 and n_abs % 1000 == 0:\n return cardinal(n, friendly=False)\n\n prefix = \"min \" if n < 0 else \"\"\n\n if n_abs < MILLION:\n q, r = divmod(n_abs, 1000)\n if r == 0:\n return prefix + \"%d duizend\" % q\n\n if n_abs < BILLION:\n q, r = divmod(n_abs, MILLION)\n if r == 0:\n return prefix + \"%d miljoen\" % q\n\n # No friendly variant, just return the numerical representation.\n return unicode(n)\n\n # Code below completely spells out each number.\n\n if n < 0:\n return \"min \" + cardinal(abs(n))\n\n if n < 20:\n return UNITS[n]\n\n if n < 100:\n q, r = divmod(n, 10)\n a = TENS[q]\n if r == 0:\n return a\n b = cardinal(r)\n joiner = \"en\" if not b.endswith(\"e\") else \"ën\"\n return b + joiner + a\n\n if n < 1000:\n q, r = divmod(n, 100)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"honderd\" + b\n\n if 1000 < n < 10000 and n % 1000:\n # Special case for numbers that are exactly divisble by 100, but\n # not by 1000, e.g. 
\"tweeëntwintighonderd\"\n q, r = divmod(n, 100)\n if r == 0:\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n return a + \"honderd\"\n\n if n < MILLION:\n q, r = divmod(n, 1000)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"duizend\" + b\n\n if n < BILLION:\n q, r = divmod(n, MILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljoen\" + b\n\n if n < TRILLION:\n q, r = divmod(n, BILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljard\" + b\n\n if n < QUADRILLION:\n q, r = divmod(n, TRILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" biljoen\" + b\n\n # Fallback to numerical representation\n return unicode(n)", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def to_roman(n):\n if not isinstance(n, int):\n try:\n n = int(n)\n except ValueError:\n raise NotIntegerError(\"non-integers cannot be converted\")\n\n if not (0 < n < 4000):\n raise OutOfRangeError(\"number out of range (must be 1..3999)\")\n\n result = \"\"\n for numeral, integer in ROMAN_NUMBER_MAP:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def translate_number(number):\n return NUMBER_TRANSLATOR[number]", "def tens_text(num):\n if num < 10:\n return singles[num]\n elif num < 20:\n return teens[num]\n elif num < 100:\n tens_digit = num // 10\n singles_digit = num % 10\n if singles_digit == 0:\n return tens[tens_digit]\n else:\n return tens[tens_digit-2] + ' ' + singles[singles_digit]", "def tens_conversion(positive_int):\n # I use an index of [-2] to select the ten's place, and so forth until the thousands\n positive_int = str(positive_int)\n if int(positive_int[-2]) < 4:\n return 'X' * int(positive_int[-2])\n if int(positive_int[-2]) == 4:\n return 'XL'\n if int(positive_int[-2]) == 5:\n return 'L'\n if int(positive_int[-2]) == 6:\n return 'LX'\n if int(positive_int[-2]) == 7:\n return 'LXX'\n if int(positive_int[-2]) == 8:\n return 'LXXX'\n if int(positive_int[-2]) == 9:\n return 'XC'", "def int2roman(num):\n try:\n num_int = int(num)\n except ValueError:\n raise InputError(num, \"Input value must be in integer representation.\")\n except TypeError:\n raise InputError(num, \"Input must be a number, string, or a bytes-like object.\")\n if float(num) != float(num_int):\n raise InputError(num, \"Input cannot be a non-integer decimal value.\")\n else:\n num = int(num)\n if not 0 < num < 5000:\n raise InputError(num, \"Input must be an integer in [1,4999] range.\")\n\n res = \"\"\n for r, i in __extended_map:\n while num >= i:\n res += r\n num -= i\n return res", "def int2dec(n: int) -> str:" ]
[ "0.7711457", "0.736089", "0.6570956", "0.65348077", "0.6504594", "0.6472066", "0.6465786", "0.6318002", "0.63022107", "0.6287715", "0.6235496", "0.6192539", "0.61917514", "0.6169387", "0.6149354", "0.6142758", "0.61054987", "0.60986495", "0.60660845", "0.60206014", "0.5872346", "0.5815992", "0.576974", "0.57589585", "0.5754434", "0.575249", "0.57281697", "0.5724484", "0.572187", "0.56664413" ]
0.7366785
1
Solves [a]{x} = {b} by Gauss elimination.
def gaussElimin(a, b):
    a = float64(a)
    b = float64(b)
    n = len(b)
    x = zeros((n, 1), dtype=float)
    # Elimination phase: reduce a to upper triangular form,
    # applying the same row operations to b.
    for k in range(n-1):
        for i in range(k+1, n):
            l = float(a[i][k]) / a[k][k]
            a[i][k] = 0
            for j in range(k+1, n):
                a[i][j] = a[i][j] - l*a[k][j]
            b[i] = b[i] - l*b[k]
    # Back substitution phase.
    x[n-1] = float(b[n-1]) / a[n-1][n-1]
    for i in range(n-2, -1, -1):
        sum = b[i]
        for j in range(i+1, n):
            sum = sum - a[i][j]*x[j]
        x[i] = float(sum) / a[i][i]
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_elimination(A, b):\n \n m, n = A.shape\n U = A.copy() \n b = b.copy()\n\n # forward sweep, reduce A to a upper triangular matrix\n for k in range(min(m, n)):\n swap = np.argmax(np.abs(U[k:, k])) + k\n if U[swap, k] == 0:\n raise ValueError('Singular matrix')\n U[[k, swap], :] = U[[swap, k], :]\n b[[k, swap]] = b[[swap, k]]\n \n for i in range(k + 1, m):\n factor = U[i, k] / U[k, k]\n b[i] = b[i] - factor*b[k]\n U[i, k+1:] = U[i, k+1:] - U[k, k+1:] * factor\n U[i, k] = 0\n \n # solve by back subistitution\n x = rbackwardsolve(U, b, m)\n\n return x", "def gaussian_elimination(A, b):\n n = len(b)\n # Join A and b\n ab = np.c_[A,b]\n # Gaussian Elimination\n for i in range(n-1):\n if ab[i,i] == 0:\n raise ZeroDivisionError('Zero value in matrix..')\n\n for j in range(i+1, n):\n ratio = ab[j,i] / ab[i,i]\n\n for k in range(i, n+1):\n ab[j,k] = ab[j,k] - ratio * ab[i,k]\n\n # Backward Substitution\n X = np.zeros((n,1))\n X[n-1,0] = ab[n-1,n] / ab[n-1,n-1]\n\n for i in range(n-2,-1,-1):\n knowns = ab[i, n]\n for j in range(i+1, n):\n knowns -= ab[i,j] * X[j,0]\n X[i,0] = knowns / ab[i,i]\n return X", "def gaussian_elimination_pivots(A, b):\n\n P, L, U = PLU(A)\n n,_ = A.shape\n y = rforwardsolve(L, (P.T).dot(b), n)\n x = rbackwardsolve(U, y, n)\n\n return x", "def Gauss_Seidel_Solve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n #only change from before is that I use x_new in the update\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def gaussian_solve(a, b):\n g = np.zeros((len(a), len(a[0]) + len(b[0])))\n for i in range(len(a)):\n for j in range(len(a[0])):\n g[i][j] = a[i][j]\n for i in range(len(b)):\n for j in range(len(b[0])):\n g[i][j + len(a[0])] = b[i][j]\n for i in range(len(a)):\n for j in range(i+1, len(a)):\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n for i in range(len(a)):\n i = len(a) - i - 1\n for j in range(i):\n j = i - j - 1\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n if g[i][i] != 0:\n g[i] /= g[i][i]\n else:\n return 'error: matrix is not linearly independent'\n out = np.zeros((len(b), len(b[0])))\n for i in range(len(b)):\n for j in range(len(b[0])):\n out[i][j] = g[i][j + len(a[0])]\n return out", "def gaussian_elimination_special_case(b):\n n = len(b)\n # init new (prime) arrays\n beta_prime = np.empty(n)\n beta_prime[0] = 2\n\n b_prime = np.empty(n)\n b_prime[0] = b[0]\n\n v = np.empty(n)\n i_array = np.arange(n)\n beta_prime = (i_array+2) / (i_array+1)\n\n for i in range(1,n):\n b_prime[i] = b[i] + (b_prime[i-1] / beta_prime[i-1])\n\n v[-1] = b_prime[-1] / beta_prime[-1]\n\n for i in range(n-2, -1, -1):\n v[i] = (b_prime[i] + v[i+1])/ beta_prime[i]\n\n return v", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in 
range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def elimination(A, b):\n n = len(A)\n for j in range(n):\n if A[j][j] <= 0:\n raise ValueError('Matrix A is not positive definite.')\n A[j][j] = math.sqrt(A[j][j])\n b[j][0] = b[j][0] / A[j][j]\n for i in range(j + 1, n):\n A[i][j] = A[i][j] / A[j][j]\n b[i][0] = b[i][0] - A[i][j] * b[j][0]\n for k in range(j + 1, i + 1):\n A[i][k] = A[i][k] - A[i][j] * A[k][j]", "def polynomial(x, y):\n \n var = copy(x)\n known = copy(y)\n V = vandermonde_matrix(var)\n a = gauss_elimination(V, known)\n return a", "def gauss_seidel(a, b, n=None, x=None, delta=None, actual=np.array([]), max_iterations=default_max_iterations):\n # Make sure that both delta and actual are passed in\n if (delta and not actual.any()) or (actual.any() and not delta):\n raise SyntaxError(\"Must pass in both delta and actual if one is passed in\")\n # Make sure that only N or delta is passed in\n if delta and n:\n raise SyntaxError(\"Can only pass delta or N option\")\n\n # Create an initial guess if needed\n if x is None:\n x = np.zeros(len(a[0]))\n\n # Iterate for N times if N is passed in\n if n:\n L = np.tril(a)\n U = a - L\n for i in range(n):\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n\n # Iterate until error is found or max_iterations is exceeded if delta and actual are passed in\n elif delta and actual.any():\n n = 0\n actual_norm = np.linalg.norm(actual)\n L = np.tril(a)\n U = a - L\n\n while True:\n x = np.dot(np.linalg.inv(L), b - np.dot(U, x))\n x_norm = np.linalg.norm(x)\n n += 1\n # Compare norms of actual matrix with Jacobian-calculated matrix and if difference is within error, return\n # the number of iterations it took to get within the error\n if abs(Decimal(actual_norm) - Decimal(x_norm)) <= delta or n >= max_iterations:\n break\n # If neither N or delta was passed in\n else:\n raise SyntaxError(\"Must pass in either N or delta options to function\")\n\n # Return the result and the number of iterations taken to find it\n return [x, n]", "def solve_triangular(a, b, lower=False):\n # TODO maybe commit this to gvar.linalg\n # TODO can I raise a LinAlgError if a[i,i] is 0, and still return the\n # result and have it assigned to a variable using try...finally inside this\n # function?\n x = np.copy(b)\n a = a.reshape(a.shape + (1,) * len(x.shape[1:]))\n if lower:\n x[0] /= a[0, 0]\n for i in range(1, len(x)):\n x[i:] -= x[i - 1] * a[i:, i - 1]\n x[i] /= a[i, i]\n else:\n x[-1] /= a[-1, -1]\n for i in range(len(x) - 1, 0, -1):\n x[:i] -= x[i] * a[:i, i]\n x[i - 1] /= a[i - 1, i - 1]\n return x", "def sparse_gauss_seidel(A, b, tol=1e-8, maxiters=29):\n \n\n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m): \n rowstart = A.indptr[i]\n rowend = A.indptr[i+1]\n aii=A[i,i]\n xj[i]=(b[i]-(np.dot(A.data[rowstart:rowend], xi[A.indices[rowstart:rowend]])-aii*xi[i]))/(aii)\n xi[i]=xj[i]\n return xj\n \n #Aix = np.dot(A.data[rowstart:rowend], x[A.indices[rowstart:rowend]])\n\n m=len(b)\n xk=np.zeros((m,))\n for i in xrange(0,maxiters):\n xk=iter(xk)\n if (la.norm(A.dot(xk)-b,ord=np.inf)<tol) or (i==maxiters-1):\n return xk", "def legendreGauss (func, deg, a, b, ind, bsp, ind2=0):\n\n\tx, w = np.polynomial.legendre.leggauss(deg)\n\tt = 0.5*(x+1)*(b-a)+ a\n\t\n\tgauss = sum(w + func(t, bsp, ind, ind2))*( 0.5*(b-a))\n\n\treturn gauss", "def gem_solve(A, 
b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def gaussseidel_poissoneq(A, x0):\n return 1", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def gauss(x, y, ax, ay, x0, y0, phase):\n g_x = ((ax / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((x - x0) * ax) ** 2))\n g_y = ((ay / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((y - y0) * ay) ** 2))\n\n gxy = np.zeros((len(x),len(y)), dtype=float)\n for i, _gx in enumerate(g_x):\n for j, _gy in enumerate(g_y):\n gxy[i,j] = _gx * _gy \n\n gxy2 = (1.0 / sqrt(1.0+abs(phase))) * np.array([gxy, phase*gxy], dtype=float) \n\n return gxy2", "def gauss_seidel(coeficientes, semilla, b, i, n):\n suma = 0\n for j in range(n):\n if j != i and coeficientes[j] != 0:\n suma += (coeficientes[j] * semilla[j]) / coeficientes[i]\n return (b / coeficientes[i]) - suma", "def gaussian_reduce(w, a, b):\n u = (0, 1)\n v = (1, 0)\n\n if dot(u, v, w, a, b) < 0:\n v = (-v[0], -v[1])\n\n if norm(u, w, a, b) < norm(v, w, a, b):\n u, v = v, u\n\n while norm(u, w, a, b) > norm(v, w, a, b):\n k = dot(u, v, w, a, b) // dot(v, v, w, a, b)\n u, v = v, (u[0]- k*v[0], u[1]- k*v[1])\n\n u, v = v, u\n\n if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):\n c = v\n else:\n c = (u[0] - v[0], u[1] - v[1])\n\n return c[0]*w + b*c[1], c[0]", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def gauss_naive (M, b) -> list:\n dim = len(b)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n M[j][i] = 0.0\n\n for k in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n b[j] = b[j] - m__j_i * b[i]\n\n return M,b", "def solve(a, b):\n #-> getrf + getrs\n a, _, _ = get_computation_matrix(a)\n b, cv2, isM2 = get_computation_matrix(b)\n if a.get_dtype() != b.get_dtype():\n raise TypeError(\"solve: dtype of a and b are not compatible!\")\n if a.numRows() != a.numCols():\n raise ValueError(\"solve: input a is not a square matrix!\")\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (_, _, x, _) = gesv(a, b, overwrite_a=1, overwrite_b=1, dtype=t_dtype)\n\n if cv2:\n if isM2:\n return x.to_numpy_matrix()\n else:\n return x.to_numpy_array()\n else:\n return x", "def LUsolve(a,b):\n b=float64(b)\n n=len(b)\n LU=LUdecomp(a)\n y=zeros((n,1))\n x=zeros((n,1))\n y[0]=b[0]\n for i in range(1,n):\n sum=b[i]\n for j in range(i):\n sum=sum-LU[i][j]*y[j]\n y[i]=sum\n x[n-1]=float(y[n-1])/LU[n-1][n-1]\n for i in range(n-2,-1,-1):\n sum=y[i]\n for j in range(i+1,n):\n sum=sum-LU[i][j]*x[j]\n x[i]=float(sum)/LU[i][i]\n return x", "def MatrixFreeCG(A, 
b, x, tol=1e-6, maxiter=5000, quiet=True):\n\n if b.dtype != x.dtype:\n raise TaichiTypeError(f\"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).\")\n if str(b.dtype) == \"f32\":\n solver_dtype = ti.f32\n elif str(b.dtype) == \"f64\":\n solver_dtype = ti.f64\n else:\n raise TaichiTypeError(f\"Not supported dtype: {b.dtype}\")\n if b.shape != x.shape:\n raise TaichiRuntimeError(f\"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.\")\n\n size = b.shape\n vector_fields_builder = ti.FieldsBuilder()\n p = ti.field(dtype=solver_dtype)\n r = ti.field(dtype=solver_dtype)\n Ap = ti.field(dtype=solver_dtype)\n Ax = ti.field(dtype=solver_dtype)\n if len(size) == 1:\n axes = ti.i\n elif len(size) == 2:\n axes = ti.ij\n elif len(size) == 3:\n axes = ti.ijk\n else:\n raise TaichiRuntimeError(f\"MatrixFreeCG only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.\")\n vector_fields_builder.dense(axes, size).place(p, r, Ap, Ax)\n vector_fields_snode_tree = vector_fields_builder.finalize()\n\n scalar_builder = ti.FieldsBuilder()\n alpha = ti.field(dtype=solver_dtype)\n beta = ti.field(dtype=solver_dtype)\n scalar_builder.place(alpha, beta)\n scalar_snode_tree = scalar_builder.finalize()\n succeeded = True\n\n @ti.kernel\n def init():\n for I in ti.grouped(x):\n r[I] = b[I] - Ax[I]\n p[I] = 0.0\n Ap[I] = 0.0\n\n @ti.kernel\n def reduce(p: ti.template(), q: ti.template()) -> solver_dtype:\n result = solver_dtype(0.0)\n for I in ti.grouped(p):\n result += p[I] * q[I]\n return result\n\n @ti.kernel\n def update_x():\n for I in ti.grouped(x):\n x[I] += alpha[None] * p[I]\n\n @ti.kernel\n def update_r():\n for I in ti.grouped(r):\n r[I] -= alpha[None] * Ap[I]\n\n @ti.kernel\n def update_p():\n for I in ti.grouped(p):\n p[I] = r[I] + beta[None] * p[I]\n\n def solve():\n A._matvec(x, Ax)\n init()\n initial_rTr = reduce(r, r)\n if not quiet:\n print(f\">>> Initial residual = {initial_rTr:e}\")\n old_rTr = initial_rTr\n new_rTr = initial_rTr\n update_p()\n if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough\n # -- Main loop --\n for i in range(maxiter):\n A._matvec(p, Ap) # compute Ap = A x p\n pAp = reduce(p, Ap)\n alpha[None] = old_rTr / pAp\n update_x()\n update_r()\n new_rTr = reduce(r, r)\n if sqrt(new_rTr) < tol:\n if not quiet:\n print(\">>> Conjugate Gradient method converged.\")\n print(f\">>> #iterations {i}\")\n break\n beta[None] = new_rTr / old_rTr\n update_p()\n old_rTr = new_rTr\n if not quiet:\n print(f\">>> Iter = {i+1:4}, Residual = {sqrt(new_rTr):e}\")\n if new_rTr >= tol:\n if not quiet:\n print(\n f\">>> Conjugate Gradient method failed to converge in {maxiter} iterations: Residual = {sqrt(new_rTr):e}\"\n )\n succeeded = False\n\n solve()\n vector_fields_snode_tree.destroy()\n scalar_snode_tree.destroy()\n return succeeded", "def gauss2(x,a1,c1,w1,a2,c2,w2):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)", "def gauss_seidel(self):\n for i in range(1,self.size[0]-1):\n for j in range(1,self.size[1]-1):\n for k in range(1,self.size[2]-1):\n self.A[(i,j,k)] = ((1/6)*(self.A[(i+1,j,k)] + self.A[(i-1,j,k)] + self.A[(i,j+1,k)] + self.A[(i,j-1,k)] + self.A[(i,j,k+1)] + self.A[(i,j,k-1)] + self.J[(i,j,k)]) - self.A[(i,j,k)])*self.omega + self.A_0[(i,j,k)]", "def gauss_vect_mult(v):\n Jv = T.Rop(output, params, v)\n HJv = T.Rop(T.grad(opt_cost,output), output, Jv)\n JHJv = T.Lop(output, params, HJv)\n if not isinstance(JHJv,list):\n JHJv = [JHJv]\n JHJv = [a+ridge*b for a,b in zip(JHJv,v)]\n return JHJv", "def cg(A, b, x=None):\n n = len(b)\n if not 
x:\n x = np.ones([n,1])\n r = np.dot(A, x) - b\n p = - r\n # r_k_norm = np.dot(r, r)\n r_k_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n for i in range(2*n):\n Ap = np.dot(A, p)\n alpha = r_k_norm / p.T@Ap\n try:\n x += alpha * p\n except:\n pass\n r += alpha * Ap\n r_kplus1_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n beta = r_kplus1_norm / r_k_norm\n r_k_norm = r_kplus1_norm\n if r_kplus1_norm < 1e-5:\n break\n p = beta * p - r\n return x", "def g_rosenbrock(x, a=1, b=100):\n\n g = np.array(\n [\n -2 * a - 4 * b * x[0] * (-x[0] ** 2 + x[1]) + 2 * x[0],\n b * (-2 * x[0] ** 2 + 2 * x[1]),\n ]\n )\n\n return g", "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))" ]
[ "0.7353518", "0.71408343", "0.71251684", "0.70046204", "0.6569199", "0.64925224", "0.6439031", "0.63340366", "0.62848866", "0.6225892", "0.6141184", "0.61222094", "0.6090073", "0.6016392", "0.5969952", "0.5954091", "0.59377366", "0.5927988", "0.59233207", "0.59059817", "0.5830629", "0.5821877", "0.5712208", "0.5692663", "0.56926626", "0.5683645", "0.56747377", "0.5672946", "0.56709397", "0.5662022" ]
0.7767935
0
Solves [L][U]{x} = b, where [a] = [L\U] is the matrix returned from LUdecomp.
def LUsolve(a,b): b=float64(b) n=len(b) LU=LUdecomp(a) y=zeros((n,1)) x=zeros((n,1)) y[0]=b[0] for i in range(1,n): sum=b[i] for j in range(i): sum=sum-LU[i][j]*y[j] y[i]=sum x[n-1]=float(y[n-1])/LU[n-1][n-1] for i in range(n-2,-1,-1): sum=y[i] for j in range(i+1,n): sum=sum-LU[i][j]*x[j] x[i]=float(sum)/LU[i][i] return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LU_solve(A, d, b):\n \n\n L, U = L1U(A, d)\n\n y = rforwardsolve(L, b, d)\n x = rbackwardsolve(U, y, d)\n\n return x", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,:] = A[0,:]\n L[1:,0] = A[1:,0] / U[0,0]\n\n for i in range(1,n):\n for j in range(i,n):\n\n U[i,j] = A[i,j] - np.dot(L[i,:i],U[:i,j])\n\n if j != n-1:\n L[j+1,i] = (A[j+1,i] - np.dot(L[j+1,:i],U[:i,i])) / U[i,i]\n\n # solve Ly=b\n y[0] = b[0]\n\n for k in range(1,n):\n y[k] = b[k] - np.dot(L[k,:k],y[:k])\n\n # solve Ux=y\n x[-1] = y[-1] / U[-1,-1]\n\n for k in range(n-2,-1,-1):\n x[k] = (y[k] - np.dot(U[k,k+1:],x[k+1:])) / U[k,k]\n\n return x,L,U", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def solveLU(A, b):\n utils._checkDimensions(A, b)\n if utils.isSingular(A):\n raise utils.SingularityError(\"Input matrix is singular.\")\n L, U = LU(A)\n x_calculated = _solveX(L, U, b)\n\n acc = 10e-14\n accuracy_achieved = False\n while not accuracy_achieved:\n delb = b - np.matmul(A, x_calculated)\n delX = _solveX(L, U, delb)\n x_calculated = np.subtract(x_calculated, delX)\n if [x < acc for x in x_calculated]:\n accuracy_achieved = True\n return x_calculated", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def SolveLU(matrix, vector):\r\n matrixU = UMatrix(matrix)\r\n matrixL = LMatrix(matrix)\r\n return MultiplyMatrix(InverseMatrix(matrixU), MultiplyMatrix(InverseMatrix(matrixL), vector))", "def lu_solve(A,b,tol=10**(-14)):\n\n\t# LU decomposition -- raise ValueError for singular matrix A\n\ttry:\n\t\tLU, piv = scipy.linalg.lu_factor(A)\n\n\t\t# enforce magnitude of diagonal values are above given tolernce (round off)\n\t\tfor di in np.diag(LU):\n\t\t\tif abs(di) <= tol: raise ValueError\n\n\texcept ValueError:\n\t\tlogger.error(\"Error 'Singular Matrix' passed method: %s\" % inverse.__name__)\n\n\t# Use decomposition to solve for x\n\tx = scipy.linalg.lu_solve((LU, piv), b)\n\n\t# return solution vector x\n\treturn x", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over 
originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def solve(A, b, pivoting='partial'):\n M, N = A.shape\n Z = len(b)\n\n error_msg = \"[!] A must be square.\"\n assert (M == N), error_msg\n\n error_msg = \"[!] b must be {}D\".format(M)\n assert (Z == N), error_msg\n\n solver = LU(A, pivoting=pivoting)\n\n # solve for x\n x = solver.solve(b)\n\n return x", "def decomposeLU(self):\n self.check_square()\n\n N = self.rows\n L = make_matrix(N, N)\n U = make_matrix(N, N)\n A = self #for more math friendly notation\n\n\n for j in range(N):\n L[j, j] = 1.0 #Doolittle factorization\n\n #e.g., if you are in column = 5, you go down 6 rows\n for i in range(j+1):\n U[i, j] = A[i, j] - sum(L[i, k] * U[k, j] for k in range(i))\n #e.g., if you are in column = 5,\n # you start at row 5 and go down for the lower triangular matrix\n for i in range(j, N):\n L[i, j] = (A[i, j] - sum(L[i, k] * U[k, j] for k in range(j))) / U[j, j]\n\n self.L = L\n self.U = U\n return L, U", "def solveU(U, b):\n # validate input\n if np.allclose(U,np.triu(U))==False or np.linalg.det == 0:\n raise TypeError(\"U is not an upper regular triangular matrix\")\n \n elif len(U.shape) != 2 or len(b.shape) != 1:\n raise TypeError(\"unsuitable object\")\n \n else:\n un, um = U.shape\n n, = b.shape\n if un != um or un != n:\n raise TypeError((\"dimensions do not fullfill requirements\"))\n\n # solve \n x = np.zeros(n, dtype=complex)\n x[-1] = (b[-1]) / U[n - 1, n - 1]\n for i in range(1, n):\n t = U[(n - (i + 1)):(n - i)] @ x\n x[-(i + 1)] = (b[-(i + 1)] - t) / U[n - (i + 1), n - (i + 1)]\n\n return x", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]", "def test_LU(self):\n A = np.random.rand(10, 10)\n MA = to_matrix(A)\n ML, MU = MA.decomposeLU()\n self.assertEqual(ML*MU, MA)\n self.assertTrue(ML.is_lower_triangular())\n self.assertTrue(MU.is_upper_triangular())", "def test_lu_factor():\n\t#[A, b] = lu_read('test1.txt')\n\t# it is poor form to read an external file into a test function, as above\n\tA = np.array([\n\t\t[ 2., 3., -4., 2.],\n\t\t[-4., -5., 6., -3.],\n\t\t[ 2., 2., 1., 0.],\n\t\t[-6., -7., 14., -4.]])\t\n\tLU,p = lu_factor(A, pivot=False)\n\tLU_soln = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\tassert norm(LU - LU_soln) < 1.e-10\t\n\n\n\t# test 2\n\t[A2, b2] = lu_read('test2.txt')\t\t\t\t\t\t# read a matrix and RHS vector\n\tLU2,p2 = lu_factor(A2) \t\t\t\t\t\t\t\t# change display to False when LU_FACTOR working\n\tLU_soln2 = np.array([\n\t\t 
[0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\tassert norm(LU2 - LU_soln2) < 1.e-10", "def usolve(self, ub):\n raise NotImplementedError", "def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)", "def lu_decomposition(self):\n if self.rows_count() != self.columns_count():\n raise ValueError(\"Matrix needs to me square for LU decomposition.\")\n for i in range(self.rows_count() - 1):\n for j in range(i + 1, self.rows_count()):\n if self[i, i] == 0: # or abs(self[i, i]) <= 0.000001):\n raise ValueError(\"Can't divide by 0\")\n self[j, i] = self[j, i] / self[i, i]\n for k in range(i + 1, self.rows_count()):\n self[j, k] -= self[j, i] * self[i, k]", "def LU(A):\n m, n = A.shape\n L, U = np.zeros([m, n]), np.zeros([m, n])\n for i in range(n):\n L[i][i] = 1\n\n for i in range(n):\n\n # Upper triangular matrix\n for j in range(i, n):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*U[k][j]\n U[i][j] = A[i][j] - summ\n\n # Lower triangular matrix\n for j in range(i+1, n):\n summ = 0\n for k in range(0, i):\n summ += L[j][k]*U[k][i]\n L[j][i] = (A[j][i] - 
summ)/U[i][i]\n return L, U", "def L1U(A, d):\n \n\n n, _ = A.shape\n L = np.eye(n, n, dtype=A.dtype)\n U = np.zeros((n, n), dtype=A.dtype)\n\n U[0, 0] = A[0, 0]\n for k in range(1, n):\n km = max(0, k-d)\n L[k, km : k] = np.transpose(rforwardsolve(np.transpose(U[km:k, km:k]),\\\n np.transpose(A[k, km:k]), d))\n U[km:k+1, k] = rforwardsolve(L[km:k+1, km:k+1], A[km:k+1, k], d)\n return L, U", "def L1U(A, d):\n n = shape(A)[0]\n L = eye(n)\n U = matrix(zeros((n,n))); U[0,0] = A[0,0]\n for k in range(1,n):\n km = array([0, k - d]).max()\n if km < k:\n L[k, km:k] = A[k, km:k]\n rforwardsolve(U[km:k, km:k].T, L[k, km:k].T, d) # L\n U[km:(k + 1), k] = A[km:(k + 1), k]\n rforwardsolve(L[km:(k + 1), km:(k + 1)], U[km:(k + 1), k], d) # U\n return L, U", "def solve_lin(matrix_u,vector_d):\n m_np = np.array(matrix_u)\n v_np = np.array(vector_d)\n\n return np.linalg.solve(m_np, v_np)", "def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10", "def solve(self, A, B):\n return tf.matrix_solve_ls(matrix=A, rhs=B)", "def solve_L(L, b):\n n = b.size\n assert L.shape == (n,n)\n x = zeros(n)\n for i in range(n):\n x[i] = (b[i] - dot(x[:i], L[i,:i])) / L[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x", "def solver(mesh, model, ele, nodal_load):\r\n A = kinematics.A_matrix(model, ele)\r\n\r\n Ks = stiffness.Ks_matrix(model, ele)\r\n\r\n K = np.dot(A.T, np.dot(Ks, A))\r\n\r\n P = load.P_vector(model, nodal_load)\r\n\r\n Kf, Pf = index.fdof(model, K, P)\r\n\r\n Uf = np.linalg.solve(Kf, Pf)\r\n\r\n U = index.tdof(model, Uf)\r\n\r\n V = np.dot(A, U)\r\n\r\n Q = np.dot(Ks, V)\r\n\r\n return U, Q", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n 
eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def solve(self):\n is_valid = self.verify_sub_matrixes()\n \n if not is_valid:\n raise ValueError((\n \"El determinante es igual a cero \"\n \"el método no puede continuar\"\n ))\n \n (lower, upper) = self.doolittle_factorization()\n\n lower_solution_vector = lower.solve_matrix(matrix=None, vector=self.vector.vector)\n lower_solution_vector.print_vector()\n upper_solution_vector = upper.solve_matrix(\n matrix=None, vector=lower_solution_vector.vector)\n upper_solution_vector.print_vector()\n\n comprobation = self.matrix.comprobation(upper_solution_vector.vector)\n return comprobation" ]
[ "0.8204323", "0.77781606", "0.7435162", "0.7326853", "0.71451354", "0.7074888", "0.6962283", "0.69174", "0.6882864", "0.6877735", "0.68558407", "0.6839501", "0.67079276", "0.6632435", "0.6581103", "0.6516492", "0.6455007", "0.63229835", "0.62904537", "0.62402534", "0.6206865", "0.61846966", "0.6057433", "0.60542107", "0.5929366", "0.58872217", "0.5857919", "0.5856226", "0.5809597", "0.5795168" ]
0.79200137
1
Constructs an Octave ResNet26 model.
def pre_act_oct_resnet26(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, in_channels)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model", "def __init__(self, pretrained=True, freeze_weights=True):\n super(RaisinNet34, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained ResNet-34 model and turn off autograd\n # so its weights won't change.\n architecture = resnet34(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.conv1 = architecture.conv1\n self.bn1 = architecture.bn1\n self.relu = architecture.relu\n self.maxpool = architecture.maxpool\n self.layer1 = architecture.layer1\n self.layer2 = architecture.layer2\n self.layer3 = architecture.layer3\n self.layer4 = architecture.layer4\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of ResNet-34.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.fc.in_features\n self.fc = nn.Linear(in_features=in_ftrs, out_features=2, bias=True)\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 
2],\n num_classes=num_classes)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNN, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n modules = list(resnet.children())[:-2] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n \n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def __init__(self, embed_size):\n super(Encoder, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules) \n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif 
arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model" ]
[ "0.65736985", "0.6366523", "0.6366468", "0.6340577", "0.6290355", "0.62448686", "0.6211965", "0.62079406", "0.6202202", "0.61859906", "0.617765", "0.6167639", "0.6150246", "0.6131579", "0.61293316", "0.6126124", "0.60872185", "0.60808474", "0.60243255", "0.6022623", "0.6018663", "0.6012224", "0.60103196", "0.59991026", "0.5980352", "0.59795797", "0.5978402", "0.5970605", "0.5970605", "0.5970605" ]
0.64773536
1
Constructs an Octave ResNet200 model.
def pre_act_oct_resnet200(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet():\n return models.resnet152(pretrained=True)", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def pre_act_oct_resnet50(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def pre_act_oct_resnet101(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def ResNeXt(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def pre_act_oct_resnet152(pretrained=False, **kwargs):\n model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "def pre_act_oct_resnet26(pretrained=False, **kwargs):\n model = 
PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "def oct_resnet101(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 23, 3], **kwargs)", "def oct_resnet50(**kwargs):\n return _oct_resnet(Bottleneck, [3, 4, 6, 3], **kwargs)", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model" ]
[ "0.6754732", "0.6618465", "0.65908283", "0.63108593", "0.63079596", "0.62873125", "0.62037766", "0.61919415", "0.61760974", "0.61187875", "0.6101522", "0.60922974", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.60647464", "0.6059056", "0.6056413", "0.60291654", "0.60282534", "0.6025971", "0.6019859", "0.60081387", "0.599906", "0.5998235", "0.5995848", "0.59931815", "0.5992309" ]
0.66531056
1
The standard size of a tile sprite in 2D screen space.
def tile_size_2d(self): return 32.0, 32.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def getSize(self):\n return self.screen.get_size()", "def getSize(self):\n return self.__width * self.__height;", "def tileWidth(self):\n return self._tileWidth", "def pix_size(self):\n return self._pix_size", "def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))", "def tileHeight(self):\n return self._tileHeight", "def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()", "def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])", "def get_tilesize(self, sampling):\n xsize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n ysize = {\n 'T6': 600000,\n 'T3': 300000,\n 'T1': 100000\n }[self.get_tiletype(sampling)]\n return xsize, ysize", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def get_pixel_size(self):\n raise NotImplementedError", "def get_size(self):\n return self._surf.get_size()", "def getNumTiles(self):\n return self.w * self.h", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def get_tile_size(self, map_size = None, show_info = None):\n if not map_size: map_size = self.map_size\n w,h = self.img_size\n x_tiles,y_tiles = map_size\n\n tile_raw_w = w / x_tiles\n tile_raw_h = h / y_tiles\n\n if self.debug:\n print(f' ► Raw tile width: {tile_raw_w}\\n ► Raw tile height: {tile_raw_h}')\n\n tile_w = int(round(tile_raw_w))\n tile_h = int(round(tile_raw_h))\n\n if show_info:\n print(f' Image Size: {w} x {h} px\\n Tile Size: {tile_w} x {tile_h} px\\n Map Size: {x_tiles} x {y_tiles} tiles')\n\n error_w = tile_w - tile_raw_w\n error_h = tile_h - tile_raw_h\n print(f'\\n -=ERROR INFO=-\\n Tile Size Width Error: {round(error_w,4)} px \\n Tile Size Height Error: {round(error_h,4)} px \\n Total Width Rounding Error: {round(error_w * x_tiles,4)} px \\n Total Height Rounding Error: {round(error_h * y_tiles,4)} px\\n')\n\n return (tile_raw_w,tile_raw_h)", "def get_map_size(self, map_major_dim=None):\n w, h = self.img_size\n mmd = map_major_dim\n if w >= h:\n x_tiles = mmd\n y_tiles = round(h / w * mmd)\n else:\n x_tiles = round(w / h * mmd)\n y_tiles = mmd\n\n return (x_tiles, y_tiles)", "def getSize(self):\n return (int(self.getWidth()), int(self.getHeight()))", "def expected_height(self):\n\t\treturn self.expected_tile_height * TILE_SIZE", "def getNumTiles(self):\n return (self.width) * (self.height)", "def get_display_px(self):\n return self.image.size", "def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)", "def getSize(self):\n return GDimension(frameWidth, frameHeight)", "def get_combined_size(tiles):\n # TODO: Refactor calculating layout to avoid repetition.\n columns, rows = calc_columns_rows(len(tiles))\n tile_size = tiles[0].image.size\n return (tile_size[0] * columns, tile_size[1] * rows)", "def sprite_source_size(self):\n if self.trimmed:\n return {\n 'x': self.trim_offsets[0],\n 'y': self.trim_offsets[1],\n 'w': self.trim_offsets[2] - self.trim_offsets[0],\n 'h': 
self.trim_offsets[3] - self.trim_offsets[1],\n }\n else:\n return {\n 'x': 0,\n 'y': 0,\n 'w': self.width,\n 'h': self.height\n }", "def size(self):\n return (self.width)", "def getNumTiles(self):\n return self.height * self.width" ]
[ "0.677379", "0.6732188", "0.6701107", "0.6691744", "0.6675488", "0.66745335", "0.666135", "0.6641239", "0.6588583", "0.6561213", "0.6488441", "0.64793986", "0.6465913", "0.64484566", "0.6422894", "0.6421343", "0.6418597", "0.6404673", "0.639991", "0.6364815", "0.63445616", "0.63170356", "0.63035", "0.62958", "0.62863356", "0.6284568", "0.6260523", "0.6237247", "0.62331027", "0.6224137" ]
0.79170084
0
Using the Command_Handler from the command module to handle the command.
def usingHandler(self, cmd): self.command_handler.handle_command(cmd) while msg_queue.empty() is False: self.writeresponse(msg_queue.get())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = self.__getattribute__(methodname)\n return method(args)", "def handle_command(self, command, channel):\n # Default response is help text for the user\n default_response = \"Not sure what you mean. Try *{}*.\".format(\"HI\")\n\n # Finds and executes the given command, filling in response\n handler = self.dispatch_config.get_handler_by_command(command.split(None, 1)[0])\n if handler is None:\n print(\"unrecognized command detected: \" + command.split(None, 1)[0])\n # Sends the response back to the channel\n self.slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response\n )\n else:\n print(\"using: \" + handler[\"fullpath\"] + \" to handle the request\")\n if handler[\"class\"] in self.handlers:\n self.handlers[handler[\"class\"]].handle_command(command, channel)\n else:\n cls = locate(handler[\"fullpath\"])\n print(cls)\n self.handlers[handler[\"class\"]] = cls(self.slack_client, self.config)\n self.handlers[handler[\"class\"]].handle_command(command, channel)", "def command():\n pass", "def on_command(server, user, command, args):", "def do_command(self, args):\n pass", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def handle_command(self, command, players, user, channel):\r\n response = self.help()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n elif command[0] == self.NEW_GAME_COMMAND:\r\n return self.new_game(players, channel)\r\n \r\n elif command[0] == self.TARGET_COMMAND:\r\n return self.target(user)\r\n \r\n elif command[0] == self.SURVIVORS_COMMAND:\r\n 
return self.survivors()\r\n \r\n elif command[0] == self.EXPIRE_COMMAND:\r\n return self.expire(channel)\r\n \r\n elif command[0] == self.REMOVE_COMMAND:\r\n return self.remove(command, channel)\r\n \r\n elif command[0] == self.KILL_COMMAND:\r\n (success, response) = self.kill(user, command)\r\n if success and self.game.get_active_channel() != \"\" and channel != self.game.get_active_channel():\r\n post_to_channel(self.game.get_active_channel(), response)\r\n return \"\"\r\n \r\n elif command[0] == self.LOAD_LAST_GAME_COMMAND:\r\n return self.load_last_game(channel)\r\n \r\n return response", "def processCommand(self, command, args):\n\n commandMap = { \n \"new\" : self.createNewList,\n \"view\" : self.trelloView,\n \"add\" : self.trelloAddCard, \n \"remove\" : self.trelloDeleteCard, \n }\n\n if command not in commandMap: return \">> Command not found\" \n \n return commandMap[command](args)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "async def _run_command(self, command, *args, **kwargs):\n pass", "def get_command(self,command):\n\t\treturn self.command_handlers[command]", "def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def handle(self, *args, **options):\n if not self.server:\n print 'Error : %s' % self.init_error\n return\n\n handler_choice = {\n 'proxy': self.proxy_handle,\n 'server': self.server_handle,\n }\n\n sub_command = options['sub_command']\n handler_choice.get(sub_command)(options)", "def handle_command(channel, command):\n print(\"Channel = \", channel)\n print(\"Command = \", command)\n \n # Default response is help text for the user\n default_response = \"Not sure what you mean. 
Try *{}*.\".format(EXAMPLE_COMMAND)\n\n # Finds and executes the given command, filling in response\n response = None\n # This is where you start to implement more commands!\n if command == \"help\":\n response = \"Sure...write some more code then I can do that!\"\n #help command lists all possible commands\n # if command == \"help\":\n # \tresponse = \"\"\n #report command \n elif command == \"report\":\n response = \"Here I will report on stuff...\"\n else:\n response = \"Try typing help to see valid commands\"\n\n # Sends the response back to the channel\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response or default_response\n )", "async def handle_command(server, process, command):\n process.stdout.write(f'Hello {server.username}\\r\\n')\n if server.listeners:\n forwarding(server, process)\n return\n\n if command is None:\n if config.ENABLE_SHELL:\n await shell(server, process)\n\n else:\n process.stderr.write('This server does not support'\n ' interactive sessions.\\r\\n')\n logging.warning('Interactive shell disabled')\n process.exit(1)\n\n elif command not in supported_commands:\n process.stderr.write('Unsupported command\\n')\n process.exit(1)\n\n else:\n eval(f'{command}(server, process)')\n process.exit(0)", "def handle_command(self, command, channel, user):\r\n response = \"Hello. Type \\\"@hexbot help\\\" for more information\"\r\n command = command.split()\r\n \r\n if len(command) == 0:\r\n return response\r\n \r\n if command[0] == self.HELP_COMMAND:\r\n response = self.help()\r\n elif command[0] == self.DEBUG_COMMAND:\r\n response = self.debug(command, channel);\r\n elif command[0] == self.ASSASSIN_COMMAND:\r\n command.pop(0)\r\n response = self.assassin(command, channel, user);\r\n \r\n return response", "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def process_command(self, command):\r\n\r\n tokenized_command = command.split() # Splitting the command and the arguments into separate list elements\r\n\r\n # In order to save a lot of code writing, we are making the command appear the same as the ones from single\r\n # iteration modes. 
This way, the same method that handles the commands in single iteration mode is now able\r\n # to process commands from the looped run mode as well.\r\n sys_argv_emulation = tokenized_command.copy()\r\n sys_argv_emulation.insert(0, \"filler argument\")\r\n\r\n if tokenized_command[0] == \"add_song\":\r\n add_media(tokenized_command[1], 0)\r\n\r\n elif tokenized_command[0] == \"delete_song\":\r\n remove_media(tokenized_command[1])\r\n\r\n elif tokenized_command[0] == \"list_media\":\r\n self.display_media_cli()\r\n\r\n elif tokenized_command[0] == \"media_folder\":\r\n self.configure_media_folder(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"modify_data\":\r\n self.configure_media(tokenized_command[1])\r\n\r\n elif tokenized_command[0] == \"create_save_list\":\r\n self.generate_savelist_cli(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"search\":\r\n self.search_cli(sys_argv_emulation)\r\n\r\n elif tokenized_command[0] == \"play\":\r\n play_media(tokenized_command[1], 1)\r\n\r\n elif tokenized_command[0] == \"load_gui\":\r\n self.run_mode = 0\r\n load_gui()\r\n\r\n elif tokenized_command[0] == \"help\":\r\n self.display_help_cli()\r\n\r\n elif tokenized_command[0] == \"quit\":\r\n sys.exit()\r\n\r\n else:\r\n print(\"\\nUnrecognized command \\\"\" + tokenized_command[0] + \"\\\".\\n\"\r\n \"Use command \\\"Help\\\" for a list of available commands.\")", "def command(self):\n raise NotImplementedError", "def handle_command(self, command, channel, user, msg_type):\n # Default response is help text for the user\n default_response = \"Does not compute. Try `<@{}> help` for command information.\".format(\n self.id)\n\n response = None\n attachment = None\n\n output(f\"Command: '{command}' - User: {user} - Channel: {channel}\")\n\n if self.db_conn:\n # TODO: create a document generator\n doc = {\n 'date': datetime.datetime.utcnow(),\n 'command': command,\n 'user': user,\n 'channel': channel\n }\n\n result = self.db_conn.insert_document(\n doc,\n db=self.db_conn.CONFIG['db'],\n collection=self.db_conn.CONFIG['collections']['cmds']\n )\n\n # TODO: Fix logging output for DB stuff\n output(\n f\"[{self.db_conn.db}: {self.db_conn.collection}] - Inserted: {result.inserted_id}\")\n\n if msg_type == \"message\":\n response, attachment = self.execute_command(\n command, cmds.COMMANDS.items(), user)\n else:\n response, channel = self.execute_command(\n command, cmds.COMMANDS_HIDDEN.items(), user)\n\n # TODO: Make a better name for out\n out = Response(channel, response or default_response, attachment)\n\n # Log response\n if self.db_conn:\n response_type = \"attachment\" if out.attachment else \"response\"\n update = {'$set': {\n 'response': {\n 'date': datetime.datetime.now(),\n 'type': response_type,\n 'message': out.attachment or out.message or default_response,\n 'channel': out.channel\n }\n }}\n\n result = self.db_conn.update_document_by_oid(\n result.inserted_id,\n update,\n db=self.db_conn.CONFIG['db'],\n collection=self.db_conn.CONFIG['collections']['cmds']\n )\n\n output(\n f\"[{self.db_conn.db}: {self.db_conn.collection}] - Updated: {result.raw_result}\")\n\n return out", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. 
Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def process_cmd(self, cmd):\n\n resp = self.COMMANDS[cmd.cmd](cmd)\n\n logger.debug(\"Resp: %s\" % resp)\n # send to resp_queue\n # if type == G.CTRL_TYPE:\n #\n # response = json.dumps((corr_id, routing_key, resp))\n # logger.debug(\"Sending response: %s\" % response)\n # self.out_queue.put(response)\n\n response = cmd.make_response(resp)\n logger.debug(\"Sending response: %s\" % response)\n self.out_queue.put(str(response))", "async def _get_command_handler(self, command_type):\n if isinstance(command_type, str):\n module_name = 'command'\n module = import_module(module_name)\n handler = getattr(module, command_type)\n return command_type, handler", "def handle_user_command(self, command):\n\n out, err = pyautogit.commands.handle_custom_command(command)\n self.show_command_result(out, err, command_name=command)\n self.refresh_status()" ]
[ "0.8338623", "0.75426656", "0.7504038", "0.7325603", "0.7277105", "0.726302", "0.71191317", "0.7092065", "0.70484436", "0.6998338", "0.6996647", "0.6957424", "0.6936447", "0.6885723", "0.683087", "0.6806178", "0.676416", "0.6748807", "0.6729248", "0.6727872", "0.67130303", "0.6700611", "0.6692235", "0.66680294", "0.66654843", "0.6658531", "0.662541", "0.66060925", "0.6582996", "0.65784055" ]
0.77345246
1
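The command-handling snippets collected above all follow the same shape: parse the incoming command string, look up a handler, and fall back to a default response when nothing matches. A minimal, self-contained sketch of that dispatch pattern (the handler names and responses are illustrative, not taken from any of the snippets):

import shlex

# Hypothetical handlers; each takes the argument list and returns a response string.
def _help(args):
    return "Available commands: help, report"

def _report(args):
    return "Here I will report on: " + (" ".join(args) or "everything")

HANDLERS = {"help": _help, "report": _report}

def handle_command(command, default_response="Not sure what you mean. Try 'help'."):
    parts = shlex.split(command)
    if not parts:
        return default_response
    handler = HANDLERS.get(parts[0].lower())
    return handler(parts[1:]) if handler else default_response

# Example: handle_command("report disk usage") -> "Here I will report on: disk usage"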
Target method used by the monitor thread, which polls vBMC status every 3 seconds. If the vBMC stops, ipmi-console will stop.
def monitor(instance="default"):
    global logger_ic
    while True:
        try:
            with open("{}/{}/.{}-bmc.pid".format(
                    config.infrasim_home, instance, instance), "r") as f:
                pid = f.readline().strip()
                if not os.path.exists("/proc/{}".format(pid)):
                    logger_ic.warning("Node {} vBMC {} is not running, "
                                      "ipmi-console is ready to quit".
                                      format(instance, pid))
                    break
            time.sleep(3)
        except IOError:
            logger_ic.warning("Node {} workspace is possibly destroyed, "
                              "ipmi-console is ready to quit".format(instance))
            break
    stop(instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self, rms):\n pass", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def monitor(self):", "def watch(self, event):\n\t\tprint ('countdown',self.sec_left )\n\t\tif self.sec_left>-1:\n\t\t\tself.sec_left -= 1\n\t\t\tprint(self.sec_left)\n\t\t\tif self.sec_left in [2,1]:\n\t\t\t\t\n\t\t\t\tprint('ticker', self.sec_left)\n\t\t\t\tself.showMsg(str(self.sec_left-1), 1000, clean =False)\n\t\t\telif self.sec_left in [-1]:\n\t\t\t\tself.Clean()", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def updateStatus(self):\n done = False\n if not self.pg.is_alive():\n done = True\n while not self.pg.msgQueue.empty():\n msg = str(self.pg.msgQueue.get(False))\n self.monitorTextBox.append(msg)\n if done:\n self.timer.stop()\n self.pg.join()\n self.runButton.setEnabled(True)\n self.stopButton.setEnabled(False)\n if self.pg.ex:\n etype, evalue, etrace = self.pg.ex\n el = traceback.format_exception(etype, evalue, etrace)\n for line in el:\n self.monitorTextBox.append(line)\n self.setStatusBar.emit(\n \"Surrogate Failed Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n else:\n self.setStatusBar.emit(\n \"Surrogate Finished, Elapsed Time: {0}\".format(\n hhmmss(math.floor(time.time() - self.timeRunning))\n )\n )\n if self.pg.driverFile != \"\":\n try:\n df = os.path.abspath(self.pg.driverFile)\n except:\n pass\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Driver File Location\")\n msgBox.setText(\n \"The surrogate model driver file path is: {0}\".format(\n os.path.abspath(df)\n )\n )\n msgBox.exec_()\n else:\n self.refreshContents()\n self.setStatusBar.emit(\n \"Surrogate Model Generation, Elapsed Time: {0}s\".format(\n math.floor(time.time() - self.timeRunning)\n )\n )", "def run(self):\n while self.running:\n self.__update_battery()\n self.__update_signal()\n time.sleep(5)", "def periodicCall(self):\n self.gui.processIncoming()\n if not self.running:\n import sys\n sys.exit(1)\n self.master.after(UPDATE_DELAY, self.periodicCall)", "def periodicCall(self):\n self.gui.processIncoming(self.cdLen, self.goHold, self.songLength)\n if not self.running:\n # This is the brutal stop of the system.\n # should do some cleanup before actually shutting it down.\n import sys\n sys.exit(1)\n self.master.after(200, self.periodicCall)", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def backgroundStart(): #Background checks thread\n global currentStatus\n while True:\n currentStatus = checkClassChanges()\n time.sleep(10)", 
"def monitor(self):\n import curses\n import inspect\n\n stdscr = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.cbreak()\n width_split = curses.COLS//3-1\n win_done = curses.newwin(curses.LINES-1, width_split, 0, 0)\n win_running = curses.newwin(curses.LINES-1, width_split,\n 0, width_split+1)\n win_pending = curses.newwin(curses.LINES-1, width_split,\n 0, 2*width_split+1)\n stdscr.addstr(curses.LINES-1, 0,\n 'Monitoring started. Press Ctrl+C to stop.')\n stdscr.refresh()\n win_done.addstr(0, 0, 'DONE')\n win_pending.addstr(0, 0, 'PENDING')\n while True:\n try:\n win_done.addstr(1, 0,\n f'{len(self.done)} jobs done')\n list_done = list(self.done)[:curses.LINES-3]\n for idx, fut in enumerate(list_done, start=2):\n fmt_str = f'{id(fut):x} {fut._state}'\n win_done.addstr(idx, 0, fmt_str)\n win_done.refresh()\n\n win_running.clear()\n win_running.addstr(0, 0, 'RUNNING')\n win_running.addstr(1, 0,\n f'{self.running.qsize()} jobs running')\n list_running = list(self.running.items())[:curses.LINES-3]\n for idx, (fut, coro) in enumerate(list_running, start=2):\n coro_state = inspect.getcoroutinestate(coro)\n fmt_str = f'{id(fut):x} {coro_state}'\n win_running.addstr(idx, 0, fmt_str)\n win_running.refresh()\n\n win_pending.clrtoeol()\n win_pending.addstr(1, 0,\n f'{self.pending.qsize()} jobs pending')\n win_pending.refresh()\n time.sleep(.1)\n except KeyboardInterrupt:\n break\n\n curses.nocbreak()\n curses.echo()\n curses.endwin()", "def run(self):\n self.cncLock.acquire()\n self.running = True\n\n # Initialize\n try:\n self.cnc = serial.Serial(self.deviceFile,BAUD_RATE)\n\n self.updaterThread = threading.Thread(target=self.periodic_timer)\n self.updaterThread.start()\n\n # Wake up grbl\n log.info(\"Initializing Grbl...\")\n cmd = \"\\r\\n\\r\\n\"\n self.cnc.write(cmd.encode())\n\n # Wait for grbl to initialize and flush startup text in serial input\n time.sleep(2)\n self.cnc.flushInput()\n self.cncLock.release()\n\n while self.running :\n cmd = self.commandQueue.get().strip() + EOLStr\n if self.running == False:\n break\n self.cncLock.acquire()\n self.cnc.write(cmd.encode())\n\n out = str(self.cnc.readline().strip()) # Wait for grbl response\n if out.find('ok') >= 0 :\n log.debug(f'MSG: {out}') # Debug response\n elif out.find('error') >= 0 :\n log.error(f'ERROR: {out}')\n else:\n log.info(out)\n self.cncLock.release()\n except:\n raise\n finally:\n log.debug(\"CNC main loop left\")\n self.cnc.close()", "def status_watcher(cs, line):\n #print('status watcher watching')\n\n # from the mullvad code, should watch for\n # things like:\n # \"Initialization Sequence Completed\"\n # \"With Errors\"\n # \"Tap-Win32\"\n\n if \"Completed\" in line:\n cs.change_to(cs.CONNECTED)\n return\n\n if \"Initial packet from\" in line:\n cs.change_to(cs.CONNECTING)\n return", "def run(self):\n self.monitor.start()", "def block(self):\n while self.running:\n time.sleep( 1 )", "def pause_while_moving(self,c, ADDR):\r\n\r\n while True:\r\n status = yield self.status(c,ADDR)\r\n if status.startswith(\"STATUS : STOP\"):\r\n break\r\n returnValue('Success!')", "def run():\n logger.verbose(\"bwmon: Thread started\")\n while True:\n lock.wait()\n logger.verbose(\"bwmon: Event received. 
Running.\")\n database.db_lock.acquire()\n nmdbcopy = copy.deepcopy(database.db)\n database.db_lock.release()\n try:\n if getDefaults(nmdbcopy) and len(bwlimit.tc(\"class show dev %s\" % dev_default)) > 0:\n # class show to check if net:InitNodeLimit:bwlimit.init has run.\n sync(nmdbcopy)\n else: logger.log(\"bwmon: BW limits DISABLED.\")\n except: logger.log_exc(\"bwmon failed\")\n lock.clear()", "def control_c(self) -> None:\n time.sleep(0.1) # sometimes it's better to wait a bit\n send_control_c(self.proc, True)", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def poll(self):\n\tself.met = self.button.poll()", "def check_main_stop(notifier):\n pass", "def periodicCall(self):\r\n if self.queue.qsize() != 0:\r\n self.action = self.queue.get()\r\n print(self.action)\r\n \r\n if not self.running:\r\n # This is the brutal stop of the system. 
You may want to do\r\n # some cleanup before actually shutting it down.\r\n import sys\r\n sys.exit(1)\r\n self.master.after(100, self.periodicCall)", "def wm_update(self):\n readback = self.get_pvobj(\"readback\")\n show_pos = self._update_cb(0)\n show_pos()\n with CallbackContext(readback, show_pos):\n try:\n while True:\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass", "def _watch(self):\n # self._popen.wait()\n lines_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in lines_iterator:\n line = line.strip()\n # log.log(\"raw\",self.name.upper()+\" SAYS: \"+line)\n # cmd = line.split(' ')[0]\n # args = line.split(' ')[1:]\n if line[0] == '#':\n self.onEvent(line.split(' '))\n if self.onClose:\n self.onEvent([self.onClose])\n self._running.clear()\n if self.stderr is not None:\n self.stderr.close()", "def start_monitoring(self):\n pass" ]
[ "0.6409184", "0.627425", "0.62064976", "0.61520576", "0.61488384", "0.608878", "0.6028219", "0.60113215", "0.58789355", "0.586533", "0.5813013", "0.5793242", "0.57749766", "0.5774655", "0.5764802", "0.57504106", "0.57363623", "0.5703783", "0.57020646", "0.56902844", "0.5681848", "0.56774795", "0.56361425", "0.56279284", "0.56196636", "0.5615766", "0.5613882", "0.56128395", "0.55970484", "0.5590245" ]
0.65879107
0
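The monitor loop above boils down to two ideas: read a pid from a per-instance pid file, and treat a missing /proc/<pid> entry as "the process is gone". A stripped-down, generic version of that polling loop (file names, helper names and the 3-second interval are illustrative, not tied to infrasim):

import os
import time

def pid_alive(pid_file):
    # True if the pid recorded in pid_file still has a /proc entry (Linux-only check).
    try:
        with open(pid_file, "r") as f:
            pid = f.readline().strip()
    except IOError:
        return False  # pid file missing, e.g. the workspace was destroyed
    return pid.isdigit() and os.path.exists("/proc/{}".format(pid))

def poll_until_dead(pid_file, interval=3):
    # Block until the watched process disappears, checking every `interval` seconds.
    while pid_alive(pid_file):
        time.sleep(interval)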
Stop the ipmi-console of the target instance, specified by its name.
def stop(instance="default"):
    global logger_ic
    logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)
    try:
        file_ipmi_console_pid = "{}/{}/.ipmi_console.pid".\
            format(config.infrasim_home, instance)
        with open(file_ipmi_console_pid, "r") as f:
            pid = f.readline().strip()
            os.kill(int(pid), signal.SIGTERM)
            logger_ic.info("SIGTERM is sent to pid: {}".format(pid))
        os.remove(file_ipmi_console_pid)
    except IOError:
        # When pid file is missing, by e.g., node destroy,
        # find process id by instance name
        if instance == "default":
            process_name = "ipmi-console start$"
        else:
            process_name = "ipmi-console start {}".format(instance)
        ps_cmd = r"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1".format(process_name)
        logger_ic.warning("Fail to find ipmi console pid file, check by:")
        logger_ic.warning("> {}".format(ps_cmd))
        _, pid = run_command(cmd=ps_cmd)
        logger_ic.warning("ipmi console pid got: {}".format(pid))
        if not pid:
            logger_ic.warning("ipmi console for instance {} is not running".format(instance))
            return
        os.kill(int(pid), signal.SIGTERM)
        logger_ic.info("SIGTERM is sent to pid: {}".format(pid))
    except Exception:
        logger_ic.warning(traceback.format_exc())
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self.scion_sh('stop')", "def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def stop(self):\n if self.debug:\n print(\"%s stop\" % self.name)\n self.force_exit()", "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop(self):\r\n self.inst.write(':STOP')", "def stop_run(arn=None):\n pass", "def stop(self):\n print(\"Stopping accessory.\")", "def stop_console(self):\n return", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop(self):\n\n self.active = False", "def _stop(self):", "def stop(self):\n self.active = False", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def stop_procedure(self):\n pass", "def stop():", "def stop():", "def stop():", "def stop():", "def stop (self):\n pass", "def stop (self):\n pass", "def stop() -> None:", "def stop(self) -> None:\n ...", "def stop(self):\n self._run = False\n self.IA.stop()", "def stop(self) -> str:\n return self.rpc_call(\"stop\")", "def stop():\n\n crate = get_crate()\n # Tell the thread to stop\n crate.mch_comms.stop = True\n # Stop the ipmitool shell process\n try:\n if crate.mch_comms.ipmitool_shell:\n crate.mch_comms.ipmitool_shell.terminate()\n crate.mch_comms.ipmitool_shell.kill()\n except:\n pass", "def stop(self):\n pass" ]
[ "0.69108975", "0.6825068", "0.6671944", "0.6663248", "0.66268575", "0.65905327", "0.65741503", "0.6553872", "0.6435648", "0.6377335", "0.6354143", "0.63085943", "0.6253596", "0.6247487", "0.62375504", "0.62224996", "0.62192184", "0.61786765", "0.6176067", "0.6176067", "0.6176067", "0.6176067", "0.6175612", "0.6175612", "0.61472714", "0.6140968", "0.6139031", "0.6110157", "0.61087245", "0.6090885" ]
0.6886424
1
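The stop routine above is essentially "read the pid file, send SIGTERM, clean up", with a ps-based fallback when the pid file has already been removed. A minimal sketch of the pid-file half (the fallback is omitted; names are illustrative):

import os
import signal

def stop_by_pid_file(pid_file):
    # Send SIGTERM to the process recorded in pid_file and remove the file.
    try:
        with open(pid_file, "r") as f:
            pid = int(f.readline().strip())
    except (IOError, ValueError):
        return False  # no pid file, or it did not contain a number
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError:
        pass  # the process already exited; still clean up the stale pid file
    os.remove(pid_file)
    return True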
Creates a handle to the Ceph Cluster.
def connect(ceph_config_file, timeout=CEPH_TIMEOUT):
    handle = rados.Rados(conffile=ceph_config_file)

    LOGGER.info("librados version: " + str(handle.version()))
    LOGGER.info("Attempting to connect to: " + str(handle.conf_get('mon initial members')))

    handle.connect()  # timeout should be specified
    LOGGER.info("Cluster ID: " + handle.get_fsid())

    return handle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def _setup_test_cluster(self, return_cluster, name, create_args):\n stack_name = '{0}_stack'.format(name)\n templ, self.stack = self._setup_test_stack(stack_name, TEMPLATE)\n cluster_instance = cbd.CloudBigData('%s_name' % name,\n templ.resource_definitions(\n self.stack)['cbd_cluster'],\n self.stack)\n self._stubout_create(return_cluster)\n return cluster_instance", "def add(cls, config: Dict) -> None:\n id_ = config[\"id\"]\n client_file = f\"/etc/ceph/ceph.{id_}.keyring\"\n\n # Create client\n cmd = [\"ceph\", \"auth\", \"get-or-create\", f\"{id_}\"]\n [cmd.append(f\"{k} '{v}'\") for k, v in config.get(\"caps\", {}).items()]\n cnt_key, err = cls.shell(args=cmd)\n\n def put_file(client, file_name, content, file_mode, sudo=True):\n file_ = client.remote_file(sudo=sudo, file_name=file_name, file_mode=file_mode)\n file_.write(content)\n file_.flush()\n file_.close()\n\n nodes_ = config.get(\"nodes\", config.get(\"node\"))\n default_version = str(cls.cluster.rhcs_version.version[0])\n use_cdn = cls.cluster.use_cdn\n if nodes_:\n if not isinstance(nodes_, list):\n nodes_ = [{nodes_: {}}]\n\n def setup(host):\n name = list(host.keys()).pop()\n _build = list(host.values()).pop()\n _node = get_node_by_id(cls.cluster, name)\n if _build.get(\"release\"):\n rhcs_version = _build[\"release\"]\n if not isinstance(rhcs_version, str):\n rhcs_version = str(rhcs_version)\n elif use_cdn:\n rhcs_version = 
default_version\n else:\n rhcs_version = \"default\"\n\n rhel_version = _node.distro_info[\"VERSION_ID\"][0]\n log.debug(\n f\"RHCS version : {rhcs_version} on host {_node.hostname}\\n\"\n f\"with RHEL major version as : {rhel_version}\"\n )\n enable_cmd = \"subscription-manager repos --enable=\"\n disable_all = [\n r\"subscription-manager repos --disable=*\",\n r\"yum-config-manager --disable \\*\",\n ]\n cmd = 'subscription-manager repos --list-enabled | grep -i \"Repo ID\"'\n cdn_ceph_repo = {\n \"7\": {\"4\": [\"rhel-7-server-rhceph-4-tools-rpms\"]},\n \"8\": {\n \"4\": [\"rhceph-4-tools-for-rhel-8-x86_64-rpms\"],\n \"5\": [\"rhceph-5-tools-for-rhel-8-x86_64-rpms\"],\n },\n \"9\": {\n \"5\": [\"rhceph-5-tools-for-rhel-9-x86_64-rpms\"],\n \"6\": [\"rhceph-6-tools-for-rhel-9-x86_64-rpms\"],\n },\n }\n\n rhel_repos = {\n \"7\": [\"rhel-7-server-rpms\", \"rhel-7-server-extras-rpms\"],\n \"8\": [\n \"rhel-8-for-x86_64-baseos-rpms\",\n \"rhel-8-for-x86_64-appstream-rpms\",\n ],\n \"9\": [\n \"rhel-9-for-x86_64-appstream-rpms\",\n \"rhel-9-for-x86_64-baseos-rpms\",\n ],\n }\n\n # Collecting already enabled repos\n out, _ = _node.exec_command(sudo=True, cmd=cmd, check_ec=False)\n enabled_repos = list()\n if out:\n out = out.strip().split(\"\\n\")\n for entry in out:\n repo = entry.split(\":\")[-1].strip()\n enabled_repos.append(repo)\n log.debug(f\"Enabled repos on the system are : {enabled_repos}\")\n\n if rhcs_version != \"default\":\n # Disabling all the repos and enabling the ones we need to install the ceph client\n for cmd in disable_all:\n _node.exec_command(sudo=True, cmd=cmd, timeout=1200)\n\n # Enabling the required CDN repos\n for repos in rhel_repos[rhel_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n for repos in cdn_ceph_repo[rhel_version][rhcs_version]:\n _node.exec_command(sudo=True, cmd=f\"{enable_cmd}{repos}\")\n\n # Clearing the release preference set and cleaning all yum repos\n # Observing selinux package dependency issues for ceph-base\n wa_cmds = [\"subscription-manager release --unset\", \"yum clean all\"]\n for wa_cmd in wa_cmds:\n _node.exec_command(sudo=True, cmd=wa_cmd)\n\n # Copy the keyring to client\n _node.exec_command(sudo=True, cmd=\"mkdir -p /etc/ceph\")\n put_file(_node, client_file, cnt_key, \"w\")\n\n if config.get(\"copy_ceph_conf\", True):\n # Get minimal ceph.conf\n ceph_conf, err = cls.shell(\n args=[\"ceph\", \"config\", \"generate-minimal-conf\"]\n )\n # Copy the ceph.conf to client\n put_file(_node, \"/etc/ceph/ceph.conf\", ceph_conf, \"w\")\n\n # Copy admin keyring to client node\n if config.get(\"copy_admin_keyring\"):\n admin_keyring, _ = cls.shell(\n args=[\"ceph\", \"auth\", \"get\", \"client.admin\"]\n )\n put_file(\n _node, \"/etc/ceph/ceph.client.admin.keyring\", admin_keyring, \"w\"\n )\n\n # Install ceph-common\n if config.get(\"install_packages\"):\n for pkg in config.get(\"install_packages\"):\n _node.exec_command(\n cmd=f\"yum install -y --nogpgcheck {pkg}\", sudo=True\n )\n if config.get(\"git_clone\", False):\n log.info(\"perform cloning operation\")\n role = config.get(\"git_node_role\", \"client\")\n ceph_object = cls.cluster.get_ceph_object(role)\n node_value = ceph_object.node\n utils.perform_env_setup(config, node_value, cls.cluster)\n\n out, _ = _node.exec_command(cmd=\"ls -ltrh /etc/ceph/\", sudo=True)\n log.info(out)\n\n # Hold local copy of the client key-ring in the installer node\n if config.get(\"store-keyring\"):\n put_file(cls.installer, client_file, cnt_key, \"w\")\n\n with parallel() as p:\n 
for node in nodes_:\n if not isinstance(node, dict):\n node = {node: {}}\n p.spawn(\n setup,\n node,\n )\n time.sleep(20)", "async def open(cls, loop, *, aliases=None, configfile=None, **config):\n cluster = cls(loop, aliases=aliases, **config)\n if configfile:\n cluster.config_from_file(configfile)\n await cluster.establish_hosts()\n return cluster", "def __create(self):\n pass\n\n # create at cluster-provider\n # get kubeconfig\n # wait for api\n # ^ could be async and seperate steps?", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n\n build = config.get(\"build\", config.get(\"rhbuild\"))\n ceph_cluster.rhcs_version = build\n\n # Manage Ceph using ceph-admin orchestration\n command = config.pop(\"command\")\n log.info(\"Executing client %s\" % command)\n orch = Orch(cluster=ceph_cluster, **config)\n method = MAP_[command]\n method(orch, config)\n return 0", "def create_database():\n # This should make a connection to a Cassandra instance your local machine \n # (127.0.0.1)\n\n from cassandra.cluster import Cluster\n cluster = Cluster()\n\n # To establish connection and begin executing queries, need a session\n session = cluster.connect()\n \n #Create a Keyspace \n try:\n session.execute(\"\"\"\n CREATE KEYSPACE IF NOT EXISTS cassandra_project \n WITH REPLICATION = \n { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }\"\"\"\n )\n\n except Exception as e:\n print(e)\n \n # Set the Keyspace\n try:\n session.set_keyspace(\"cassandra_project\")\n except Exception as e:\n print(e)\n \n return session, cluster", "def cluster_connected(hacluster):\n with charms_openstack.charm.provide_charm_instance() as placement_charm:\n placement_charm.configure_ha_resources(hacluster)\n placement_charm.assess_status()", "def initialize_cluster(cluster):\n logger.info('Creating a new cluster for %s...', cluster)\n\n configuration = ClusterConfiguration(version=__version__)\n ztransaction = cluster.zookeeper.transaction()\n ztransaction.create(cluster.path, BinaryCodec(ClusterConfiguration).encode(configuration))\n ztransaction.create(cluster.get_set_path())\n commit(ztransaction)", "def create(self):\n print(\"+ Creating cluster: {}. 
This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv", "def __cassandra_connect(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect('demo')", "def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output", "def _connect(self):\n cluster = Cluster('http://{}:{}'.format(self.host, self.port))\n authenticator = PasswordAuthenticator('Administrator', self.password)\n cluster.authenticate(authenticator)\n self.client = cluster.open_bucket(self.bucket)", "def create_cluster(self, cluster: dict) -> None:\n if self.master_url:\n return\n try:\n self._cluster_client.create_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster': cluster\n })\n _LOGGER.info(\n 'Cluster created successfully: %s',\n self.cluster_metadata.cluster_name)\n self.master_url = self.get_master_url(self.cluster_metadata)\n except Exception as e:\n if e.code == 409:\n _LOGGER.info(\n 'Cluster %s already exists. 
Continuing...',\n ie.current_env().clusters.default_cluster_name)\n elif e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to create cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot create a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 501:\n _LOGGER.error(\n 'Invalid region provided: %s', self.cluster_metadata.region)\n raise ValueError(\n 'Region {} does not exist!'.format(self.cluster_metadata.region))\n else:\n _LOGGER.error(\n 'Unable to create cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def create_cluster(self, name, cluster_type, params, ssh_key, *args, **kwargs):\n raise NotImplementedError", "def create_flink_cluster(self) -> None:\n cluster = {\n 'project_id': self.cluster_metadata.project_id,\n 'cluster_name': self.cluster_metadata.cluster_name,\n 'config': {\n 'software_config': {\n 'optional_components': ['DOCKER', 'FLINK']\n },\n 'gce_cluster_config': {\n 'metadata': {\n 'flink-start-yarn-session': 'true'\n },\n 'service_account_scopes': [\n 'https://www.googleapis.com/auth/cloud-platform'\n ]\n },\n 'endpoint_config': {\n 'enable_http_port_access': True\n }\n }\n }\n self.create_cluster(cluster)", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. 
Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_obj = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_obj)\n checksum = \"crc32c\"\n\n def restart_osd_service():\n osd_services = rados_obj.list_orch_services(service_type=\"osd\")\n for osd_service in osd_services:\n cephadm.shell(args=[f\"ceph orch restart {osd_service}\"])\n time.sleep(30)\n\n def create_pool_write_iops(param, pool_type):\n try:\n pool_name = f\"{pool_type}_pool_{param}\"\n assert (\n rados_obj.create_pool(pool_name=pool_name)\n if \"repli\" in pool_type\n else rados_obj.create_erasure_pool(\n name=pool_name, **{\"pool_name\": pool_name}\n )\n )\n if param == checksum:\n # set checksum value for the pool\n rados_obj.set_pool_property(\n pool=pool_name, props=\"csum_type\", value=param\n )\n # verify checksum value for the pool\n assert (\n param\n == rados_obj.get_pool_property(pool=pool_name, props=\"csum_type\")[\n \"csum_type\"\n ]\n )\n # rados bench will perform IOPs and also verify the num of objs written\n assert rados_obj.bench_write(\n pool_name=pool_name, **{\"max_objs\": 500, \"verify_stats\": False}\n )\n except Exception:\n raise\n finally:\n assert rados_obj.detete_pool(pool=pool_name)\n\n def modify_cache_size(factor):\n cache_value = int(1073741824 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_hdd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n cache_value = int(3221225472 * factor)\n cache_cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_cache_size_ssd\",\n \"value\": cache_value,\n }\n assert mon_obj.set_config(**cache_cfg)\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd modified value - {out} | Expected {cache_value}\"\n )\n assert int(out.strip(\"\\n\")) == cache_value\n\n if config.get(\"checksums\"):\n doc = (\n \"\\n #CEPH-83571646\"\n \"\\n\\t Apply all the applicable different checksum algorithms on pools backed by bluestore\"\n \"\\n\\t\\t Valid algos: none, crc32c, crc32c_16, crc32c_8, xxhash32, xxhash64\"\n \"\\n\\t 1. Create individual replicated pools for each checksum\"\n \"\\n\\t 2. Verify the default checksum algorithm is crc32c\"\n \"\\n\\t 3. Set different checksum algorithm as global and for each pool\"\n \"\\n\\t 4. Verify the checksum algo being set correctly\"\n \"\\n\\t 5. Write data to each pool using rados bench\"\n \"\\n\\t 6. 
cleanup - Remove all the pools created\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore checksum algorithms\")\n checksum_list = config.get(\"checksums\")\n\n try:\n # verify default checksum value\n out, _ = cephadm.shell([\"ceph config get osd bluestore_csum_type\"])\n log.info(f\"BlueStore OSD default checksum: {out} | Expected: crc32c\")\n assert \"crc32c\" in out\n\n for checksum in checksum_list:\n # create pools with given config when OSD csum_type is default crc32c\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n for checksum in checksum_list:\n # set the global checksum value\n cfg = {\n \"section\": \"osd\",\n \"name\": \"bluestore_csum_type\",\n \"value\": checksum,\n }\n assert mon_obj.set_config(**cfg)\n\n # verify the newly set global checksum value\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_csum_type\")\n assert checksum in out\n log.info(f\"global checksum set verified - {out}\")\n\n # create pools with given config when OSD csum_type is varied\n create_pool_write_iops(\n param=checksum, pool_type=\"replicated\"\n ) if \"crc\" in checksum else create_pool_write_iops(\n param=checksum, pool_type=\"ec\"\n )\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset global checksum config\n assert mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_csum_type\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore Checksum algorithm verification completed.\")\n return 0\n\n if config.get(\"bluestore_cache\"):\n doc = (\n \"\\n #CEPH-83571675\"\n \"\\n\\t Verify BlueStore cache default values.\"\n \"\\n\\t Tune cache parameters and perform IOPS\"\n \"\\n\\t 1. Verify the default value for - bluestore_cache_size(0)\"\n \" | bluestore_cache_size_hdd (1GB) | bluestore_cache_size_ssd (3GB)\"\n \"\\n\\t 2. Modify the value of bluestore_cache_size_ssd and bluestore_cache_size_hdd\"\n \"\\n\\t 3. Verify the values being reflected in ceph config\"\n \"\\n\\t 4. Create replicated and ec pool and perform IOPS\"\n \"\\n\\t 5. 
cleanup - Remove all the pools created and reset configs modified\"\n )\n log.info(doc)\n log.info(\"Running test case to verify BlueStore Cache size tuning\")\n\n try:\n # verify default value for bluestore cache\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size\")\n log.info(f\"bluestore_cache_size default value - {out} | Expected 0\")\n assert int(out.strip(\"\\n\")) == 0\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_hdd\")\n log.info(\n f\"bluestore_cache_size_hdd default value - {out} | Expected 1073741824\"\n )\n assert int(out.strip(\"\\n\")) == 1073741824\n\n out = mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_size_ssd\")\n log.info(\n f\"bluestore_cache_size_ssd default value - {out} | Expected 3221225472\"\n )\n assert int(out.strip(\"\\n\")) == 3221225472\n\n # modify ssd and hdd cache (increase)\n modify_cache_size(factor=1.5)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_inc\", pool_type=\"ec\")\n\n # modify ssd and hdd cache (decrease)\n modify_cache_size(factor=0.7)\n\n # restart osd services\n restart_osd_service()\n\n # perform iops\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"replicated\")\n create_pool_write_iops(param=\"cache_dec\", pool_type=\"ec\")\n\n except Exception as E:\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.error(E)\n log.exception(E)\n return 1\n finally:\n # reset modified cache configs\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_hdd\"}\n )\n mon_obj.remove_config(\n **{\"section\": \"osd\", \"name\": \"bluestore_cache_size_ssd\"}\n )\n\n # restart osd services\n restart_osd_service()\n wait_for_clean_pg_sets(rados_obj, timeout=300, _sleep=10)\n\n log.info(\"BlueStore cache size tuning verification completed.\")\n return 0", "def connect(connstr, # type: str\n *options, # type: ClusterOptions\n **kwargs, # type: Dict[str, Any]\n ) -> Cluster:\n cluster = Cluster(connstr, *options, **kwargs)\n return cluster", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def run(ceph_cluster, **kw):\n try:\n log.info(f\"MetaData Information {log.metadata} in {__name__}\")\n fs_util = FsUtils(ceph_cluster)\n\n config = kw.get(\"config\")\n build = config.get(\"build\", config.get(\"rhbuild\"))\n clients = ceph_cluster.get_ceph_objects(\"client\")\n clients[0].upload_file(\n 
\"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n clients[1].upload_file(\n \"tests/cephfs/clients/file_lock_utitlity.py\",\n \"/home/cephuser/file_lock_utility.py\",\n sudo=True,\n )\n version, rc = clients[0].exec_command(\n sudo=True, cmd=\"ceph version --format json\"\n )\n fs_util.prepare_clients([clients[0]], build)\n fs_util.auth_list([clients[0], clients[1]])\n if not build.startswith((\"3\", \"4\", \"5\")):\n if not fs_util.validate_fs_info(clients[0], \"cephfs\"):\n log.error(\"FS info Validation failed\")\n return 1\n mounting_dir = \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in list(range(10))\n )\n fuse_mounting_dir = f\"/mnt/cephfs_fuse{mounting_dir}/\"\n fs_util.fuse_mount([clients[0], clients[1]], fuse_mounting_dir)\n\n kernel_mounting_dir = f\"/mnt/cephfs_kernel{mounting_dir}/\"\n mon_node_ips = fs_util.get_mon_node_ips()\n fs_util.kernel_mount(\n [clients[0], clients[1]], kernel_mounting_dir, \",\".join(mon_node_ips)\n )\n rc = unlink_file(\n clients[0],\n clients[1],\n \"fuse_mount.txt\",\n fuse_mounting_dir,\n validate_from=[kernel_mounting_dir],\n )\n\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n rc = unlink_file(\n clients[0],\n clients[1],\n \"kernel_mount.txt\",\n kernel_mounting_dir,\n validate_from=[fuse_mounting_dir],\n )\n if rc:\n raise CommandFailed(\"Unlink of the file is failing when file is locked\")\n\n return 0\n\n except Exception as e:\n log.error(e)\n log.error(traceback.format_exc())\n return 1\n finally:\n log.info(\"---clean up---------\")\n fs_util.client_clean_up(\n \"umount\", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir\n )\n fs_util.client_clean_up(\n \"umount\",\n kernel_clients=[clients[0]],\n mounting_dir=kernel_mounting_dir,\n )", "def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager", "def test_create_cluster_network(self):\n pass", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def getClusterSetup(self):\n data = {}\n data[\"parameters\"] = self.config.getACSParams()\n \n fqdn = {}\n fqdn[\"master\"] = self.getManagementEndpoint()\n fqdn[\"agent\"] = self.getAgentEndpoint()\n data[\"domains\"] = fqdn\n \n data[\"sshTunnel\"] = \"ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N \" + self.config.get('ACS', 'username') + \"@\" + self.getManagementEndpoint() + \" -p 2200\"\n\n azure = {}\n azure['resourceGroup'] = self.config.get('Group', 'name')\n data[\"azure\"] = azure\n\n return data" ]
[ "0.6605776", "0.6418442", "0.6327758", "0.61983645", "0.61876357", "0.61836255", "0.6062055", "0.6059325", "0.60109323", "0.5957398", "0.5952814", "0.58882135", "0.58059293", "0.57884413", "0.5772146", "0.5749063", "0.57396287", "0.5687946", "0.5676263", "0.56761366", "0.56517947", "0.56498843", "0.56191844", "0.5596464", "0.5593158", "0.5583261", "0.5582657", "0.557475", "0.5559109", "0.55313635" ]
0.6422216
1
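The connect() helper above hands back a live librados cluster handle; helpers such as ceph_mon_command in the neighbouring snippets presumably wrap rados' mon_command call on that handle. A small usage sketch (the config path and the 'mon stat' command are illustrative):

import json
import rados

cluster = rados.Rados(conffile="/etc/ceph/ceph.conf")
cluster.connect()
try:
    print("fsid: " + cluster.get_fsid())
    # mon_command takes a JSON-encoded command and returns (retcode, output bytes, status string).
    cmd = json.dumps({"prefix": "mon stat", "format": "json"})
    ret, outbuf, outs = cluster.mon_command(cmd, b'', timeout=5)
    if ret == 0:
        print(outbuf.decode("utf-8", "replace"))
    else:
        print("mon_command failed: {} ({})".format(ret, outs))
finally:
    cluster.shutdown()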
Gather Ceph monitor information.
def get_monitor_info(handle, timeout):
    mon_info = dict()
    mon_info['stat'] = ceph_mon_command(handle, 'mon stat', timeout)
    mon_info['dump'] = ceph_mon_command(handle, 'mon dump', timeout)
    mon_info['map'] = ceph_mon_command(handle, 'mon getmap', timeout)
    mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)
    return mon_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor", "def monitor(self):", "def get_host_stats(self, refresh=False):", "def get_monitor_details():\n monitor_id = paranoid_clean(request.args.get('id'))\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0})\n if not monitor:\n return jsonify({'success': False, 'error': 'Monitor was not found.'})\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n link = monitor['metadata']['rss_link']\n articles = list(articles.find({'feed_source': link}, {'_id': 0}))\n for idx, item in enumerate(articles):\n articles[idx]['title'] = html.unescape(item['title'])\n articles[idx]['date'] = item['collected'][:10]\n articles.sort(key=lambda x: x['collected'], reverse=True)\n return jsonify({'success': True, 'monitor': monitor, 'articles': articles})", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def monitor(frist_invoke=2):\n sdiskio = psutil.disk_io_counters()\n # sleep some time\n\n value_dic = {\n 'iostats': {\n 'io.disks_read': sdiskio.read_bytes/(1024*1024),\n 'io.disks_write': sdiskio.write_bytes/(1024*1024),\n 'io.disks_read_count': sdiskio.read_count/(1024 * 1024),\n 'io.disks_write_count': sdiskio.write_count/(1024 * 1024),\n 'io.disks_read_time': sdiskio.read_time/1000,\n 'io.disks_write_time': sdiskio.write_time/1000,\n 'io.disks_busy_time': sdiskio.write_time/1000,\n }\n }\n\n return value_dic", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n 
proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def get_health_info(handle, timeout):\n health = dict()\n\n health['stat'] = ceph_mon_command(handle, 'health' , timeout)\n # TODO command not known with ceph_mon_command\n #health['detail'] = ceph_mon_command(handle, 'health detail', timeout)\n health['detail'] = shell_command('ceph health detail') + b'\\n'\n health['df'] = ceph_mon_command(handle, 'df' , timeout)\n health['report'] = ceph_mon_command(handle, 'report' , timeout)\n\n return health", "def monitor(self):\n logging.debug(\"monitor entered\")\n # monitor machines...\n # first, get a list of machine IDs\n res = progress_table(self.machines)\n return res", "def monitor(self):\n\t\tresponse = self._request(\"/demovibes/ajax/monitor/{}/\".format(self.next_event))\n\t\tif not response:\n\t\t\treturn None\n\t\t\n\t\tdata = response.read()\n\t\treturn self.parse_monitor(data)", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def poll_host(self, server, obj, name):\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats", "def monitor(self, rms):\n pass", "def watch():\n\n try:\n headers = ('CONTAINER ID', 'NAME', 'CPU %', 'MEM USAGE / LIMIT',\n 'MEM %', 'NET I/O', 'BLOCK I/O', 'PIDS')\n column_width = 20\n for element in headers:\n print(element.ljust(column_width)),\n print('')\n\n for container in CLIENT.containers.list():\n column_width = 20\n stats = container.stats(stream=False)\n\n # Block I/O stats\n blkio = stats.get('blkio_stats').get('io_service_bytes_recursive')\n # in case blkio is empty --> IndexError: list index out of range\n if not blkio:\n blkio_read = '0'\n blkio_write = '0'\n else:\n blkio_read = size(blkio[0].get('value'), system=si)\n blkio_write = size(blkio[1].get('value'), system=si)\n\n # Network stats\n rx_stats = size(stats.get('networks').get('eth0').get('rx_bytes'), system=si)\n tx_stats = size(stats.get('networks').get('eth0').get('tx_bytes'), system=si)\n\n # Memory stats\n mem = stats.get('memory_stats')\n mem_usage = mem.get('stats').get('active_anon')\n mem_limit = mem.get('limit')\n mem_percent = (\"%.2f\"%((mem_usage / mem_limit)*100))\n\n # CPU stats\n # this is taken directly from docker CLIENT:\n # https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/CLIENT/stats.go#L309\n cpu_percent = 0.0\n cpu = stats.get('cpu_stats')\n pre_cpu = stats.get('precpu_stats')\n cpu_total = cpu.get('cpu_usage').get('total_usage')\n pre_cpu_total = pre_cpu.get('cpu_usage').get('total_usage')\n cpu_count = cpu.get('online_cpus')\n\n cpu_delta = cpu_total - pre_cpu_total\n system_delta = cpu.get('system_cpu_usage') - pre_cpu.get('system_cpu_usage')\n\n if system_delta > 0.0 and cpu_delta > 0.0:\n cpu_percent = (\"%.2f\"%(cpu_delta / system_delta * 100.0 * cpu_count))\n\n # container attributes\n attrs = [(str(container.short_id), str(container.name), str(cpu_percent),\n str(size((mem_usage), system=si) + \" / \" + size((mem_limit), system=si)),\n str(mem_percent), str(rx_stats + \" / \" + tx_stats),\n str(blkio_read + \" / \" + 
blkio_write),\n str(stats.get('pids_stats').get('current')))]\n\n for row in attrs:\n for element in row:\n print(element.ljust(column_width)),\n print('')\n\n except (docker.errors.NotFound, KeyError, AttributeError):\n print('No such container or container not running!')", "def getCurrentMetrics(self):\n self.notifyPut('Obtaining Current Display Metrics')\n try:\n data = []\n data = win32api.EnumDisplayMonitors(None, None)\n screens = {}\n scrNum = 0\n for screen in data:\n screens[scrNum] = screen[2]\n scrNum += 1\n return screens \n except Exception, e:\n self.logQ.put('{0} - Unable to capture current metrics'.format(e))", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def get_monitor_string(self):\n\n return self.reporter.get_overview_string(self.info)", "def get_monitor_info_a(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoA, h_monitor)", "def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info", "def monitoring_group(ctx):\n pass", "def get_monitor_info_w(h_monitor):\n return __get_monitor_info(WINDLL.user32.GetMonitorInfoW, h_monitor)", "def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. 
See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "async def start_monitor(self):\n self._logger.info(\"Starting monitor...\")\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n self._logger.info(\"Starting monitor...\")\n cmd = \"/home/martijn/go/bin/go run \" \\\n \"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/fabric-cli.go event listenblock \" \\\n \"--cid mychannel --peer localhost:8001 \" \\\n \"--config /home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/config.yaml\"\n out_file = open(\"transactions.txt\", \"w\")\n my_env = os.environ.copy()\n my_env[\"GOPATH\"] = \"/home/martijn/gocode\"\n self.monitor_process = subprocess.Popen(cmd.split(\" \"), env=my_env, stdout=out_file,\n cwd=\"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/\")\n\n async def get_latest_block_num():\n self._logger.info(\"Getting latest block nr...\")\n response = await self.fabric_client.query_info(\n requestor=org1_admin,\n channel_name='mychannel',\n peers=['peer0.org1.example.com'],\n decode=True\n )\n print(response)\n\n latest_block = response.height\n if latest_block > self.latest_block_num:\n self._logger.info(\"Updating to block nr %d\", latest_block)\n old_latest_block_num = self.latest_block_num\n self.latest_block_num = latest_block\n confirm_time = int(round(time.time() * 1000))\n for confirmed_block_num in range(old_latest_block_num + 1, latest_block + 1):\n self.block_confirm_times[confirmed_block_num] = confirm_time\n\n self.monitor_lc = run_task(get_latest_block_num, interval=0.1)", "def start_monitor(self, collector):\n pass", "def getMonitor(self) -> ghidra.util.task.TaskMonitor:\n ...", "def getMonitors(self):\n return [self.monitor]", "def parse_monitor(self):\n return DEFAULT_MONITOR" ]
[ "0.66455966", "0.6420232", "0.64127403", "0.6299886", "0.6260533", "0.6227388", "0.6212769", "0.6193962", "0.6172191", "0.6142712", "0.60478276", "0.6043625", "0.60359365", "0.5971578", "0.5969627", "0.5957598", "0.5948469", "0.5928224", "0.5919851", "0.5902216", "0.58823013", "0.5860545", "0.5846942", "0.58260006", "0.5782762", "0.57789516", "0.5762519", "0.5750942", "0.57418644", "0.5724609" ]
0.7485646
0
Gather ceph device information
def get_device_info(handle, timeout): device_info = dict() device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout) return device_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_info(self) -> Dict[str, Any]:\n via_device = 'meter_adapter'\n if self.toon.gas.is_smart:\n via_device = 'electricity'\n\n return {\n 'name': 'Gas Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'gas'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, via_device),\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._actuator.id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._actuator.id))},\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def device_info(self):\n return {\n \"name\": self._alias,\n \"model\": self._model,\n \"manufacturer\": \"TP-Link\",\n \"connections\": {(dr.CONNECTION_NETWORK_MAC, self._mac)},\n \"sw_version\": self._sysinfo[\"sw_ver\"],\n }", "def device():\n return G.DEVICE", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Electricity Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'electricity'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'meter_adapter'),\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }", "def device_info(self):\n if self._mac:\n mac = {(CONNECTION_NETWORK_MAC, self._mac)}\n else:\n mac = {}\n\n device_info = {\n ATTR_IDENTIFIERS: {(DOMAIN, self._item_id)},\n ATTR_NAME: self._name,\n ATTR_CONNECTIONS: mac,\n ATTR_MANUFACTURER: \"Google\",\n ATTR_MODEL: DEV_CLIENT_MODEL,\n \"via_device\": (DOMAIN, self._system_id),\n }\n\n return device_info", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def get_device_information(self):\n return self.mycam.devicemgmt.GetDeviceInformation()", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, 0),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, 0))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.system.product_type,\n \"sw_version\": self._data.wiserhub.system.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, str(self.coordinator.gios.station_id))},\n \"name\": DEFAULT_NAME,\n \"manufacturer\": MANUFACTURER,\n \"entry_type\": \"service\",\n }", "def 
print_device_info(device):\n assert(isinstance(device, Device))\n print(\" Device Name : %s\" % device.name)\n print(\" OS Type : %s\" % device.os_type)\n print(\" IP Address : %s\" % device.ip_addr)\n print(\" Interfaces : %s\" % \", \".join(device.iflist))", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.tesla_device.id())},\n \"name\": self.tesla_device.car_name(),\n \"manufacturer\": \"Tesla\",\n \"model\": self.tesla_device.car_type,\n \"sw_version\": self.tesla_device.car_version,\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.coordinator.data[\"deviceID\"])},\n \"name\": self.coordinator.data[\"deviceName\"],\n \"manufacturer\": self.coordinator.data[\"deviceManufacturer\"],\n \"model\": self.coordinator.data[\"deviceModel\"],\n \"sw_version\": self.coordinator.data[\"appVersionName\"],\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }", "def get_device_info(ns, device, human_friendly):\n if device.NumberOfBlocks and device.BlockSize:\n size = size2str(device.NumberOfBlocks * device.BlockSize, human_friendly)\n else:\n size = 'N/A'\n\n fslabel = fs.get_device_format_label(ns, device)\n return (device.DeviceID,\n device.Name,\n device.ElementName,\n size,\n fslabel)", "def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self.config_entry.entry_id)},\n \"name\": NAME,\n \"model\": VERSION,\n \"manufacturer\": NAME,\n }", "def device_info(self) -> dict:\n return {\n \"connections\": {(DOMAIN, self._unique_id)},\n \"name\": self._host,\n \"manufacturer\": \"IMAP E-Mail\",\n \"sw_version\": VERSION,\n }", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.device_id)},\n \"name\": self.name,\n \"manufacturer\": self.manufacturer,\n \"model\": self._device.device_model,\n \"sw_version\": \"\",\n \"via_device\": (DOMAIN, self._controller_ip),\n }", "def device_info(self) -> dict[str, any]:\n device_information = {\n \"identifiers\": {(DOMAIN, self._dev_id)},\n \"name\": self._device_name,\n \"manufacturer\": self._manufacturer,\n \"model\": self._model,\n \"sw_version\": self._fw_version,\n }\n\n if self._dev_id != self._api.gateway_id:\n device_information[\"via_device\"] = (DOMAIN, self._api.gateway_id)\n else:\n device_information[\"name\"] = f\"Smile {self._api.smile_name}\"\n\n return device_information", "def device_info(self):\n return {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n \"default_name\": self._device_name,\n \"default_model\": 
self._device[\"device_model\"],\n \"via_device\": (DOMAIN, self._router.unique_id),\n }", "def device_info(self) -> Optional[Dict[str, Any]]:\n return {ATTR_NAME: self.name, \"identifiers\": {(DOMAIN, self._device.device_id)}}", "def device_information(self):\n return self._device_information", "def getDeviceHostDetails(self,device):\n dev_host_det = self.host.get_host_device_ver(device)\n build = dev_host_det['build']\n # redhat version\n os_ver = dev_host_det['version']\n kernel = dev_host_det['kernel']\n \n self.setBuild()\n #self.foundCardbuild = self.setBuild()\n\n str = \"Running '%s' tests on device '%s',build '%s' \\n host kernel '%s'\"%(self.testcaseStr,device,build,kernel) + \\\n \" os version '%s', machine '%s' \"%(os_ver,self.host.name)\n\n return str", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._device_id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._device_id))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.devices.get_by_id(self._device_id).model,\n \"sw_version\": self._device.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }" ]
[ "0.6951333", "0.6848414", "0.6711916", "0.6700383", "0.667151", "0.66525185", "0.6542051", "0.6531767", "0.6522371", "0.6514975", "0.64998937", "0.64957225", "0.6470811", "0.6451061", "0.6438751", "0.6416084", "0.64145595", "0.6406177", "0.63985395", "0.63964", "0.63928473", "0.63734853", "0.63632107", "0.63554406", "0.63509446", "0.6329412", "0.6324491", "0.6317867", "0.63084006", "0.6302769" ]
0.6898668
1
Gather ceph manager information
def get_manager_info(handle, timeout): mgr_info = dict() mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout) mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout) mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout) return mgr_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = cluster['version']\n version = str(version.decode('utf-8')).split(' ')[2].split(\".\")[0]\n\n if int(version) >= 13:\n cluster['versions'] = shell_command('ceph versions') + b'\\n'\n\n\n fsid = handle.get_fsid() + '\\n'\n cluster['fsid'] = str.encode(fsid)\n\n with open(ceph_config, 'r') as f:\n ceph_conf = f.read()\n\n cephconf = str(ceph_conf)\n cluster['ceph_conf'] = str.encode(cephconf)\n\n return cluster", "def get_monitor_info(handle, timeout):\n mon_info = dict()\n mon_info['stat'] = ceph_mon_command(handle, 'mon stat' , timeout)\n mon_info['dump'] = ceph_mon_command(handle, 'mon dump' , timeout)\n mon_info['map'] = ceph_mon_command(handle, 'mon getmap' , timeout)\n mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout)\n return mon_info", "def manage_info():", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info", "def get_ceph_srv_info():\n services = []\n for name, pid in get_ceph_pids():\n process = psutil.Process(pid)\n services.append(CEPHSrvInfo(name, pid, process.get_cpu_percent(),\\\n process.memory_info().rss))\n return services", "def manager():\n pass", "def info(client):\n\n return client.get_info()", "def _get_conf():\n 
configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def get_health_info(handle, timeout):\n health = dict()\n\n health['stat'] = ceph_mon_command(handle, 'health' , timeout)\n # TODO command not known with ceph_mon_command\n #health['detail'] = ceph_mon_command(handle, 'health detail', timeout)\n health['detail'] = shell_command('ceph health detail') + b'\\n'\n health['df'] = ceph_mon_command(handle, 'df' , timeout)\n health['report'] = ceph_mon_command(handle, 'report' , timeout)\n\n return health", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def fp_meta(self):\n for server in self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)", "def getInfo():", "def all_details(self):\n self.which_owner()\n self.which_security()\n self.zabbix_details()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)", "def summary(self):\n\n result = dict()\n\n result[\"control_manager\"] = self._control_manager.summary()\n result[\"data_logger\"] = self._db_manager.summary()\n result[\"alarm_manager\"] = self._alarm_manager.summary()\n result[\"machine_manager\"] = self._machine_manager.summary()\n result[\"function_manager\"] = self._function_manager.summary()\n\n return result", "def rpc_info():", "def get_snmp_information(self):\n\n snmp_output = self._send_command('/snmp print')\n snmp_community_output = self._send_command(\n '/snmp community print terse')\n\n snmp = parse_output(snmp_output)\n community_list = parse_terse_output(snmp_community_output)\n\n community = {}\n\n for item in community_list:\n community.setdefault(item.get('name'), {\n 'acl': item.get('addresses'),\n 'mode': u'rw' if item.get('write-access') == 'yes' else u'ro'\n })\n\n return {\n 'contact': snmp.get('contact'),\n 'location': snmp.get('location'),\n 'community': community,\n 'chassis_id': ''\n }", "def retrieve(self):\n\t\timport shelve\n\t\timport sys\n\t\timport glob\n\n\t\td = shelve.open(\".storedmanager\")\n\t\tif not d.has_key(\"storedmanager\"):\n\t\t\t# Check if already is done the file\n\t\t\tif len(glob.glob(\"*.tar.gz\")) != 0:\n\t\t\t\tmessage = \"clustermanager.retrive: The job is already DONE!\"\n\t\t\telse:\n\t\t\t\tmessage = \"\\nclustermanager.retrieve: ERROR Not found the\" \\\n\t\t\t\t\t+\" class stored in .storedmanager file\"\n\t\t\tsys.exit(message)\n\n\t\tcopyself = 
d[\"storedmanager\"]\n\t\t\n\t\t# Putting the datamembers: FIXME: If you want all the datanames\n\t\t# do it with the __dict__ and __setattr__ methods\n\t\tself.nameID = copyself.nameID\n\t\tself.jobsid = copyself.jobsid\n\t\tself.jobidevt = copyself.jobidevt\n\t\tself.tasksID = copyself.tasksID\n\t\tself.outputfiles = copyself.outputfiles\n\t\tself.njobs = copyself.njobs\n\t\tself.basedir = copyself.basedir\n\t\tself.pkgpath = copyself.pkgpath\n\t\tself.libsdir = copyself.libsdir\n\t\tself.nevents = copyself.nevents\n\t\ttry:\n\t\t\tself.taskstatus = copyself.taskstatus\n\t\texcept AttributeError:\n\t\t\t# It means we have not yet done a previous harvest\n\t\t\tpass\n\t\t\n\t\td.close()", "def manager_status(self, msg):\n result = {\n 'success': 0,\n 'msg': 'Service Manager Status',\n 'result': {\n 'status': 'running',\n 'uname': platform.uname(),\n 'frontend_endpoint': self.frontend_endpoint,\n 'backend_endpoint': self.backend_endpoint,\n 'sink_endpoint': self.sink_endpoint,\n 'mgmt_endpoint': self.mgmt_endpoint,\n 'result_publisher_port': self.result_pub_port,\n }\n }\n\n return result", "def bdev_nvme_get_discovery_info(client):\n return client.call('bdev_nvme_get_discovery_info')", "def manager_config(self, manager):\n _, body = self.request('/v1.1/managers/configs/%s' % manager, 'GET')\n return body", "def get_manager_stats(self):\n try:\n names, quantities, types, passwords = zip(*[(manager.name,\n manager.transports_in_fleet, manager.fleet_type, manager.password)\n for manager in self.manager_agents.values()])\n except ValueError:\n names, quantities, types, passwords = [], [], [], []\n\n df = pd.DataFrame.from_dict(\n {\"password\": passwords, \"name\": names, \"transports_in_fleet\": quantities, \"fleet_type\": types})\n return df", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def check_manager_status(brief=True):\n ret = {\n \"manager\": {\n \"manager_id\": None,\n \"queues\": [],\n \"queue_len\": [],\n \"status\": \"stopped\",\n },\n \"workers\": [],\n \"fabrics\": [],\n \"total_queue_len\": 0,\n }\n seq = get_random_sequence()\n msg = eptMsg(MSG_TYPE.GET_MANAGER_STATUS, seq=seq, data={\"brief\": brief})\n #logger.debug(\"get manager status (seq:0x%x) brief:%r\", seq, brief)\n redis = get_redis()\n p = redis.pubsub(ignore_subscribe_messages=True)\n p.subscribe(MANAGER_CTRL_RESPONSE_CHANNEL)\n redis.publish(MANAGER_CTRL_CHANNEL, msg.jsonify())\n start_ts = time.time()\n timeout = AppStatus.MANAGER_STATUS_TIMEOUT \n try:\n if brief:\n timeout = AppStatus.MANAGER_STATUS_BRIEF_TIMEOUT\n while start_ts + timeout > time.time():\n data = p.get_message(timeout=1)\n if data is not None:\n channel = data[\"channel\"]\n if channel == MANAGER_CTRL_RESPONSE_CHANNEL:\n msg = eptMsg.parse(data[\"data\"]) \n if msg.msg_type == MSG_TYPE.MANAGER_STATUS:\n #logger.debug(\"received manager status (seq:0x%x)\", msg.seq)\n ret[\"manager\"] = msg.data[\"manager\"]\n ret[\"workers\"] = msg.data[\"workers\"]\n ret[\"fabrics\"] = msg.data[\"fabrics\"]\n ret[\"total_queue_len\"] = msg.data[\"total_queue_len\"]\n return ret\n except Exception as e:\n logger.debug(\"Traceback:\\n%s\", traceback.format_exc())\n logger.debug(\"error: %s\", e)\n finally:\n if redis is not None and hasattr(redis, \"connection_pool\"):\n redis.connection_pool.disconnect()\n\n logger.warn(\"no manager response within timeout(%s sec)\", timeout)\n return ret", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", 
resource_name=\"cluster-info\")" ]
[ "0.65334594", "0.6489982", "0.6204253", "0.60092634", "0.6008082", "0.5893398", "0.5754568", "0.5699375", "0.56716347", "0.5611492", "0.5582611", "0.5579014", "0.5577621", "0.5576179", "0.55696046", "0.5542833", "0.55382943", "0.55256027", "0.5521256", "0.55209965", "0.5510726", "0.5487898", "0.54478604", "0.54429203", "0.54427075", "0.5438311", "0.5398613", "0.53938276", "0.538157", "0.5380126" ]
0.7890347
0
Ensure that graveyard_removal.py correctly removes the graveyard from an h5m file.
def test_default_graveyard_removal(): os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file) size = os.path.getsize(test_file[:-4] + "_no_grave.h5m") assert size == 5748780
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def remove_group(self):\n try:\n with open_hdf5(self.file_name, mode=\"a\") as hdf_file:\n del hdf_file[self.h5_path]\n except KeyError:\n pass", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def test_print_graveyard_removal(capfd):\n os.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file + \" -p\")\n out, err = capfd.readouterr()\n assert (\"12682136550675318127\" in out) == True", "def __del__(self):\n self.h5file.close()", "def test_locate_graveyard():\n groups_to_write, graveyard_sets = locate_graveyard(mb)\n assert groups_to_write == [12682136550675318125, 12682136550675318126,\n 12682136550675318128, 12682136550675318129]", "def __delitem__(self, key):\n if self.file_exists:\n try:\n with open_hdf5(self.file_name, mode=\"a\") as store:\n del store[self._get_h5_path(key)]\n except (AttributeError, KeyError):\n pass", "def tearDown(self):\n\n self.h5file.close()\n self.h5file = None\n Path(self.h5fname).unlink() # comment this for debug only\n super().tearDown()", "def test_d_remove_database(self):\n\n if os.path.isfile(location):\n os.remove(location)\n\n assert(True)", "def remove_file_from_cache(self, md5_hash):\n self.used_space -= len(self.storage[md5_hash])\n self.storage.pop(md5_hash)\n self.remove_from_usage_queue(md5_hash)", "def test_exc(self):\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertEqual(h5i.get_type(g), h5i.BADID)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def remove():", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n 
db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'", "def purge_outdated(self):\n todelete = []\n sql = \"select rowid, path, mtime from pictures\"\n cur = self.con.execute(sql)\n for rowid, path_str, mtime in cur:\n if mtime and op.exists(path_str):\n picture_mtime = os.stat(path_str).st_mtime\n if int(picture_mtime) <= mtime:\n # not outdated\n continue\n todelete.append(rowid)\n if todelete:\n sql = \"delete from pictures where rowid in (%s)\" % ','.join(map(str, todelete))\n self.con.execute(sql)", "def verify_no_snapshot_reingestion(c: Composition) -> None:\n c.run(\"testdrive\", \"wait-for-snapshot.td\", \"postgres-disable-select-permission.td\")\n\n restart_mz(c)\n\n c.run(\n \"testdrive\",\n \"delete-rows-t1.td\",\n \"delete-rows-t2.td\",\n \"alter-table.td\",\n \"alter-mz.td\",\n )", "async def process_fixup(self, reference: Optional[str] = None) -> None:\n full_snapshots = [\n x for x in self.sys_snapshots.list_snapshots if x.sys_type == SNAPSHOT_FULL\n ]\n\n if len(full_snapshots) < MINIMUM_FULL_SNAPSHOTS:\n return\n\n _LOGGER.info(\"Starting removal of old full snapshots\")\n for snapshot in sorted(full_snapshots, key=lambda x: x.date)[:-1]:\n self.sys_snapshots.remove(snapshot)", "def test_removal_mount_dependency(self):\n from chroma_core.models import ManagedMgs\n\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"mounted\")\n try:\n # Make it so that the mount unconfigure operations will fail\n MockAgentRpc.succeed = False\n\n # -> the TargetMount removal parts of this operation will fail, we\n # want to make sure that this means that Target deletion part\n # fails as well\n self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\", check=False)\n\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")\n finally:\n MockAgentRpc.succeed = True\n\n # Now let the op go through successfully\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\")\n with self.assertRaises(ManagedMgs.DoesNotExist):\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")", "def test_remove_file_group0(self):\n with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:\n mets = OcrdMets(filename=join(tempdir, 'mets.xml'))\n self.assertEqual(len(mets.file_groups), 17)\n self.assertEqual(len(mets.find_all_files()), 35)\n # print()\n # before = sorted([x.ID for x in mets.find_all_files()])\n with self.assertRaisesRegex(Exception, \"not empty\"):\n mets.remove_file_group('OCR-D-GT-ALTO')\n mets.remove_file_group('OCR-D-GT-PAGE', recursive=True)\n # print([x for x in before if x not in sorted([x.ID for x in mets.find_all_files()])])\n self.assertEqual(len(mets.file_groups), 16)\n self.assertEqual(len(mets.find_all_files()), 33)", "def remove():\n\n db_remove()", "def removeStaleLVM():\n log.debug(\"waiting 5s for activation of stale lvm on new md array %s\", self.path)\n time.sleep(5)\n udev.settle()\n\n try:\n pv_info = lvm.pvinfo(device=self.path)[self.path]\n except (errors.LVMError, KeyError) as e:\n return\n\n vg_uuid = None\n try:\n vg_uuid = udev.device_get_vg_uuid(pv_info)\n except KeyError:\n return\n\n if vg_uuid:\n log.info(\"removing stale LVM metadata found on %s\", self.name)\n try:\n lvm.vgremove(None, vg_uuid=vg_uuid)\n except errors.LVMError as e:\n log.error(\"Failed to remove stale 
volume group from newly-created md array %s: %s\",\n self.path, str(e))\n raise", "def test_removedFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.pump()\n self.remove(\"service1.json\")\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service1\", \"staging\"), [])", "def _clean_up(self):", "def removedb():\n\n try:\n os.remove(rebasedb)\n except OSError:\n pass", "def check_hdf5_files(database):\n\n logger.info(\" Checking dataset Integrity\")\n remove_file = []\n for fname in database:\n try:\n f = h5py.File(fname, 'r')\n mol_names = list(f.keys())\n if len(mol_names) == 0:\n warnings.warn(' -> %s is empty ' % fname)\n remove_file.append(fname)\n f.close()\n except BaseException:\n warnings.warn(' -> %s is corrputed ' % fname)\n remove_file.append(fname)\n\n for name in remove_file:\n database.remove(name)\n if remove_file:\n logger.info(f'\\t -> Empty or corrput databases are removed:\\n'\n f'{remove_file}')\n\n return database", "def tearDownClass(cls):\n path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)),\n 'examples', 'arkane', 'species')\n cls.dump_path = os.path.join(path, 'C2H6')\n cls.load_path = os.path.join(path, 'C2H6_from_yaml')\n cls.extensions_to_delete = ['pdf', 'txt', 'inp', 'csv']\n cls.files_to_delete = ['arkane.log', 'output.py']\n cls.files_to_keep = ['C2H6.yml']\n for path in [cls.dump_path, cls.load_path]:\n for name in os.listdir(path):\n item_path = os.path.join(path, name)\n if os.path.isfile(item_path):\n extension = name.split('.')[-1]\n if name in cls.files_to_delete or \\\n (extension in cls.extensions_to_delete and name not in cls.files_to_keep):\n os.remove(item_path)\n else:\n # This is a sub-directory. 
remove.\n shutil.rmtree(item_path)", "def database_maintenance():\r\n\r\n logging.debug('database_maintenance()')\r\n\r\n # Check datgabase\r\n all_imagepaths = get_all_images_from_database()\r\n for imagepath in all_imagepaths:\r\n if not os.path.isfile(imagepath):\r\n delete_image_from_database(imagepath)\r\n logging.debug('database_maintenance() - image not in folder, deleted')\r\n\r\n # Check temporary folder\r\n all_imagepaths = get_all_images_from_filesystem()\r\n for imagepath in all_imagepaths:\r\n if not exists_image_in_database(imagepath):\r\n delete_image_from_database(imagepath)\r\n logging.debug('database_maintenance() - image not in database, deleted')", "def test_remove_orphaned_metadata(self):\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini'),\n '[orphaned.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini'),\n '[testdriver.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini'), 'expected: FAIL\\n')\n with self._patch_builtins():\n manifests = load_and_update_manifests(self.finder)\n self.command.remove_orphaned_metadata(manifests)\n self.assertFalse(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini')))", "def deleteShards():\n os.popen('rm *_shard')" ]
[ "0.64098233", "0.6114747", "0.58413917", "0.582247", "0.57496554", "0.56727177", "0.5573327", "0.5490028", "0.54383737", "0.53650856", "0.53048605", "0.5302251", "0.5285933", "0.5256595", "0.52538306", "0.5244757", "0.5230618", "0.5228572", "0.5218584", "0.5204441", "0.51699096", "0.5156562", "0.51382935", "0.5113371", "0.5105278", "0.5100508", "0.5095089", "0.5079535", "0.5077172", "0.506946" ]
0.7458633
0
Ensure that graveyard_removal.py prints the correct entity handle for the graveyard volume.
def test_print_graveyard_removal(capfd): os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file + " -p") out, err = capfd.readouterr() assert ("12682136550675318127" in out) == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = {'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "def test_default_graveyard_removal():\n\tos.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file)\n\tsize = os.path.getsize(test_file[:-4] + \"_no_grave.h5m\")\n\tassert size == 5748780", "def test_delete_volume_failure_modes(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self._fail_space_delete = True\n # This should not throw an exception, space-delete failure not problem\n self.driver.delete_volume(volume)\n self._fail_space_delete = False\n volume['provider_id'] = None\n # This should also not throw an exception\n self.driver.delete_volume(volume)", "def _delete_disk(self, volume):\n\n # We only do this when we know it's not exported\n # anywhere in the gateway\n lun_name = self._lun_name(volume.name)\n config = self._get_config()\n\n # Now look for the disk on any exported target\n found = False\n for target_iqn in config['targets']:\n # Do we have the volume we are looking for?\n target = config['targets'][target_iqn]\n for client_iqn in target['clients'].keys():\n if lun_name in target['clients'][client_iqn]['luns']:\n found = True\n\n if not found:\n # we can delete the disk definition\n LOG.info(\"Deleting volume definition in iscsi gateway for %s\",\n lun_name)\n self.client.delete_disk(self.configuration.rbd_pool, volume.name,\n preserve_image=True)", "def dropObject(player):\n for treasure in Treasure.List:\n if player.treasureCaptured:\n player.treasureCaptured = False\n treasure.x = player.x\n treasure.y = player.y\n treasure.img = pygame.image.load(Treasure.treasure_img[0])", "def event11510870():\n header(11510870, 1)\n npc, = define_args('i')\n skip_if_this_event_slot_off(2)\n chr.drop_mandatory_treasure(npc)\n end()\n\n if_entity_dead(0, npc)\n end()", "def find_graveyard_inner_box():\n volumes = get_volume_list()\n graveyard = 0\n for v in volumes:\n if volume_is_graveyard( v ): \n graveyard = v\n break\n if graveyard == 0:\n raise DagmcError( 'Could not find a graveyard volume' )\n\n xyz_lo, xyz_hi = volume_boundary( graveyard )\n xyz_mid = numpy.array( [ (hi+lo)/2.0 for (hi,lo) in zip( xyz_hi, xyz_lo) ], dtype=numpy.float64 )\n\n result_lo = numpy.array( [0]*3, dtype=numpy.float64 )\n result_hi = numpy.array( [0]*3, dtype=numpy.float64 )\n\n for i 
in range(0,3):\n uvw = [0,0,0]\n uvw[i] = 1\n lo_mid = xyz_mid.copy()\n lo_mid[i] = xyz_lo[i]\n _, dist = fire_one_ray( graveyard, lo_mid, uvw )\n result_lo[i] = lo_mid[i] + dist\n uvw[i] = -1\n hi_mid = xyz_mid.copy()\n hi_mid[i] = xyz_hi[i]\n _, dist = fire_one_ray( graveyard, hi_mid, uvw )\n result_hi[i] = hi_mid[i] - dist\n \n return result_lo, result_hi", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def test_bad_uuid_blockdev(self):\n command_line = [\"blockdev\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_hotplug_storage(self):\n target_vols = [\n {\n \"type\": \"FCP\",\n \"volume_id\": \"1024400000000000\",\n \"boot_device\": True,\n \"specs\": {\n \"multipath\": True,\n \"adapters\": [{\n \"devno\": \"0.0.1800\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }, {\n \"devno\": \"0.0.1801\",\n \"wwpns\": ['300607630503c1ae', '300607630503c1af']\n }]\n }\n }\n ]\n # set response from storage pool object\n pool_resp = {target_vols[0]['volume_id']: '/dev/mapper/mpatha'}\n self._mock_pool.return_value.activate.return_value = pool_resp\n\n guest_obj = self._check_init()\n guest_obj.login()\n # validate response\n self.assertEqual(guest_obj.hotplug(vols=target_vols),\n {'vols': pool_resp})", "def test_removal_mount_dependency(self):\n from chroma_core.models import ManagedMgs\n\n self.mgt.managedtarget_ptr = 
self.set_and_assert_state(self.mgt.managedtarget_ptr, \"mounted\")\n try:\n # Make it so that the mount unconfigure operations will fail\n MockAgentRpc.succeed = False\n\n # -> the TargetMount removal parts of this operation will fail, we\n # want to make sure that this means that Target deletion part\n # fails as well\n self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\", check=False)\n\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertNotEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")\n finally:\n MockAgentRpc.succeed = True\n\n # Now let the op go through successfully\n self.mgt.managedtarget_ptr = self.set_and_assert_state(self.mgt.managedtarget_ptr, \"removed\")\n with self.assertRaises(ManagedMgs.DoesNotExist):\n ManagedMgs.objects.get(pk=self.mgt.pk)\n self.assertEqual(ManagedMgs._base_manager.get(pk=self.mgt.pk).state, \"removed\")", "def dispense_full_plate(self, ref, reagent, volume):\n columns = []\n for col in range(0,ref.container_type.col_count):\n columns.append({\"column\": col, \"volume\": volume})\n self.instructions.append(Dispense(ref, reagent, columns))", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def test_aws_service_api_volume_delete(self):\n pass", "def test_bad_uuid_filesystem(self):\n command_line = [\"filesystem\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def drop(self, command):\n \n for item in self.inventory:\n if item.name == command[1]:\n self.location.inventory.append(item)\n self.inventory.remove(item)\n print(\"You dropped a\", item.name)\n return \n print(command[1] + \" is not here!\")", "def help_drop(self):\n print(DROP)", "def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()", "def test_delete_voltage_map_item(self):\n pass", "def process_IN_UNMOUNT(self, event):", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def remove_export(self, context, volume):\n pass", "def unmanage(self, volume):\n LOG.debug(\"Unmanaging Cinder volume %s. 
Changing name to %s\",\n volume['id'], _get_unmanaged(volume['id']))\n data = {'name': _get_unmanaged(volume['id'])}\n self._issue_api_request(URL_TEMPLATES['ai_inst']().format(\n _get_name(volume['id'])), method='put', body=data)", "def volume_down(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)", "def test_pvremove():\n pvdisplay = MagicMock(return_value=False)\n with patch(\"salt.modules.linux_lvm.pvdisplay\", pvdisplay):\n mock = MagicMock(return_value=True)\n with patch.dict(linux_lvm.__salt__, {\"lvm.pvdisplay\": mock}):\n ret = {\n \"stdout\": \"saltines\",\n \"stderr\": \"cheese\",\n \"retcode\": 0,\n \"pid\": \"1337\",\n }\n mock = MagicMock(return_value=ret)\n with patch.dict(linux_lvm.__salt__, {\"cmd.run_all\": mock}):\n assert linux_lvm.pvremove(\"A\") is True", "def test_bad_uuid_pool(self):\n command_line = [\"pool\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_delete_attached_volume(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.delete_volume, volume['id'])" ]
[ "0.5670078", "0.5624108", "0.554459", "0.5481827", "0.5419034", "0.53412575", "0.5311965", "0.52677137", "0.518722", "0.51854277", "0.5156141", "0.5146773", "0.5126611", "0.5102824", "0.50757784", "0.5075659", "0.5067923", "0.50451326", "0.49824572", "0.49812868", "0.4970936", "0.4936046", "0.49182272", "0.48876503", "0.48804823", "0.4878058", "0.48513", "0.48507753", "0.4845009", "0.48424056" ]
0.6111419
0
Validate the regex patterns, but only partially while the user is still typing. Because the 'from' pattern will be where the user specifies captures, changing it also requires revalidating the substitution pattern. However, if the user is still typing (as opposed to hitting enter to complete the input) we do the minimal amount of work necessary, i.e. we just set the colors back to neutral and disable the Apply button.
def validateRegexFields(self, complete=False): # Assume the patterns aren't valid. self.m_validFromRe = False self.m_validPatterns = False ### Validate the 'from' pattern # regexCtl = self.m_reFromCtl subsCtl = self.m_reToCtl regex, subs = regexCtl.Value, subsCtl.Value regColor, subColor = wx.NullColour, wx.NullColour if complete and regex: regColor = subColor = wx.BLUE try: re.sub(regex, subs, '') except re.error as e: subColor = wx.RED try: re.compile(regex) except re.error as e: regColor = wx.RED else: self.m_validFromRe = True else: self.m_validFromRe = True self.m_validPatterns = bool(subs) self.setTextColor(regexCtl, regColor) self.setTextColor(subsCtl, subColor) if complete: self.populateFileList() else: self.m_applyBtn.Enabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onTextChange(self, event):\n\n self.validateRegexFields(complete=False)\n event.Skip()", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def __checkForPattern(self):\n if self._keyCode in self._patterns:\n assert(self.notify.debug(\"Pattern Match: \" + self._keyCode))\n messenger.send(KeyCodes.PATTERN_MATCH_EVENT, [self._keyCode])\n self.reset()\n \n # If the key code is longer than the longest pattern possible,\n # Then reset! \n elif self._keyCodeCount == self._patternLimit or len(self.getPossibleMatchesList()) == 0:\n assert(self.notify.debug(\"No pattern match!\"))\n messenger.send(KeyCodes.PATTERN_NO_MATCH_EVENT)\n self.reset()", "def check_match_pattern(self):\n text = self.ui.plainTextEdit.toPlainText()\n pattern = self.ui.textPattern.text()\n result = re.search(pattern, text)\n group = int(self.ui.spinGroup.text())\n if result:\n self.ui.textMatch.setText(result.group(group))", "def onHitEnterInTo(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validPatterns:\n self.m_fileList.SetFocus()", "def _validate_pattern_fields(self):\n # TO ADD:\n ## check pattern is dict ??\n ## A1 ! check if all vars used in sprintf are declared\n ## check for quoted '%s' in sprintf text (not allowed).\n ## check that only all subs are %s and that the number of %s matches the length of the var list.\n ## re.search(r\"\\%(.)\", , ) => from this get list to check all %s and length to check against var list.\n ## Given these checks - makes more sense to hardwire sprintf subfield names than use config approach.\n \n for field, field_content in self.pattern.items():\n if field not in self.pkey_dict:\n warnings.warn(\"Pattern has unknown field: %s !\" % field)\n \n # The following is quote ugly and hard to follow. Should probably be refactored\n oneOf = False\n oneOf_list = []\n for field, field_spec in self.pkey_dict.items():\n if field_spec['compulsory']:\n if field not in self.pattern:\n warnings.warn(\"Pattern is missing compulsory field: %s !\" % field)\n elif field_spec['OneOf']:\n oneOf_list.append(field)\n if field in self.pattern:\n oneOf = True \n if field_spec['sprintf']:\n if field in self.pattern:\n for subfield in self.pattern[field]:\n if subfield not in self.sprintf_keys:\n warnings.warn(\"The field %s has an unknown subfield %s.\" % (field, subfield))\n for subfield in self.sprintf_keys:\n if subfield not in self.pattern[field]:\n warnings.warn(\"The field %s lacks the compulsory subfield %s.\" % (field, subfield))\n # Check that number of vars matches number %s in text field\n if not len(re.findall('%s', self.pattern[field]['text'])) == len(self.pattern[field]['vars']):\n warnings.warn(\"Wrong number of vars in field '%s' of %s\" % (field, self.pattern['pattern_name']))\n for v in self.pattern[field]['vars']:\n if v not in self.pattern['vars']:\n warnings.warn(\"%s not in varlist %s\" % (v, str(self.pattern['vars'])))\n# Move spec checks down: \n# if field_spec['msExpression']:\n# self._validate_quoted(field['text'])\n# self._validate_ms\n \n if not oneOf:\n warnings.warn(\"Pattern must have at least one of: \" + str(oneOf_list))\n\n # Poss to add: validate number of vars for sprintf subs", "def validate_data(self):\n for pattern in self.patterns:\n if pattern == \"\":\n self.patterns.remove(\"\")\n\n if not self.patterns:\n print(\"WARNING! 
Missing pattern or empty string!\")\n sys.exit()", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")", "def test_grammar_rules_regex(self) -> None:\n for rule in self.rules.grammar_regex:\n positions: List[Tuple[int, int]] = self.report.get_regex_postions(\n rule[\"regex\"], ignore_case=True\n )\n for position in positions:\n self.add_error(rule[\"message\"], position=position)", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def _source_matchpattern_field_string_is_valid_as_regex(self):\n if self.source_matchpattern is None:\n raise RuleError(\"'source_matchpattern' must be a valid regex.\")\n if not regex_is_valid(self.source_matchpattern):\n # print(f\"{self}\")\n raise SourceMatchpatternError(\n \"Value for 'source_matchpattern' must be a valid regex.\"\n )\n return True", "def validate(self):\n stguess = self.text.toPlainText()\n if not self.pkfitdlg.checkUserInput(stguess):\n return\n self.stguess = stguess\n self.accept()", "def validate_search_inputs(self):\r\n\r\n debug(\"validate\")\r\n fail = False\r\n msg = \"\"\r\n if self.m_regex_search_checkbox.GetValue():\r\n if self.m_searchfor_textbox.GetValue() == \"\" or self.validate_search_regex():\r\n msg = _(\"Please enter a valid search regex!\")\r\n fail = True\r\n elif self.m_searchfor_textbox.GetValue() == \"\":\r\n msg = _(\"Please enter a valid search!\")\r\n fail = True\r\n if not fail and self.m_fileregex_checkbox.GetValue():\r\n if self.m_filematch_textbox.GetValue().strip() == \"\" or self.validate_regex(self.m_filematch_textbox.Value):\r\n msg = \"Please enter a valid file regex!\"\r\n fail = True\r\n elif self.m_filematch_textbox.GetValue().strip() == \"\":\r\n msg = _(\"Please enter a valid file pattern!\")\r\n fail = True\r\n if not fail and self.m_dirregex_checkbox.GetValue():\r\n if self.validate_regex(self.m_exclude_textbox.Value):\r\n msg = _(\"Please enter a valid exlcude directory regex!\")\r\n fail = True\r\n if not fail and not exists(self.m_searchin_text.GetValue()):\r\n msg = _(\"Please enter a valid search path!\")\r\n fail = True\r\n if (\r\n not fail and\r\n self.m_logic_choice.GetStringSelection() != \"any\" and\r\n re.match(r\"[1-9]+[\\d]*\", self.m_size_text.GetValue()) is None\r\n ):\r\n msg = _(\"Please enter a valid size!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_modified_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a modified date!\")\r\n fail = True\r\n if not fail:\r\n try:\r\n self.m_created_date_picker.GetValue().Format(\"%m/%d/%Y\")\r\n except:\r\n msg = _(\"Please enter a created date!\")\r\n fail = True\r\n if fail:\r\n errormsg(msg)\r\n return fail", "def _line_fits_pattern(self, logline):\n for (fieldname, pattern) in self._excludepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be 
tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return False\n if len(self._includepatterns) == 0:\n return True # no includepatterns means 'accept everything'\n for (fieldname, pattern) in self._includepatterns:\n try:\n m = pattern.search(str(logline.__dict__[fieldname]))\n except AttributeError:\n warn(\"Exclude patterns must be tuples of a field name and a compiled regex.\")\n warn(\"The object that you provided as a regex seems not to have a 'search' method\")\n exit(-1)\n except KeyError:\n warn(\"You tried to filter for a field that doesn't exist\")\n m = False\n if m:\n return True\n return False", "def constraint_clause_pattern_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n value = getattr(presentation, field.name)\n if value is not None:\n try:\n # From TOSCA 1.0 3.5.2.1:\n #\n # \"Note: Future drafts of this specification will detail the use of regular expressions\n # and reference an appropriate standardized grammar.\"\n #\n # So we will just use Python's.\n re.compile(value)\n except re.error as e:\n context.validation.report(\n u'constraint \"{0}\" is not a valid regular expression in \"{1}\": {2}'\n .format(field.name, presentation._fullname, safe_repr(value)),\n locator=presentation._get_child_locator(field.name), level=Issue.FIELD, exception=e)", "def pre_search(self):\n self.update_status(\"Edit pattern filter\")\n self.patternEditor.show()", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def generate_regex_from_string(self):\n tries = 0\n while tries < self.max_tries:\n try:\n tries += 1\n if tries % 100 == 0:\n print(f\"Tries: {tries}\", end=\"\\r\")\n patterns_to_try = self.generate_regex_pattern()\n for _, pattern in patterns_to_try:\n if re.fullmatch(pattern, self.string):\n self.found_patterns.add(pattern)\n else:\n print(f\"Doesn't Match! {pattern} -> {self.string}\")\n except Exception as e:\n pass\n if self.negative_string:\n self.found_patterns = self.best_pattern()", "def on_test_regex(self, event):\r\n\r\n self.m_regex_test_button.Enable(False)\r\n self.tester = RegexTestDialog(\r\n self,\r\n self.m_case_checkbox.GetValue(),\r\n self.m_dotmatch_checkbox.GetValue(),\r\n self.m_searchfor_textbox.GetValue()\r\n )\r\n self.tester.Show()", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def find_by_pattern(self):\n while True: \n word = input(\"Enter a regular expression ex: \\d\\d\\w+. 
Press Q to \"\n \"quit to the main screen: \")\n if word.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n return self.dict_list\n self.find_by_pattern_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(word, value):\n self.find_by_pattern_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_pattern_list)\n break\n self.del_or_edit()", "def __validate_conn_pattern(conns:str)->str:\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n # pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n for conn in conns.split(\",\"):\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid connection format: {conn}. Supported formats: 127.0.0.1:32049 or user:[email protected]:32049')\n\n return conns", "def _config_regex(self):", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def build_match_and_apply_functions(pattern, search, replace):\n\n def matches_rule(word):\n \"\"\" Check if word contains pattern.\n \"\"\"\n return re.search(pattern, word)\n\n def apply_rule(word):\n \"\"\" Replace text with replacement in word.\n \"\"\"\n return re.sub(search, replace, word)\n\n return (matches_rule, apply_rule)", "def test_patterns(self):\n tests = (\n (\"https://youtu.be/OQwD0QCbxaA\", \"https://www.youtube.com/watch?v=OQwD0QCbxaA&feature=my_favorites\"),\n (\"https://smile.amazon.com/Simons-Cat-Simon-Tofield/dp/0446560065\",\n \"http://www.amazon.com/Simons-Cat-Simon-Tofield/dp/0446560065/ref=sr_1_1?ie=UTF8&qid=1346302386&sr=\"),\n (\"http://example.com/\", \"http://example.com/?feat=directlink\"),\n (\"http://example.com/\", \"http://example.com/?\"),\n (\"http://example.com/?foo=1\", \"http://example.com/?foo=1&\"),\n\n )\n\n config = copyclipper.LoadConfig()\n for test in tests:\n result = copyclipper.ProcessValue(config, test[1])\n self.assertEquals(result, test[0],\n msg=\"Expected\\n%r\\ngot\\n%r\\nfrom\\n%r\\n\" % (test[0], test[1], result))", "def register_patterns(self) -> None:\n\n if (patterns := getattr(self, \"WORDS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(rf\"\\b{k}\\b\", v))\n\n if (patterns := getattr(self, \"PATTERNS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(k, v))\n\n if (replacements := getattr(self, \"REPLACEMENTS\", None)) is not None:\n for replacement in replacements:\n self.register_replacement(replacement)", "def validate(self, document) -> None:\n\n # document.text will have value in two cases, after we pressed enter in the prompt or when navigating down\n # the autocomplete commands list. 
In the second case there is no need to press enter to trigger this method,\n # but in those cases self.validation_type == ''\n typed = document.text\n\n if typed:\n if self.validation_type == \"number\":\n regex = r\"^-?\\d+$\"\n\n if not re.search(regex, typed):\n\n raise ValidationError(\n message=\"Please input a positive or negative number.\"\n )\n elif self.validation_type == \"yes_no\":\n regex = r\"^[yYnN]$\"\n\n if not re.search(regex, typed):\n raise ValidationError(message=\"Please type y, n, Y or N.\")\n elif self.validation_type == \"text_max_len\":\n if len(typed) > 100:\n raise ValidationError(message=\"La oración debe tener menos de 100 caracteres.\")\n else:\n raise ValidationError(message=\"Internal Error: Wrong validation type\")" ]
[ "0.6140925", "0.6094179", "0.60423654", "0.56661874", "0.5438786", "0.53020585", "0.5268809", "0.51951706", "0.5172561", "0.50293213", "0.501354", "0.49975908", "0.49453467", "0.49100548", "0.49090192", "0.48892468", "0.48374686", "0.48184943", "0.47469115", "0.46745083", "0.46548316", "0.46476665", "0.46304142", "0.46202356", "0.4609982", "0.45809138", "0.45758003", "0.4560009", "0.45496055", "0.45484376" ]
0.7344064
0
Refresh our list of what's on disk.
def updateDiskFileList(self):
    if self.m_curPath:
        # Get me just the files please.
        for _, _, files in os.walk(self.m_curPath):
            break
    else:
        files = []

    files.sort()

    if files != self.m_diskNames:
        self.m_diskNames[:] = files
        self.m_newNames[:] = []
        self.populateFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def refresh(self, list_of_tables):\n self.dismod_file.refresh(list_of_tables)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n self.__refresh()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def reload(self):", "def reload(self):", "def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def refresh(self):\n f = open(self._filepath, 'r')\n self._raw_sysfs_data = f.read()\n f.close()\n self._process_raw_data()", "def reload(self):\n\n pass", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def Refresh(self):\n pass", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def reload(self):\n self.known_stations = {}\n self.read_noaa_stations()\n self.read_table_stations()\n self.last_reload_check_time = datetime.datetime.utcnow()\n LOGGER.info('Have %s known stations', len(self.known_stations.keys()))", "def refresh_source(self):\n pass", "def refresh(self):\n self.Refresh()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refreshMTimes(self):\n del self.mtimesReset[:]\n for fileName, fileInfo in self.data.items():\n oldMTime = self.mtimes.get(fileName,fileInfo.mtime)\n self.mtimes[fileName] = oldMTime\n #--Reset mtime?\n if fileInfo.mtime != oldMTime and oldMTime != -1:\n fileInfo.setMTime(oldMTime)\n self.mtimesReset.append(fileName)", "def refresh(self):\n self.dir = dirs['app']\n ssBase = GPath(mwIniFile.getSetting('General','Screen Shot Base Name','ScreenShot'))\n if ssBase.head:\n self.dir = self.dir.join(ssBase.head)\n newData = {}\n reImageExt = re.compile(r'\\.(bmp|jpg)$',re.I)\n #--Loop over files in directory\n for fileName in self.dir.list():\n filePath = self.dir.join(fileName)\n maImageExt = reImageExt.search(fileName.s)\n if maImageExt and filePath.isfile(): \n newData[fileName] = (maImageExt.group(1).lower(),filePath.mtime)\n changed = (self.data != newData)\n self.data = newData\n return changed", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def invalidate_for_files(self):\r\n return []", "def reload(self):\n if os.path.exists(FileStorage.__file_path):\n with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as f:\n loaded = json.load(f)\n for _id, v in loaded.items():\n cls = loaded[_id].pop(\"__class__\", None)\n try:\n loaded[_id][\"created_at\"] = datetime.strptime(\n loaded[_id][\"created_at\"], dt_format)\n loaded[_id][\"updated_at\"] = datetime.strptime(\n loaded[_id][\"updated_at\"], dt_format)\n except:\n pass\n FileStorage.__objects[_id] = FileStorage.class_models[cls](**v)" ]
[ "0.71091926", "0.7044655", "0.700236", "0.68334323", "0.68334323", "0.6765857", "0.66914636", "0.66914636", "0.66914636", "0.65789855", "0.65704066", "0.65704066", "0.6506549", "0.6506549", "0.6505286", "0.6480908", "0.6439831", "0.6422957", "0.64214545", "0.64141774", "0.6324581", "0.62873477", "0.6237474", "0.6180018", "0.6178493", "0.6177689", "0.6163327", "0.6157545", "0.61551976", "0.6139173" ]
0.70745224
1
Uses the list of files on disk and the regex patterns to build a list of what the directory will look like if we renamed the files. Because we're just using a simple text list, we use symbols to show the user which filenames would change and whether they would produce any duplicates.
def populateFileList(self):
    self.m_fileList.SetForegroundColour(wx.NullColour)

    # We'll need to track which file names are modified and which
    # file names duped.
    applicable, dupes = set(), set()

    if not self.m_validPatterns:
        # Regex's don't compile yet, just use the raw filename list.
        newNames = self.m_diskNames
    else:
        # Apply the substitution to the filename list to produce a
        # destination-name list, and identify whether the patterns
        # actually affect anything.
        #
        newNames, modifiedIndexes = [], []
        matcher = re.compile(self.m_reFromCtl.Value).subn
        subs = self.m_reToCtl.Value

        for filename in self.m_diskNames:
            # Perform the sub
            (filename, numChanges) = matcher(subs, filename)

            # Was there a modification?
            if numChanges:
                # Record the affected name.
                applicable.add(filename)
                if filename in newNames:
                    dupes.add(filename)

            # Add to the primary list
            newNames.append(filename)

    # Does this produce a different list than we already had? If so,
    # clear the file list and replace it with the new one.
    #
    if newNames != self.m_newNames:
        self.m_fileList.Clear()

        # Figure out the longest name so we can create a cleanly-formatted
        # set of prefix/suffix characters for the modified/duped annotation.
        #
        maxLen = max(map(len, newNames))
        decorate = '{m} {fn:<{ml}} {m}'.format

        # Now build a list of display elements.
        for filename in newNames:
            mark = ' ' if filename not in applicable else '|'
            if filename in dupes:
                mark = '*'
            self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen))

        # Keep the list.
        self.m_newNames[:] = newNames

    # Update the apply button, we only want it enabled when the user
    # has a valid set of patterns that affect any files and have no
    # dupes produced as a result.
    #
    self.m_applyBtn.Enabled = bool(applicable) and not dupes

    if dupes:
        # Emphasize the presence of dupes.
        self.m_fileList.SetForegroundColour(wx.RED)

    # Draw the list.
    self.m_fileList.Refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename_files(file_list, src_dir, pattern, rename=False):\n i = 0\n renamed = regex_group_split(file_list, pattern, False)\n renamed_w_path = [src_dir + fn for fn in renamed]\n orig_fp_list = orig_filepath_list(file_list, src_dir)\n\n for filename in file_list:\n if not (orig_fp_list[i] == renamed_w_path[i]):\n print (colors.BLUE + \"_ORIGINAL_: \" + orig_fp_list[i].replace(src_dir, \"\") + colors.ENDC)\n print (colors.RED + \"__UPDATE__: \" + renamed_w_path[i].replace(src_dir, \"\") + colors.ENDC)\n\n if rename:\n os.rename(orig_fp_list[i], renamed_w_path[i])\n i += 1", "def tidyFileNames(folderToCheck):\n\n filters = list(map(lambda x: \"*.\" + x, expectedExts))\n\n for filter in filters:\n\n for f in getFiles(folderToCheck,filter):\n\n clean = f\n for search in searches:\n clean = replace(clean,search)\n\n if renameFile(f,clean):\n results = list(map(os.path.basename,[f,clean]))\n if results[0] != results[1]:\n print(f\"Renamed: {results[0]} -> {results[1]}\")", "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n A_File_Name = prefixStream + Release_Tower_Name + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def main(root, filelist):\n #print \"got %s: %s\" % (root, filelist)\n rename(root, filelist)", "def rename(root, filelist):\n if not filelist:\n return\n def apply_rules(filename):\n rulez = [('_+' , ' '), # One or more underscores to spaces\n ('-{2,}' , '-'), # Two or more hyphens to single hyphen\n ('&' , 'And'), # An ampersand to 'And'\n ('(-)(\\w*)' ,r' \\1 \\2')]# Spaces around hyphen seperated words\n \n for look_for, replacement in rulez:\n filename = re.sub(look_for, replacement, filename)\n # Capitalize first letter of every word\n filename = \" \".join([ word.capitalize() for word in filename.split() ])\n return filename\n \n names = []\n for filename in filelist:\n basename = os.path.basename(filename)\n names.append(os.path.join(root, apply_rules(filename)))\n try:\n dest = os.tmpnam()\n fl = open(dest, 'w')\n fl.write(\"\\n\".join(names))\n fl.close()\n os.system(\"%s %s\" % (EDITOR, dest))\n ans = 'no'\n for oldname, newname in zip(filelist, open(dest).readlines()):\n oldname = os.path.join(root, oldname)\n newname = newname.strip()\n if oldname == newname:\n print \"No change from %s to %s ...skipping\" % (oldname, newname)\n else:\n print \"Changing %s to %s\" % (oldname, newname)\n if not ans[0].lower == 'a':\n ans = raw_input(\"Contine (Yes/No/All) ? 
[N] \") or 'no'\n if ans[0].lower() in ('a', 'y'):\n os.rename(oldname, newname)\n else:\n os.rename(oldname, newname)\n finally:\n os.remove(dest)", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def file_names(acqfolder):\n log.info('anonymizer.py file_names {0}'.format(acqfolder))\n\n subj_path = path(acqfolder)\n\n done = -1\n for ext in dicom_file_extensions:\n file_lst = subj_path.glob('*' + ext)\n if file_lst:\n rename_file_group_to_serial_nums(file_lst)\n done = 0\n\n return done", "def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)", "def getFileNames():\n input_path = \"/Users/tim/OneDrive/Master/Text_Mining/project/texts/glenarvon_html/\"\n temp_list = os.listdir(input_path)\n name_list = [i for i in temp_list if i[-4:] == \"html\"]\n name_list.sort(key=natural_keys) # see http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n return name_list, input_path", "def rename_date_formats(files_list):\n\n count_renamed = 0\n count_skipped = 0\n\n for file in files_list:\n\n # finding DD-DD-DDDD matches\n if date_regex.search(file):\n date_format = date_regex.search(file).group()\n date_split = date_format.split(\"-\")\n\n # detecting MM-DD-YYYY format and renaming to DD-MM-YYYY format\n if 1 <= int(date_split[0]) <= 12 and 1 <= int(date_split[1]) <= 31:\n european_format_date = \"-\".join([date_split[1], date_split[0], date_split[2]])\n new_file_name = file.replace(date_format, european_format_date)\n\n # checking that newly renamed file won't be a duplicate\n if new_file_name not in files_list:\n shutil.move(file, new_file_name)\n print(f\"<{file}> renamed to <{new_file_name}>\")\n count_renamed += 1\n else:\n print(f\"Cannot rename <{file}> because file <{new_file_name}> already exists\")\n count_skipped += 1\n\n # for files with DD-DD-DDDD format, but not MM-DD-YYYY like 89-77-3445\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n # for files with no MM-DD-YYYY format like 12-1221.txt or text.pdf\n else:\n print(f\"<{file}> has no MM-DD-YYYY date in name\")\n count_skipped += 1\n\n print(f\"\\nSUMMARY:\\nRenamed files count - {count_renamed}, not affected files count - {count_skipped}.\")", "def main():\n print(\"Current directory is\", os.getcwd())\n os.chdir('Lyrics/Lyrics')\n\n for dir_name, dir_list, file_list in 
os.walk(\".\"):\n for filename in file_list:\n file_path = dir_name + \"\\\\\" + filename\n new_name = get_fixed_filename(file_path)\n os.rename(file_path, new_name)", "def findDuplicateWorkingFiles(self, initialList, curInfix, newInfix):\n Duplicate_List = []\n for fname in initialList:\n infixStream = iccs_apex.whatInfixIsStream(fname)\n if (infixStream == curInfix):\n prefixStream, postfixStream = string.split(fname, infixStream)\n A_File_Name = prefixStream + newInfix + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def newDuplicateFiles(self, duplicateList, curInfixStream, newInfixStream):\n Return_List = []\n for fname in duplicateList:\n File_Name_Part = os.path.basename(fname)\n Directory_Name_Part = os.path.dirname(fname)\n Parent_Directory_Name = os.path.dirname(Directory_Name_Part)\n File_Name = os.path.join(Parent_Directory_Name, File_Name_Part)\n prefixStream, postfixStream = string.split(File_Name, curInfixStream)\n New_File_Name = prefixStream + newInfixStream + postfixStream\n Return_List = Return_List + [New_File_Name]\n return Return_List", "def test_paths_to_plates():\n output = filelister_yoko.paths_to_plates(TEST_PATH_YOKO)\n prefix = os.path.abspath(TEST_PATH_YOKO)\n plate_names = [\"screen-name-batch1_20190213_095340/A000002-PC\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]", "def file_matches(self, text):\n \n #print 'Completer->file_matches: <%s>' % text # dbg\n\n # chars that require escaping with backslash - i.e. 
chars\n # that readline treats incorrectly as delimiters, but we\n # don't want to treat as delimiters in filename matching\n # when escaped with backslash\n \n protectables = ' ()[]{}'\n\n def protect_filename(s):\n return \"\".join([(ch in protectables and '\\\\' + ch or ch)\n for ch in s])\n\n lbuf = self.get_line_buffer()[:self.readline.get_endidx()]\n open_quotes = 0 # track strings with open quotes\n try:\n lsplit = shlex_split(lbuf)[-1]\n except ValueError:\n # typically an unmatched \", or backslash without escaped char.\n if lbuf.count('\"')==1:\n open_quotes = 1\n lsplit = lbuf.split('\"')[-1]\n elif lbuf.count(\"'\")==1:\n open_quotes = 1\n lsplit = lbuf.split(\"'\")[-1]\n else:\n return None\n except IndexError:\n # tab pressed on empty line\n lsplit = \"\"\n\n if lsplit != protect_filename(lsplit):\n # if protectables are found, do matching on the whole escaped\n # name\n has_protectables = 1\n text0,text = text,lsplit\n else:\n has_protectables = 0\n text = os.path.expanduser(text)\n \n if text == \"\":\n return [protect_filename(f) for f in self.glob(\"*\")]\n\n m0 = self.clean_glob(text.replace('\\\\',''))\n if has_protectables:\n # If we had protectables, we need to revert our changes to the\n # beginning of filename so that we don't double-write the part\n # of the filename we have so far\n len_lsplit = len(lsplit)\n matches = [text0 + protect_filename(f[len_lsplit:]) for f in m0]\n else:\n if open_quotes:\n # if we have a string with an open quote, we don't need to\n # protect the names at all (and we _shouldn't_, as it\n # would cause bugs when the filesystem call is made).\n matches = m0\n else:\n matches = [protect_filename(f) for f in m0]\n if len(matches) == 1 and os.path.isdir(matches[0]):\n # Takes care of links to directories also. 
Use '/'\n # explicitly, even under Windows, so that name completions\n # don't end up escaped.\n matches[0] += '/'\n return matches", "def eachfilename(dir2list, printfname=0):\n if printfname: print('eachfilename is matching for \\n' + dir2list);\n if isinstance(dir2list,str):\n if not os.path.exists(dir2list): # if not a valid (single) filename\n dir2list=[dir2list] # try it as a list\n if isinstance(dir2list,list) or isinstance(dir2list,tuple):\n for line in dir2list:\n for fname in glob.iglob(line):\n fname = fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname\n elif isinstance(dir2list,str):\n pp, ff = os.path.split(dir2list); pp+='/';\n for line in open(dir2list):\n line = line.strip()\n if line.startswith('##') : continue ## skip those lines\n for fname in glob.iglob( pp + line ):\n fname=fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname", "def replsuffix(files, suffix):\n\toutfiles = []\n\tif suffix is None: return\n\tif type(files) is type(\"\"):\n\t\tfiles = [files]\n\tfor f in files:\n\t\tfname, ext = os.path.splitext(f)\n\t\tnewfname = fname + suffix\n\t\toutfiles.append(newfname)\n\treturn outfiles", "def _get_rename_command(self,\r\n out_filenames,\r\n tmp_output_dir,\r\n output_dir):\r\n result = ''\r\n result_filepaths = []\r\n for fn in out_filenames:\r\n tmp_result_filepath = '%s/%s' % (tmp_output_dir, fn)\r\n result_filepath = '%s/%s' % (output_dir, fn)\r\n result += \\\r\n '; mv %s %s' % (tmp_result_filepath, result_filepath)\r\n result_filepaths.append(result_filepath)\r\n return result, result_filepaths", "def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList", "def compile_filename_patterns(pattern_list):\n\n pats=list(pattern_list)\n for i in range(len(pats)):\n if isinstance(pats[i],str):\n if pats[i].startswith('re:'):\n pats[i]=pats[i][3:]\n else:\n pats[i]=fnmatch.translate(pats[i])\n pats[i]=re.compile(pats[i])\n return pats", "def processed_file_names(self):\n if self.force_reprocess == True:\n self.force_reprocess = False\n return 'reprocess.pt'\n \n ''' HR 01/06/22 Workaround to avoid FileNotFoundError '''\n print('self.processed_dir:', self.processed_dir)\n # folder,file = os.path.split(self.processed_dir)\n folder = self.processed_dir\n if not os.path.isdir(folder):\n print(' Making folder', folder)\n os.makedirs(folder)\n \n processedfiles = [f for f in os.listdir(self.processed_dir) if os.path.isfile(\n os.path.join(self.processed_dir, f))]\n if 'pre_filter.pt' in processedfiles:\n processedfiles.remove('pre_filter.pt')\n if 'pre_transform.pt' in processedfiles:\n processedfiles.remove('pre_transform.pt')\n # 'not_implimented.pt' #[f'data_{i}.pt' for i in list(self.data.index)]\n return processedfiles", "def rename_file_group_to_serial_nums(file_lst):\n file_lst.sort()\n c = 1\n for f in file_lst:\n dirname = path.abspath(f.dirname())\n fdest = f.joinpath(dirname, \"{0:04d}\".format(c) + output_dicom_extension)\n log.info('Renaming {0} to {1}'.format(f, fdest))\n f.rename(fdest)\n c += 1", "def handle_filenames(filenames):\n suffixes = [\".mod\", \".dat\", \".run\"]\n if len(filenames) == 1:\n return (filenames[0].with_suffix(suffix) for suffix in suffixes)\n else:\n try:\n return sorted(filenames, key=lambda x: suffixes.index(x.suffix))\n except ValueError:\n click.echo(click.style(f\"Invalid filename.\", fg=\"red\", bold=True))", "def filenamePatterns(self):\n return ['*.'+e for e in self.filenameExtensions]", "def 
getNames(self):\r\n ListFiles = os.listdir(\"Save\")\r\n centering = \" \"\r\n stringFiles = centering + \"List of {} files in your Save folder : \\n \\n\".format(\r\n \"PVP\" if self.PVP else \"AI\"\r\n )\r\n if self.PVP:\r\n for k in ListFiles:\r\n if self.PVP and \"PVP_mode\" == k[:8]:\r\n realName = k[9:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n else:\r\n stringFiles += \" Files where AI is playing white : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"B\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n stringFiles += \"\\n Files where AI is playing black : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"W\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n self.existingFiles.setText(stringFiles)", "def test_paths_to_plates():\n output = filelister_ix.paths_to_plates(TEST_PATH_IX)\n prefix = os.path.abspath(TEST_PATH_IX)\n plate_names = [\"test-plate-1\", \"test-plate-2\",\n \"test-plate-3\", \"test-plate-4\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def transform_suffix(filenames, suffix_old, suffix_new):\n\n new_filenames = set([])\n len_suffix_old = len(suffix_old) + 1 # add one for the \".\"\n # loop over the list of files and remove the suffix\n for name in filenames:\n name = name[:-len_suffix_old]\n new_filenames.add(name + \".\" + suffix_new)\n \n return new_filenames", "def file_name_search():\n directory = \"/Users/andrewpowers/Documents/server/fastq_pass\"\n\n for file in os.listdir(directory):\n output_file = re.sub('fastq', 'fasta', file)\n os.system(bash_command.format(directory+\"/\"+file, output_file))\n print('File {} converted to fasta.'.format(file))\n print('Conversion Done.')", "def namedir(l, override=False):\n\tprint(\"naming dir\")\n\n\tglobal torrenteps\n\ttorrenteps = []\n\n\tfiles = l\n\n\tprint(\"###\")\n\n\t# filenums: {episode: [numpos(number, index)]}\n\tfilenums = {}\n\n\tfor f in files: # {episode: [numpos,]}\n\t\tfilenums[f] = getnumbers(stripname(f.getpathname(), False))\n\t\tprint(f, filenums[f])\n\n\tallfilenums = [fnum for f in files for fnum in filenums[f]] # list of all numpos\n\tprint(allfilenums)\n\tfilenumcounter={}\n\tfor fnum in allfilenums:\n\t\tif fnum in filenumcounter:\n\t\t\tfilenumcounter[fnum] += 1\n\t\telse:\n\t\t\tfilenumcounter[fnum] = 1\n\n\tprint(filenumcounter)\n\n\n\ttoremove = []\n\n\tindexes = [fnum.strindex for f in files for fnum in filenums[f]] # get all indexes\n\tremoveindexes = set(indexes)\n\tindexnums = {}\n\tfor f in files: # remove bad indexes\n\t\tfor fnum in filenums[f]:\n\t\t\tif fnum.strindex in indexnums:\n\t\t\t\tif indexnums[fnum.strindex] != fnum.num:\n\t\t\t\t\tif fnum.strindex in removeindexes:\n\t\t\t\t\t\tremoveindexes.remove(fnum.strindex)\n\t\t\telse:\n\t\t\t\tindexnums[fnum.strindex] = fnum.num\n\n\tindextonumbers = {}\n\tfor index in set(indexes):\n\t\tnumbers = []\n\t\tfor f in files:\n\t\t\tfor fnum in filenums[f]:\n\t\t\t\tif fnum.strindex == index:\n\t\t\t\t\tnumbers.append(fnum.num)\n\t\tindextonumbers[index] = numbers\n\tprint(\"indextonumbers\", indextonumbers)\n\n\ttoremove += removeindexes\n\n\tfor fnum in filenumcounter:\n\t\ttimes = filenumcounter[fnum]\n\t\tif times >= len(files)-1:\n\t\t\tprint(\"removing index\", str(fnum.strindex), \"because it's all files\")\n\t\t\ttoremove.append(fnum.strindex)\n#\t\telif float(fnum.num) > 200:\n#\t\t\tprint \"removing index\", 
str(fnum.strindex), \"because it's over 200\"\n#\t\t\ttoremove.append(fnum.strindex)\n\n\tprint(\"toremove\", toremove)\n\tfor f in files:\n\t\tfilenums[f] = [fnum for fnum in filenums[f] if not fnum.strindex in toremove and not \"NCOP\" in f.getpathname() and not \"NCED\" in f.getpathname()]\n\tprint(\"filenums\", filenums)\n\n\tfilenumsstrindex = [fnum.strindex for f in files for fnum in filenums[f]] # get most common index\n\tprint(\"strindexes\", filenumsstrindex)\n\tepnumpos = None\n\tif len(filenumsstrindex) != 0:\n\t\tfilenumsstrindex = Counter(filenumsstrindex)\n\t\tcommonlist = [index for index, amount in filenumsstrindex.most_common()]\n\t\tprint(\"commonlist\", commonlist)\n\t\tamtuniquenumbers = {index: len(set(indextonumbers[index])) for index in commonlist }\n\t\tprint(\"amtuniquenumbers\", amtuniquenumbers)\n\t\tmostuniquenumbers = max(amtuniquenumbers.values())\n\t\tif mostuniquenumbers < 3.0/4.0 * filenumsstrindex.most_common()[0][1]:\n\t\t\t# just one number isn't good enough - probably contains both season and episode\n\t\t\tmostcommon = sorted(commonlist, key = lambda index: amtuniquenumbers[index])\n\t\t\tepnumpos = [mostcommon[0], mostcommon[1]]\n\t\t\tprint(\"attempting to describe with 2 numbers. Indexes:\", epnumpos)\n\t\telse:\n\t\t\tmostcommonlist = [index for index, amtunique in list(amtuniquenumbers.items()) if amtunique == mostuniquenumbers]\n\t\t\tprint(\"mostcommonlist 2\", mostcommonlist)\n\t\t\tepnumpos = [mostcommonlist[0]]\n\t\t\tprint(\"epnumpos\", epnumpos, mostcommonlist)\n\n\tnames = copy.copy(l)\n\teps = [None for f in l]\n\n\tfor index, name in enumerate(names):\n\t\tpath = l[index]\n\t\tchangedname = files[index]\n\t\tnewname = path.getpathname()\n\t\tif epnumpos != None:\n\t\t\tif len(epnumpos) == 1:\n\t\t\t\tnumpos = epnumpos[0]\n\t\t\t\tnumbers = filenums[changedname]\n\t\t\t\tnumber = [num for num in numbers if num.strindex == numpos]\n\t\t\t\tif number != []:\n\t\t\t\t\tnumber = number[0].num\n\t\t\t\t\tif number.endswith(\".\"):\n\t\t\t\t\t\tnumber = number[:-1]\n\t\t\t\t\tif \".\" in number:\n\t\t\t\t\t\tnumber = float(number)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnumber = int(number)\n\t\t\t\t\teps[index] = number\n\t\t\telif len(epnumpos) == 2:\n\t\t\t\tnumbers = filenums[changedname]\n\t\t\t\tfirstnumber = [num for num in numbers if num.strindex == epnumpos[0]]\n\t\t\t\tsecondnumber = [num for num in numbers if num.strindex == epnumpos[1]]\n\t\t\t\tfirstnumber = int(firstnumber[0].num)\n\t\t\t\tsecondnumber = int(secondnumber[0].num)\n\t\t\t\tnumber = firstnumber + float(secondnumber) / 100\n\t\t\t\teps[index] = number\n\t\tnames[index] = newname\n\n\tnumbereps = sum([ep != None for ep in eps])\n\tif numbereps <= 1:\n\t\teps = [None for ep in eps]\n\n\tfor index, path in enumerate(l):\n\t\tif not path.getkey() in save.names or override:\n\t\t\tif isinstance(path, episode):\n\t\t\t\tname = names[index]\n\t\t\t\tepnumber = eps[index]\n\t\t\t\tpath.setname([name, epnumber])" ]
[ "0.62912023", "0.6283787", "0.61243826", "0.61238414", "0.6057", "0.6056427", "0.60261464", "0.6002019", "0.58697075", "0.58516043", "0.58399916", "0.5831643", "0.5815127", "0.5750396", "0.57445604", "0.57320064", "0.5677666", "0.56666535", "0.56408477", "0.5636326", "0.56356955", "0.5615281", "0.5612887", "0.56112707", "0.560815", "0.55919605", "0.5589841", "0.5588777", "0.5588556", "0.5584646" ]
0.6573242
0
When the user hits 'enter' in the 'from' field.
def onHitEnterInFrom(self, event):
    self.validateRegexFields(complete=True)
    if self.m_validFromRe:
        self.m_reToCtl.SetFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_enter():\n enter_event = QtGui.QKeyEvent(\n QEvent.KeyPress, Qt.Key_Enter, Qt.KeyboardModifiers())\n QtGui.QApplication.sendEvent(self, enter_event)", "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def enter():\n pass", "def enterKey_cb(widget, dialog):\n dialog.response(gtk.RESPONSE_ACCEPT)", "def enter_press_log_watcher(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.choose_watcher_num()", "def on_lineInput_returnPressed(self):\n self.input = self.lineInput.text()\n self.accept()", "def enter():\n from pynput.keyboard import Key, Controller\n kb = Controller()\n kb.press(Key.enter)\n kb.release(Key.enter)", "def _OnPressEnter1(self):\n\t self.epsg1.set( self.epsg1.get() )\n\t self.epsg1_entry.focus_set()\n\t self.epsg1_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg1.get())))", "def enter(self):\n\t\tself._translate(True)\n\t\tinputCore.manager.emulateGesture(keyboardHandler.KeyboardInputGesture.fromName(\"enter\"))", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def onHitEnterInTo(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validPatterns:\n self.m_fileList.SetFocus()", "def enter():\n input(\"\\nClick Enter to continue \")", "def keyPressEvent(self, e):\n super(PhyloVisApp, self).keyPressEvent(e)\n if e.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:\n if (self.stackedWidget.currentIndex() == 5):\n if (self.lStatisticStackedWidget.currentIndex() == 0):\n self.login(self.lStatPasswordLineEdit.text())", "def _OnPressEnter2(self):\n\t self.epsg2.set( self.epsg2.get() )\n\t self.epsg2_entry.focus_set()\n\t self.epsg2_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg2.get())))", "def keystroke(input):\n if input == 'q':\n raise urwid.ExitMainLoop()\n if input is 'enter':\n listbox.get_focus()[0].original_widget\n raise Selected()", "def enter_message(self, message):\n self.selib.input_text(self.locator.message, message)", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def keyPressEvent(self, event):\r\n if event.key() == Qt.Key_Return:\r\n self.manejo_boton_2()", "def _OnPressEnter4(self):\n\t self.epsg4.set( self.epsg4.get() )\n\t self.epsg4_entry.focus_set()\n\t self.epsg4_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg4.get())))", "def signal_from_subjects_pad(self, event):\n self.keyPressEvent(event)", "def enter_press_log_show(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.show_game(self.game_number.get())", "def click_signal_from_subjects_pad(self, subject):\n sendEventSignal = pyqtSignal(QEvent)\n q = QKeyEvent(QEvent.KeyPress, Qt.Key_Enter, Qt.NoModifier, text=\"#subject#\" + subject)\n self.keyPressEvent(q)", "def _OnPressEnter3(self):\n\t self.epsg3.set( self.epsg3.get() )\n\t self.epsg3_entry.focus_set()\n\t self.epsg3_entry.selection_range(0, Tkinter.END)\n\t print('epsg code set to %s ' % (str(self.epsg3.get())))", "def on_input_returnPressed(self):\n self.intercept = True\n self.on_sendButton_clicked()", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in (self.text[0].lower(),'\\n','\\r'):\n self.okButton()\n \n return \"break\"", "def _on_key_press(self, event):", "def 
fromCalendarHandler(self):\n\n self.last_clicked = \"from\"\n self.updateUI()", "def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())", "def enter_notify_event(self, widget, event):\n enter_focus = self.t_.get('enter_focus', False)\n if enter_focus:\n # set focus on widget\n pass\n return self.make_callback('enter')" ]
[ "0.6778405", "0.6758216", "0.67262506", "0.6404956", "0.6386746", "0.63825494", "0.63070136", "0.62395203", "0.6209298", "0.62009835", "0.61862344", "0.61658704", "0.616317", "0.6027476", "0.6005713", "0.5913", "0.5905714", "0.58879393", "0.58879393", "0.58485764", "0.5829572", "0.58240134", "0.58203185", "0.5799894", "0.5797084", "0.57786727", "0.5703246", "0.570324", "0.5697584", "0.5661082" ]
0.7331321
0
When the user modifies the content of either regex field.
def onTextChange(self, event):
    self.validateRegexFields(complete=False)
    event.Skip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_field(self, **kwargs):\n if self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def set_regex_validator(self, field, regex):\n valexp = QtCore.QRegExp(regex)\n validator = QtGui.QRegExpValidator(valexp)\n field.setValidator(validator)", "def on_regex_search_toggle(self, event):\r\n\r\n if self.m_regex_search_checkbox.GetValue():\r\n update_autocomplete(self.m_searchfor_textbox, \"regex_search\")\r\n else:\r\n update_autocomplete(self.m_searchfor_textbox, \"literal_search\")\r\n event.Skip()", "def address_regex(self) -> Any:", "def _validate_field(self, value, regex):\n match = re.match(regex, value)\n\n if match:\n return value\n raise ValueError(_(\"Value %s does not match regex: %s\") %\n (value, regex))", "def validate_value(self, key, new_value): # pylint: disable=unused-argument\n\n if self.family == ExclFamily.network:\n ip_network(new_value)\n if self.family == ExclFamily.regex:\n try:\n re.compile(new_value)\n except re.error:\n raise ValueError('Invalid regex')\n\n return new_value", "def RegEx(self, regex):\n if len(regex) > 0:\n try:\n regexreplaced = regex.replace(\"%TARGET%\", self._target)\n self._regex = regexreplaced\n except AttributeError:\n regexreplaced = []\n for r in regex:\n regexreplaced.append(r.replace(\"%TARGET%\", self._target))\n self._regex = regexreplaced\n else:\n self._regex = \"\"", "def on_replacetextCombo_editTextChanged(self, text):\n self.__enableFindButton()", "def on_fileregex_toggle(self, event):\r\n\r\n if self.m_fileregex_checkbox.GetValue():\r\n update_autocomplete(self.m_filematch_textbox, \"regex_file_search\", default=[\".*\"])\r\n else:\r\n update_autocomplete(self.m_filematch_textbox, \"file_search\", default=[\"*?\"])\r\n event.Skip()", "def _config_regex(self):", "def validateRegexFields(self, complete=False):\n\n # Assume the patterns aren't valid.\n self.m_validFromRe = False\n self.m_validPatterns = False\n\n ### Validate the 'from' pattern\n #\n regexCtl = self.m_reFromCtl\n subsCtl = self.m_reToCtl\n\n regex, subs = regexCtl.Value, subsCtl.Value\n\n regColor, subColor = wx.NullColour, wx.NullColour\n\n if complete and regex:\n\n regColor = subColor = wx.BLUE\n try:\n re.sub(regex, subs, '')\n except re.error as e:\n subColor = wx.RED\n try:\n re.compile(regex)\n except re.error as e:\n regColor = wx.RED\n else:\n self.m_validFromRe = True\n else:\n self.m_validFromRe = True\n self.m_validPatterns = bool(subs)\n\n self.setTextColor(regexCtl, regColor)\n self.setTextColor(subsCtl, subColor)\n\n if complete:\n self.populateFileList()\n else:\n self.m_applyBtn.Enabled = False", "def _callback(self, matcher):\n matched_field = matcher.group(self.field)\n replacement = self.lookup.get(matched_field)\n if not replacement:\n return matcher.group(0)\n\n fields = list(f or \"\" for f in matcher.groups())\n fields[self.field - 1] = replacement\n\n return \"\".join(fields)", "def name_line_edit_changed(self, text):\n if re.findall(r\"[^a-zA-Z0-9\\-_ ]+\", text):\n self.name_line_edit.set_invalid(\"Invalid character\")\n else:\n if text == \"\":\n self.name_line_edit.set_invalid(\"Enter a name\")\n else:\n self.name_line_edit.set_valid()", "def matches(self, change):\n\n return False", "def regex_pattern(self):\n 
regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match", "def _validate_fields(self, change_fields):\n pass", "def test_substitutions_with_regex_chars(self):\n m = strutils.MultiReplace({'cat.+': 'kedi', r'purple': 'mor', })\n self.assertEqual(m.sub('The cat.+ is purple'), 'The kedi is mor')", "def _validator_regex(self, field, value):\n try:\n re.compile(value)\n except re.error:\n self._error(field, \"{} is not a valid regex\".format(value))", "def updateFilterRegExp(self, regExp):\n self.logsView.updateFilterRegExp(regExp=regExp)", "def process_IN_MODIFY(self, event):", "def on_edit(self, event, text):\n return None", "def check_match_pattern(self):\n text = self.ui.plainTextEdit.toPlainText()\n pattern = self.ui.textPattern.text()\n result = re.search(pattern, text)\n group = int(self.ui.spinGroup.text())\n if result:\n self.ui.textMatch.setText(result.group(group))", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def integrated_address_regex(self) -> Any:", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def run(self):\n # If the change type doesn't match, do nothing.\n if not self.regex.match(self.chgtype): return 0\n\n # Perform the child actions.\n return super(FilterChgType, self).run()", "def allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\n\t\tdef in_field_group(group):\n\t\t\treturn (old_type in group) and (new_type in group)\n\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))", "def on_filterEdit_textEdited(self, text):\n self.__enableFindButton()" ]
[ "0.59722745", "0.58232623", "0.5730046", "0.5724786", "0.5445473", "0.53800493", "0.53737646", "0.53433657", "0.53141534", "0.5252366", "0.52106875", "0.52057683", "0.5202676", "0.515042", "0.51475894", "0.51414645", "0.51303077", "0.5116394", "0.5103767", "0.50991905", "0.50948995", "0.5078652", "0.5069826", "0.5043185", "0.50295824", "0.5016066", "0.50088143", "0.49877837", "0.4964794", "0.49564743" ]
0.6521979
0
Returns the updated log_conf, taking into account new log files present on the instance as well as modifications made to the corresponding logentries host.
def update_instance_conf(log_paths, log_conf):
    log_client = LogClient.Client(account_key)
    instance_id, config = get_ssh_config(env.host)
    if log_conf is None and len(log_paths)>0:
        print 'log_conf is None'
        log_conf = create_host_logs(log_client,instance_id,log_paths)
    elif log_conf is not None:
        print 'log_conf is not None'
        conf_host = log_conf.get_host()
        if conf_host is None:
            print 'Error. This instance configuration is missing the corresponding model!! instance_id=%s'%instance_id
            logger.error('Error. This instance configuration is missing the corresponding model!! instance_id=%s',instance_id)
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        if conf_host.get_key() is None:
            print 'Host %s has an logentries-rsyslog config file but no account key!!'%host.get_name()
            logger.warning('Host %s has an logentries-rsyslog config file but no account key!!',host.get_name())
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        account = log_client.get_account()
        matching_host = None
        for host in account.get_hosts():
            if host.get_key() == conf_host.get_key():
                matching_host = host
                break
        # If there is no matching host, then it is assumed that it was deleted from Logentries and that no configuration should be associated to this instance.
        if matching_host is None:
            log_conf = create_host_logs(log_client,instance_id,log_paths)
            return log_conf
        for new_log in get_new_logs(log_paths, log_conf):
            # Update matching host so that each new log becomes part of it.
            matching_host = log_client.create_log_token(host=matching_host,log_name=new_log)
        log_conf.set_host(matching_host)
    return log_conf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %s'%local_conf_name)\n except:\n print 'Could not remove %s. It may not exist'%(local_conf_name)\n logger.warning('Could not remove %s. It may not exist'%(local_conf_name))\n # Get remote conf file or return None if it cannot be retrieved\n try:\n get(rsyslog_conf_name,local_conf_name)\n except:\n print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id)\n logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id)\n return None\n # Open conf file or return None if it cannot be opened\n try:\n log_conf_file = open(local_conf_name,'r')\n except:\n print 'Cannot open %s from instance %s'%(local_conf_name,instance_id)\n logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id)\n return None\n return log_conf_file", "def get_new_logs(log_paths,log_conf):\n if log_conf is None or log_conf.get_host() is None:\n return log_paths\n conf_logs = log_conf.get_host().get_logs()\n new_logs = [log_path for log_path in log_paths if log_path not in conf_logs]\n print 'New logs detected on %s: %s'(log_conf.get_host().get_name(), new_logs)\n logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs)\n return new_logs", "def update_rally_logs(res_dir, rally_conf='/etc/rally/rally.conf'):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n rconfig.set('DEFAULT', 'debug', True)\n rconfig.set('DEFAULT', 'use_stderr', False)\n rconfig.set('DEFAULT', 'log-file', 'rally.log')\n rconfig.set('DEFAULT', 'log_dir', res_dir)\n with open(rally_conf, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", 
self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def update_log_config(self, monitor_name, log_config):\n pass", "def get_log_config(conf_file: str):\n with open(conf_file, 'r') as c:\n config = json.load(c)\n if not os.path.exists('log'):\n os.mkdir('log')\n logging.config.dictConfig(config)\n # disable urllib3 DEBUG messages\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)", "def merge_config(log_conf: LogConf, conf: Config) -> Config:\n #pylint: disable=too-many-locals\n\n name = conf.name # take individual conf value, ignore common log_conf value\n filename = _ITEM_OR_DEFAULT(log_conf.filename, conf.filename)\n logger_level = _ITEM_OR_DEFAULT(log_conf.logger_level, conf.logger_level)\n log_fmt = _ITEM_OR_DEFAULT(log_conf.log_fmt, conf.log_fmt)\n log_datefmt = _ITEM_OR_DEFAULT(log_conf.log_datefmt, conf.log_datefmt)\n log_level = _ITEM_OR_DEFAULT(log_conf.log_level, conf.log_level)\n log_enabled = _ITEM_OR_DEFAULT(log_conf.log_enabled, conf.log_enabled)\n cout_fmt = _ITEM_OR_DEFAULT(log_conf.cout_fmt, conf.cout_fmt)\n cout_datefmt = _ITEM_OR_DEFAULT(log_conf.cout_datefmt, conf.cout_datefmt)\n cout_level = _ITEM_OR_DEFAULT(log_conf.cout_level, conf.cout_level)\n cout_enabled = _ITEM_OR_DEFAULT(log_conf.cout_enabled, conf.cout_enabled)\n propagate = _ITEM_OR_DEFAULT(log_conf.propagate, conf.propagate)\n log_dir = _ITEM_OR_DEFAULT(log_conf.log_dir, conf.log_dir)\n sub_dir = _ITEM_OR_DEFAULT(log_conf.sub_dir, conf.sub_dir)\n override_allowed = conf.override_allowed # take individual conf value, ignore common log_conf value\n\n n_conf: Config = Config(name, filename, logger_level, log_fmt, log_datefmt, log_level, log_enabled, cout_fmt,\n cout_datefmt, cout_level, cout_enabled, propagate, log_dir, sub_dir, override_allowed)\n\n return n_conf", "def get_hash_log_curr(self):\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'U')\n #first line is header, skip\n log.readline()\n for line in log:\n try:\n line = line.replace('\\n','')\n # log maintenance. 
only keep number of days designated\n line = line.split('|')\n if len(line) != 6:\n raise Exception\n if line[4] > self.log_cut_off_date:\n self.hash_log_curr[line[2]] = line\n except:\n self.print_to_log('Bad log Line: ' + str(line))\n self.print_to_log('Hash Log read Successfully')\n except IOError:\n self.print_to_log('No log found')\n self.hash_log_curr = None\n except IndexError:\n self.print_to_log('Bad Log File')\n raise\n except:\n self.print_to_log('Unknown Error, Exiting ')\n raise", "def conf_update(self):\n pass", "def log_config(self) -> 'outputs.ConnectorsLogConfigResponse':\n return pulumi.get(self, \"log_config\")", "def set_rsyslog_old_configuration():\n add_udp = False\n add_tcp = False\n # Do the configuration lines exist\n is_exist_udp_conf = False\n is_exist_tcp_conf = False\n with open(rsyslog_conf_path, \"rt\") as fin:\n for line in fin:\n if \"imudp\" in line or \"UDPServerRun\" in line:\n is_exist_udp_conf = True\n add_udp = True if \"#\" in line else False\n elif \"imtcp\" in line or \"InputTCPServerRun\" in line:\n is_exist_tcp_conf = True\n add_tcp = True if \"#\" in line else False\n fin.close()\n if add_udp or not is_exist_udp_conf:\n append_content_to_file(rsyslog_old_config_udp_content, rsyslog_conf_path)\n if add_tcp or not is_exist_tcp_conf:\n append_content_to_file(rsyslog_old_config_tcp_content, rsyslog_conf_path)\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def _load_config_log(self):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n if not os.path.isfile(config_path):\n return {}\n with open(config_path, 'r') as f:\n data = yaml.load(f)\n return data", "def update_log_files(self, control_log, helper_log, server_log):\n if control_log is not None:\n self.control_log_file.update(\n control_log, \"server_config.control_log_file\")\n if helper_log is not None:\n self.helper_log_file.update(\n helper_log, \"server_config.helper_log_file\")\n if server_log is not None:\n for index, server_params in enumerate(self.server_params):\n log_name = list(os.path.splitext(server_log))\n if len(self.server_params) > 1:\n log_name.insert(1, \"_{}\".format(index))\n server_params.log_file.update(\n \"\".join(log_name),\n \"server_config.server[{}].log_file\".format(index))", "def get_logger_config(log_dir,\r\n logging_env=\"no_env\",\r\n tracking_filename=\"tracking.log\",\r\n edx_filename=\"edx.log\",\r\n dev_env=False,\r\n syslog_addr=None,\r\n debug=False,\r\n local_loglevel='INFO',\r\n console_loglevel=None,\r\n service_variant=None):\r\n\r\n # Revert to INFO if an invalid string is passed in\r\n if local_loglevel not in LOG_LEVELS:\r\n local_loglevel = 'INFO'\r\n\r\n if console_loglevel is None or console_loglevel not in LOG_LEVELS:\r\n console_loglevel = 'DEBUG' if debug else 'INFO'\r\n\r\n if service_variant is None:\r\n # default to a blank string so that if SERVICE_VARIANT is not\r\n # set we will not log to a sub directory\r\n service_variant = ''\r\n\r\n hostname = platform.node().split(\".\")[0]\r\n syslog_format = (\"[service_variant={service_variant}]\"\r\n \"[%(name)s][env:{logging_env}] %(levelname)s \"\r\n \"[{hostname} %(process)d] [%(filename)s:%(lineno)d] \"\r\n \"- %(message)s\").format(service_variant=service_variant,\r\n logging_env=logging_env,\r\n hostname=hostname)\r\n\r\n handlers = 
['console', 'local'] if debug else ['console',\r\n 'syslogger-remote', 'local']\r\n\r\n logger_config = {\r\n 'version': 1,\r\n 'disable_existing_loggers': False,\r\n 'formatters': {\r\n 'standard': {\r\n 'format': '%(asctime)s %(levelname)s %(process)d '\r\n '[%(name)s] %(filename)s:%(lineno)d - %(message)s',\r\n },\r\n 'syslog_format': {'format': syslog_format},\r\n 'raw': {'format': '%(message)s'},\r\n },\r\n 'handlers': {\r\n 'console': {\r\n 'level': console_loglevel,\r\n 'class': 'logging.StreamHandler',\r\n 'formatter': 'standard',\r\n 'stream': sys.stderr,\r\n },\r\n 'syslogger-remote': {\r\n 'level': 'INFO',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': syslog_addr,\r\n 'formatter': 'syslog_format',\r\n },\r\n 'newrelic': {\r\n 'level': 'ERROR',\r\n 'class': 'lms.lib.newrelic_logging.NewRelicHandler',\r\n 'formatter': 'raw',\r\n }\r\n },\r\n 'loggers': {\r\n 'tracking': {\r\n 'handlers': ['tracking'],\r\n 'level': 'DEBUG',\r\n 'propagate': False,\r\n },\r\n '': {\r\n 'handlers': handlers,\r\n 'level': 'DEBUG',\r\n 'propagate': False\r\n },\r\n }\r\n }\r\n\r\n if dev_env:\r\n tracking_file_loc = os.path.join(log_dir, tracking_filename)\r\n edx_file_loc = os.path.join(log_dir, edx_filename)\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'level': local_loglevel,\r\n 'formatter': 'standard',\r\n 'filename': edx_file_loc,\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.RotatingFileHandler',\r\n 'filename': tracking_file_loc,\r\n 'formatter': 'raw',\r\n 'maxBytes': 1024 * 1024 * 2,\r\n 'backupCount': 5,\r\n },\r\n })\r\n else:\r\n # for production environments we will only\r\n # log INFO and up\r\n logger_config['loggers']['']['level'] = 'INFO'\r\n logger_config['handlers'].update({\r\n 'local': {\r\n 'level': local_loglevel,\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'formatter': 'syslog_format',\r\n 'facility': SysLogHandler.LOG_LOCAL0,\r\n },\r\n 'tracking': {\r\n 'level': 'DEBUG',\r\n 'class': 'logging.handlers.SysLogHandler',\r\n 'address': '/dev/log',\r\n 'facility': SysLogHandler.LOG_LOCAL1,\r\n 'formatter': 'raw',\r\n },\r\n })\r\n\r\n return logger_config", "def sanitize_new_config(self):\n config_log = self._load_config_log()\n if 'new' in config_log:\n for cfg in config_log['new']:\n with open(cfg, 'r+') as f:\n data = yaml.load(f)\n f.seek(0)\n yaml.safe_dump(data, f, default_flow_style=False)\n f.truncate()\n del config_log['new']\n\n self._save_config_log(config_log)", "def getLogs():", "def getLogs():", "def _config_log(self):\n config_worker = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'handlers': {\n 'queue': {\n 'class': 'hqc_meas.utils.log.tools.QueueHandler',\n 'queue': self.log_queue,\n },\n },\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['queue']\n },\n }\n logging.config.dictConfig(config_worker)", "def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")", "def logging(self):\n conf = self.get(\"logging\")\n level = conf[\"level\"]\n if os.environ.get(\"DEBUG_FG21SIM\"):\n print(\"DEBUG: Force 'DEBUG' logging level\", file=sys.stderr)\n level = \"DEBUG\"\n # logging handlers\n handlers = []\n stream = conf[\"stream\"]\n if stream:\n handlers.append(StreamHandler(getattr(sys, stream)))\n logfile = conf[\"filename\"]\n filemode = conf[\"filemode\"]\n if logfile:\n handlers.append(FileHandler(logfile, 
mode=filemode))\n #\n logconf = {\n \"level\": getattr(logging, level),\n \"format\": conf[\"format\"],\n \"datefmt\": conf[\"datefmt\"],\n \"filemode\": filemode,\n \"handlers\": handlers,\n }\n return logconf", "def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger", "def __init__(self):\n# ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG_FILENAME = os.path.join(ROOT_DIR,__config__)\n self.CONFIG.read(self.CONFIG_FILENAME)\n\n self.LISTEN_IP = self.CONFIG.get('server', 'ip') if self.CONFIG.has_option('server', 'ip') else '0.0.0.0'\n self.LISTEN_PORT = self.CONFIG.getint('server', 'port') if self.CONFIG.has_option('server', 'port') else 6210\n \n self.LOG_DIR = self.CONFIG.get('log','dir') if self.CONFIG.has_option('log', 'dir') else 'log'\n if self.LOG_DIR.find(':') == -1 and not self.LOG_DIR.startswith('/'):\n self.LOG_DIR = os.path.join(ROOT_DIR,self.LOG_DIR)\n \n self.LOG_NAME = self.CONFIG.get('log','name') if self.CONFIG.has_option('log', 'name') else 'test.log'\n self.MAXFILESIZE = self.CONFIG.get('log','maxfilesize') if self.CONFIG.has_option('log', 'maxfilesize') else '20*1024*1024'\n self.MAXFILESIZE = eval(self.MAXFILESIZE)\n self.MAXBACKUP = self.CONFIG.getint('log','maxbackup') if self.CONFIG.has_option('log', 'maxbackup') else 20\n self.SEPARATOR = self.CONFIG.get('log','separator') if self.CONFIG.has_option('log', 'separator') else '\\\\n'\n self.SEPARATOR = self.SEPARATOR.strip().replace('\\'','').replace(\"\\\"\",'')\n self.FORMAT = self.CONFIG.get('log','format') if self.CONFIG.has_option('log','format') else '%(message)s'\n self.BUFSIZE = self.CONFIG.get('log','bufsize') if self.CONFIG.has_option('log','bufsize') else '1024*4'\n self.BUFSIZE = eval(self.BUFSIZE) \n self.SEPARATOR = eval('\\''+self.SEPARATOR+'\\'')\n self.DELETELOG = self.CONFIG.getint('log','cleanlog') if self.CONFIG.has_option('log','cleanlog') else 0\n if self.DELETELOG and os.path.exists(self.LOG_DIR):\n try:\n shutil.rmtree(self.LOG_DIR)\n except:\n print('remove dir error')\n self.LOG_DIR = os.path.join(self.LOG_DIR,datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n print(self.LOG_DIR)\n self.STDSHOW = self.CONFIG.getint('std','show') if self.CONFIG.has_option('std', 'show') else 0", "def config_logger(log_cfg_file, experiment_name=None, output_dir='logs'):\n timestr = time.strftime(\"%Y.%m.%d-%H%M%S\")\n exp_full_name = timestr if experiment_name is None else experiment_name + '___' + timestr\n logdir = os.path.join(output_dir, exp_full_name)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n log_filename = os.path.join(logdir, exp_full_name + '.log')\n if os.path.isfile(log_cfg_file):\n logging.config.fileConfig(log_cfg_file, defaults={'logfilename': log_filename})\n msglogger = logging.getLogger()\n msglogger.logdir = logdir\n msglogger.log_filename = log_filename\n msglogger.info('Log file for this run: ' + 
os.path.realpath(log_filename))\n\n # Create a symbollic link to the last log file created (for easier access)\n try:\n os.unlink(\"latest_log_file\")\n except FileNotFoundError:\n pass\n try:\n os.unlink(\"latest_log_dir\")\n except FileNotFoundError:\n pass\n try:\n os.symlink(logdir, \"latest_log_dir\")\n os.symlink(log_filename, \"latest_log_file\")\n except OSError:\n msglogger.debug(\"Failed to create symlinks to latest logs\")\n return msglogger", "def _get_logs(self):\n logstart = self.LOGSTART%(self.session.uuid, self.session.run_counter)\n logend = self.LOGEND%(self.session.uuid, self.session.run_counter)\n log = self.container.logs().decode('UTF-8')\n while log.find(logstart) == -1 or log.find(logend) == -1:\n log = self.container.logs().decode('UTF-8')\n cleaned_log = self._get_cleaned_logs(log, logstart, logend)\n self.session.run_counter = self.session.run_counter + 1\n self.session.save()\n return cleaned_log", "def logging_conf_dict(mocker: MockerFixture) -> dict:\n return mocker.patch.dict(logging_conf_module.LOGGING_CONFIG)", "async def new_guild_log(guild_config_dict: dict) -> \"GuildEventLog\":\n try:\n guild_log = GuildEventLog(guild_config_dict)\n\n # Retrieve server channels\n guild_log.approval_channel = guild_config_dict[\"approval_channel\"]\n guild_log.calendar_channel = guild_config_dict[\"calendar_channel\"]\n\n # Get staff role\n guild_log.staff_role = guild_config_dict[\"staff_role\"]\n\n # Parse events and edits\n guild_log.approval_events = guild_config_dict[\"approval_events\"]\n guild_log.upcoming_events = guild_config_dict[\"upcoming_events\"]\n guild_log.ongoing_events = guild_config_dict[\"ongoing_events\"]\n guild_log.approval_edits = guild_config_dict[\"approval_edits\"]\n\n return guild_log\n\n except (\n GuildEventInvalidConfig, EventLoadError, KeyError, ValueError\n ) as e:\n raise GuildEventInvalidConfig from e", "def last_update(self):\n # get modification time of QWC2 themes config file\n config_updated_at = None\n if os.path.isfile(self.themes_config_path):\n config_updated_at = datetime.utcfromtimestamp(\n os.path.getmtime(self.themes_config_path)\n )\n\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query timestamp\n LastUpdate = self.config_models.model('last_update')\n query = session.query(LastUpdate.updated_at)\n last_update = query.first()\n if last_update is not None:\n if config_updated_at is not None:\n # use latest of both timestamps\n updated_at = max(last_update.updated_at, config_updated_at)\n else:\n # use timestamp from ConfigDB\n updated_at = last_update.updated_at\n else:\n # no entry in ConfigDB, use config timestamp or now\n updated_at = config_updated_at or datetime.utcnow()\n\n # close session\n session.close()\n\n return {\n 'permissions_updated_at': updated_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()" ]
[ "0.7078796", "0.6454262", "0.6153468", "0.609547", "0.6065723", "0.6051836", "0.60207915", "0.6019159", "0.59815073", "0.59241617", "0.5814336", "0.58045876", "0.5802499", "0.5684129", "0.56608063", "0.5597701", "0.5580219", "0.55438983", "0.55438983", "0.54896456", "0.5426725", "0.5403126", "0.53940386", "0.5368825", "0.5354504", "0.5310601", "0.5284269", "0.52640504", "0.5241901", "0.5240994" ]
0.77249867
0
Given a distribution, given by the list p_list, returns the entropy of the distribution.
def entropy(p_list):
    assert len(p_list) > 0
    E = 0.0
    for p in p_list:
        if p == 0.0:
            continue
        E += p*math.log(p)
    return E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(l):\n\n probabilities = np.bincount(l) / len(l)\n with np.errstate(divide='ignore'): # ignore log(0) errors, we'll handle\n log_probabilities = np.log2(probabilities)\n log_probabilities[~np.isfinite(log_probabilities)] = 0\n return -np.sum(probabilities * log_probabilities)", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))", "def calEntropy(vList):\n from collections import Counter\n counter = Counter(vList)\n entropy, N = 0, len(vList)\n for v in counter:\n p = counter[v] / N\n entropy += - p * np.log(p)\n return entropy", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropy(p: torch.Tensor):\n nz = (p > 0).to(p.device)\n\n eps = torch.finfo(p.dtype).eps\n p_stable = p.clone().clamp(min=eps, max=1 - eps)\n\n out = torch.where(\n nz,\n p_stable * torch.log(p_stable),\n torch.tensor(0.0, device=p.device, dtype=torch.float),\n )\n\n return -(out).sum(-1)", "def entropy_(P):\n res = 0.0\n\n mask = P != 0.0 # avoid 0 in log\n f = lambda x: x*np.log2(x)\n # map-reduce strategy (likely to be more optimized than loops)\n temp = list(map(f, P[mask]))\n res = -np.sum(temp, dtype=float)\n return res", "def entropy(data, idxList):\n df = data.loc[idxList]\n counts = df.value_counts().to_numpy()\n counts = counts.reshape(1, -1).astype(np.float32)\n counts /= np.sum(counts)\n log_sum = counts @ np.log2(counts.T)\n return -log_sum[0, 0]", "def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def entropy(strength=256, wordlist=wordlist):\n return os.urandom(strength // 8)", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def entropy_numba(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))", "def entropy(self, policy_params):\n return self.head.entropy(policy_params)", "def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro", "def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)", "def getEntropy(self, pVal, nVal):\n totVal = pVal + nVal\n if pVal == 0 or nVal == 0:\n return 0\n\n pProb = pVal/totVal\n nProb = 1 - pProb\n entropy = - (pProb * math.log(pProb, 2) + nProb * math.log(nProb, 2))\n return entropy", "def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)", "def entropy(self, args):\n mean, stddev = args\n dist = 
tfp.distributions.Normal(loc=mean, scale=stddev)\n entropy = dist.entropy()\n return entropy", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def __compute_entropy_probability(probability:np.ndarray) -> float:\n entropy = -np.sum(probability * np.log2(probability))\n return entropy", "def entropy(counts):\n assert (counts >= 0).all()\n probs = counts / counts.sum()\n probs = probs[probs > 0] # Avoid log(0)\n return - np.sum(probs * np.log2(probs))", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(class_probabilities):\n return sum(-p * math.log(p,2)\n for p in class_probabilities\n if p)" ]
[ "0.75446194", "0.73901826", "0.7331276", "0.7134225", "0.70520824", "0.7050444", "0.69640076", "0.6912087", "0.68127155", "0.68039405", "0.6800982", "0.6751854", "0.6749378", "0.6710503", "0.6699265", "0.6682273", "0.6634976", "0.6633226", "0.6585759", "0.6569177", "0.6564183", "0.6562543", "0.6555286", "0.6552651", "0.654478", "0.651475", "0.6499428", "0.6495497", "0.6492644", "0.64897996" ]
0.83441865
0
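A minimal usage sketch of the entropy helper shown in the document field of the row above — purely illustrative and not part of the dataset row. It reuses the snippet's own logic unchanged; note that, as written, it returns the sum of p*log(p), i.e. the negative of the conventional Shannon entropy, so a caller would negate the result.

import math

def entropy(p_list):
    # Same logic as the row's document snippet: accumulate p*log(p), skipping zero probabilities.
    assert len(p_list) > 0
    E = 0.0
    for p in p_list:
        if p == 0.0:
            continue
        E += p*math.log(p)
    return E

# Uniform coin flip: entropy([0.5, 0.5]) == log(0.5) ~ -0.693,
# so negating gives the usual Shannon entropy ln(2) ~ 0.693.
print(-entropy([0.5, 0.5]))    # ~0.693
print(-entropy([0.25, 0.75]))  # ~0.562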
For a list of dictionaries mapping values to counts, returns a cost used for DT splitting that is optimal at 0. Currently uses the negative of information gain.
def split_cost(label_count_list):
    return -split_information_gain(label_count_list)
    #this cost value is the misclassification error.
    return split_misclassification_error(label_count_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def weighted_score(counters, lst, weight):\n if counters == None:\n counters = {}\n\n\n for item in lst:\n if item in counters:\n counters[item] += weight\n else:\n counters[item] = weight\n\n return counters", "def cost_fun(self, specs_dict: Dict[str, float]) -> float:\n cost = 0\n for spec in self.spec_range.keys():\n penalty = self.compute_penalty(specs_dict[spec], spec)[0]\n cost += penalty\n\n return cost", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue", "def cost(self) -> float:", "def score(priority_list, totalItemCount, itemUsageDict, threshold):\n scored = list()\n for item in priority_list:\n scored.append((item, itemUsageDict[item][\"winRatio\"] * (itemUsageDict[item][\"totalCount\"]/ totalItemCount) * threshold))\n return scored", "def getCost(dat, rateBlocks, key=\"Lintel\"):\n\n x = dat[:]\n\n if key == \"Lintel\":\n edges = [s*100 for s in [5, 6, 7, 10,\n 11, 12, 15, 16, 17, 20, 21, 22, 25]]\n else:\n edges = [s*100 for s in [10, 11, 12, 15, 16, 17, 20,\n 21, 22, 25, 26, 27, 30, 31, 32, 35, 36, 37, 40]]\n for i in edges:\n if i >= x[2]:\n x[2] = i\n break\n\n vol = x[0]*600*x[2]/float(1000000000)\n return vol*rateBlocks # *x[3]", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def segmentDict(dict, weights):\n # Normalize weights\n weights = normalize(weights)\n\n segments = {}\n actual_weights = []\n total_instances = 0\n percent_instances = 0\n i = 0\n cat = None\n\n for k,v in dict.items():\n total_instances += v\n if cat == None:\n cat = k[0].upper()\n\n sorted_d = sorted(dict.items(), key=operator.itemgetter(1), reverse=True)\n for k,v in sorted_d:\n percent_instances += v/total_instances\n segments[k] = cat + str(i)\n if percent_instances >= weights[i]:\n actual_weights += [percent_instances]\n percent_instances = 0\n i += 1\n actual_weights += [percent_instances]\n return [segments, actual_weights]", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is 
equal to the next value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def compute_cost_clarans(data, _cur_choice):\n # modified from that of CLARA\n total_cost = 0.0\n medoids = {}\n for idx in _cur_choice:\n medoids[idx] = []\n\n for i in list(data.index):\n choice = -1\n min_cost = np.inf\n for m in medoids:\n # fast_euclidean from CLARA\n tmp = np.linalg.norm(data.loc[m] - data.loc[i])\n if tmp < min_cost:\n choice = m\n min_cost = tmp\n\n medoids[choice].append(i)\n total_cost += min_cost\n # print(\"total_cost: \", total_cost)\n return total_cost, medoids", "def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:\n res = pmag.MagicDict()\n for w, cate in chosen_duty_list:\n freq = MODEL[cate]['duty'][w]['freq']\n prob = MODEL[cate]['duty'][w]['prob']\n score = prob # freq * prob / DUTY_NF[cate]\n if cate in res:\n res[cate] += score\n else:\n res[cate] = score\n return res", "def sim_average_cost(self, dictionary):\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Running: sim_average_cost\"\n\t\tf = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_estimated_costs.csv', 'a')\n\n\t\tfor p in toggles.CHOSEN_PREDS:\n\t\t\tpred_cost = 0.0\n\t\t\tpred = Predicate.objects.all().get(pk=p+1)\n\t\t\tf.write(pred.question.question_text + '\\n')\n\n\t\t\t#iterate through to find each ip cost\n\t\t\tfor ip in IP_Pair.objects.filter(predicate=pred):\n\t\t\t\titem_cost = 0.0\n\t\t\t\t# sample toggles.COST_SAMPLES times\n\t\t\t\tfor x in range(toggles.COST_SAMPLES):\n\t\t\t\t\t# running one sampling\n\t\t\t\t\twhile ip.status_votes < toggles.NUM_CERTAIN_VOTES:\n\t\t\t\t\t\t# get the vote\n\t\t\t\t\t\tvalue = choice(dictionary[ip])\n\t\t\t\t\t\tif value == True:\n\t\t\t\t\t\t\tip.value += 1\n\t\t\t\t\t\t\tip.num_yes += 1\n\t\t\t\t\t\telif value == False:\n\t\t\t\t\t\t\tip.value -= 1\n\t\t\t\t\t\t\tip.num_no +=1\n\n\t\t\t\t\t\tip.status_votes += 1\n\n\t\t\t\t\t\t# check if ip is done\n\t\t\t\t\t\tif ip.status_votes == toggles.NUM_CERTAIN_VOTES:\n\t\t\t\t\t\t\t\tif ip.value > 0:\n\t\t\t\t\t\t\t\t\tuncertaintyLevel = btdtr(ip.num_yes+1, ip.num_no+1, 
toggles.DECISION_THRESHOLD)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tuncertaintyLevel = btdtr(ip.num_no+1, ip.num_yes+1, toggles.DECISION_THRESHOLD)\n\t\t\t\t\t\t\t\tif uncertaintyLevel < toggles.UNCERTAINTY_THRESHOLD:\n\t\t\t\t\t\t\t\t\titem_cost += (ip.num_yes + ip.num_no)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tip.status_votes -= 2\n\n\t\t\t\t\t# reset values\n\t\t\t\t\tip.value = 0\n\t\t\t\t\tip.num_yes = 0\n\t\t\t\t\tip.num_no = 0\n\t\t\t\t\tip.status_votes = 0\n\n\t\t\t\titem_cost = item_cost/float(toggles.COST_SAMPLES)\n\t\t\t\tpred_cost += item_cost\n\t\t\t\tf.write(ip.item.name + ': ' + str(item_cost) + \" \")\n\n\t\t\tpred_cost = float(pred_cost)/IP_Pair.objects.filter(predicate=pred).count()\n\t\t\tf.write('\\npredicate average cost: ' + str(pred_cost) + '\\n \\n')\n\t\tf.close()\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_estimated_costs.csv'", "def set_class_distribution(ub_dict, percentage_dict, name):\n tot_percent = 0\n for x in percentage_dict:\n tot_percent += percentage_dict[x]\n label_ctr_dict = defaultdict(int)\n for x in ub_dict['labels']:\n label_ctr_dict[x] += 1\n \n if abs(tot_percent - 1) > 1e-15:\n sys.exit(\"Total percentages != 1\")\n if len(ub_dict['meta_data'][0]) != len(percentage_dict):\n sys.exit(\"Mismatch between expected and given number of classes\")\n if set(ub_dict['meta_data'][0]) != set(percentage_dict):\n sys.exit(\"Mismatch between classes given and those expected\")\n\n batch_size = int(min([label_ctr_dict[x]/percentage_dict[x] for x in percentage_dict]))\n class_trgt_distrib = {x: int(batch_size*percentage_dict[x]) for x in percentage_dict}\n class_actual_distrib = {x: 0 for x in percentage_dict}\n tot_trgts = sum([class_trgt_distrib[x] for x in class_trgt_distrib])\n if tot_trgts < batch_size:\n key, val = min(class_trgt_distrib.iteritems(), key=operator.itemgetter(1))\n class_trgt_distrib[key] += (batch_size - tot_trgts)\n\n tot_rows = batch_size\n\n bal_dict = dict()\n bal_data = np.zeros((tot_rows, 3072), dtype=ub_dict['data'].dtype)\n bal_labels = [0] * tot_rows\n bal_filenames = [\"\"] * tot_rows\n\n bal_ctr = 0\n for idx in range(len(ub_dict['labels'])):\n curr_label = ub_dict['labels'][idx]\n if class_actual_distrib[curr_label] < class_trgt_distrib[curr_label]:\n bal_data[bal_ctr, :] = ub_dict['data'][idx, :]\n bal_labels[bal_ctr] = ub_dict['labels'][idx]\n bal_filenames[bal_ctr] = ub_dict['filenames'][idx]\n \n bal_ctr += 1\n class_actual_distrib[curr_label] += 1\n\n bal_dict['data'] = bal_data\n bal_dict['labels'] = bal_labels\n bal_dict['filenames'] = bal_filenames\n bal_dict['name'] = name\n bal_dict['src_meta_data'] = ub_dict['meta_data']\n\n return bal_dict", "def calc_batter_z_score(batter_list, players_over_zero_dollars, one_dollar_players,\n dollar_per_fvaaz, player_pool_multiplier, add_original_value=False):\n player_pool = int(players_over_zero_dollars * player_pool_multiplier)\n # Standard Calculations\n run_list = []\n hr_list = []\n rbi_list = []\n sb_list = []\n ops_list = []\n avg_list = []\n # weighted_batter_list = []\n batter_dict_list = []\n if not isinstance(batter_list[0], dict):\n for batter in batter_list:\n b = model_to_dict(batter)\n batter_dict_list.append(b)\n else:\n batter_dict_list = batter_list\n for batter in batter_dict_list:\n if add_original_value:\n batter['original_value'] = batter['dollarValue']\n\n run_list.append(batter['r'])\n hr_list.append(batter['hr'])\n rbi_list.append(batter['rbi'])\n sb_list.append(batter['sb'])\n 
ops_list.append(batter['ops'])\n avg_list.append(batter['avg'])\n run_list_nlargest = heapq.nlargest(player_pool, run_list)\n hr_list_nlargest = heapq.nlargest(player_pool, hr_list)\n rbi_list_nlargest = heapq.nlargest(player_pool, rbi_list)\n sb_list_nlargest = heapq.nlargest(player_pool, sb_list)\n ops_list_nlargest = heapq.nlargest(player_pool, ops_list)\n avg_list_nlargest = heapq.nlargest(player_pool, avg_list)\n # Average Calculation\n r_avg = avg_calc(run_list_nlargest)\n hr_avg = avg_calc(hr_list_nlargest)\n rbi_avg = avg_calc(rbi_list_nlargest)\n sb_avg = avg_calc(sb_list_nlargest)\n ops_avg = avg_calc(ops_list_nlargest)\n avg_avg = avg_calc(avg_list_nlargest)\n # Standard Deviation Calculation\n r_std_dev = std_dev_calc(run_list_nlargest, r_avg)\n hr_std_dev = std_dev_calc(hr_list_nlargest, hr_avg)\n rbi_std_dev = std_dev_calc(rbi_list_nlargest, rbi_avg)\n sb_std_dev = std_dev_calc(sb_list_nlargest, sb_avg)\n ops_std_dev = std_dev_calc(ops_list_nlargest, ops_avg)\n avg_std_dev = std_dev_calc(avg_list_nlargest, avg_avg)\n # zScore Calculation\n for batter in batter_dict_list:\n batter['zScoreR'] = z_score_calc(batter['r'], r_avg, r_std_dev)\n batter['weightedR'] = batter['zScoreR'] * float(batter['ab'])\n batter['zScoreHr'] = z_score_calc(batter['hr'], hr_avg, hr_std_dev)\n batter['weightedHr'] = batter['zScoreHr'] * float(batter['ab'])\n batter['zScoreRbi'] = z_score_calc(batter['rbi'], rbi_avg, rbi_std_dev)\n batter['weightedRbi'] = batter['zScoreRbi'] * float(batter['ab'])\n batter['zScoreSb'] = z_score_calc(batter['sb'], sb_avg, sb_std_dev)\n batter['weightedSb'] = batter['zScoreSb'] * float(batter['ab'])\n batter['zScoreOps'] = z_score_calc(batter['ops'], ops_avg, ops_std_dev)\n batter['weightedOps'] = batter['zScoreOps'] * float(batter['ab'])\n batter['zScoreAvg'] = z_score_calc(batter['avg'], ops_avg, ops_std_dev)\n batter['weightedAvg'] = batter['zScoreAvg'] * float(batter['ab'])\n # weighted_batter_list.append(batter)\n # Weighted Calculations\n weighted_run_list = []\n weighted_hr_list = []\n weighted_rbi_list = []\n weighted_sb_list = []\n weighted_ops_list = []\n weighted_avg_list = []\n # for batter in weighted_batter_list:\n for batter in batter_dict_list:\n weighted_run_list.append(batter['weightedR'])\n weighted_hr_list.append(batter['weightedHr'])\n weighted_rbi_list.append(batter['weightedRbi'])\n weighted_sb_list.append(batter['weightedSb'])\n weighted_ops_list.append(batter['weightedOps'])\n weighted_avg_list.append(batter['weightedOps'])\n weighted_run_list_nlargest = heapq.nlargest(player_pool, weighted_run_list)\n weighted_hr_list_nlargest = heapq.nlargest(player_pool, weighted_hr_list)\n weighted_rbi_list_nlargest = heapq.nlargest(player_pool, weighted_rbi_list)\n weighted_sb_list_nlargest = heapq.nlargest(player_pool, weighted_sb_list)\n weighted_ops_list_nlargest = heapq.nlargest(player_pool, weighted_ops_list)\n weighted_avg_list_nlargest = heapq.nlargest(player_pool, weighted_avg_list)\n # Weighted Average Calculation\n weighted_r_avg = avg_calc(weighted_run_list_nlargest)\n weighted_hr_avg = avg_calc(weighted_hr_list_nlargest)\n weighted_rbi_avg = avg_calc(weighted_rbi_list_nlargest)\n weighted_sb_avg = avg_calc(weighted_sb_list_nlargest)\n weighted_ops_avg = avg_calc(weighted_ops_list_nlargest)\n weighted_avg_avg = avg_calc(weighted_avg_list_nlargest)\n # Weighted Standard Deviation Calculation\n weighted_r_std_dev = std_dev_calc(weighted_run_list_nlargest, weighted_r_avg)\n weighted_hr_std_dev = std_dev_calc(weighted_hr_list_nlargest, 
weighted_hr_avg)\n weighted_rbi_std_dev = std_dev_calc(weighted_rbi_list_nlargest, weighted_rbi_avg)\n weighted_sb_std_dev = std_dev_calc(weighted_sb_list_nlargest, weighted_sb_avg)\n weighted_ops_std_dev = std_dev_calc(weighted_ops_list_nlargest, weighted_ops_avg)\n weighted_avg_std_dev = std_dev_calc(weighted_avg_list_nlargest, weighted_avg_avg)\n # Weighted zScore Calculation\n for batter in batter_dict_list:\n batter['weightedZscoreR'] = z_score_calc(batter['weightedR'], weighted_r_avg,\n weighted_r_std_dev)\n batter['weightedZscoreHr'] = z_score_calc(batter['weightedHr'], weighted_hr_avg,\n weighted_hr_std_dev)\n batter['weightedZscoreRbi'] = z_score_calc(batter['weightedRbi'], weighted_rbi_avg,\n weighted_rbi_std_dev)\n batter['weightedZscoreSb'] = z_score_calc(batter['weightedSb'], weighted_sb_avg,\n weighted_sb_std_dev)\n batter['weightedZscoreOps'] = z_score_calc(batter['weightedOps'], weighted_ops_avg,\n weighted_ops_std_dev)\n batter['weightedZscoreAvg'] = z_score_calc(batter['weightedAvg'], weighted_avg_avg,\n weighted_avg_std_dev)\n # Calculate Values\n fvaaz_list = []\n for batter in batter_dict_list:\n # TODO: how to handle an avg version of this?\n batter['fvaaz'] = (batter['zScoreR'] + batter['zScoreHr'] + batter['zScoreRbi'] + batter['zScoreSb'] +\n batter['weightedZscoreOps'])\n fvaaz_list.append(batter['fvaaz'])\n players_over_one_dollar = players_over_zero_dollars - one_dollar_players\n fvaaz_list_over_zero = heapq.nlargest(players_over_zero_dollars, fvaaz_list)\n fvaaz_list_over_one = heapq.nlargest(players_over_one_dollar, fvaaz_list)\n for batter in batter_dict_list:\n if batter['fvaaz'] >= fvaaz_list_over_one[players_over_one_dollar - 1]:\n # TODO: dollar_per_fvaaz seems to be a circular reference, how to resolve this?\n batter['dollarValue'] = batter['fvaaz'] * dollar_per_fvaaz\n elif batter['fvaaz'] >= fvaaz_list_over_zero[players_over_zero_dollars - 1]:\n batter['dollarValue'] = 1.0\n else:\n batter['dollarValue'] = 0.0\n return sorted(batter_dict_list, key=operator.itemgetter('fvaaz'), reverse=True)\n # sorts by fvaaz (largest to smallest)", "def _SD_optimal(t):", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def compute_transition_weights(trans_counts, smoothing):\n weights = defaultdict(float)\n \n total_count = {}\n for tag in trans_counts.keys():\n total_count[tag] = sum(trans_counts[tag].values())\n \n\n for prev_tag in trans_counts:\n for curr_tag in (list(trans_counts.keys()) + [END_TAG]):\n if curr_tag in trans_counts[prev_tag]:\n weights[(curr_tag, prev_tag)] = np.log((trans_counts[prev_tag][curr_tag] + smoothing) / (total_count[prev_tag] + len(trans_counts) * smoothing))\n else:\n weights[(curr_tag, prev_tag)] = np.log(smoothing / (total_count[prev_tag] + len(trans_counts) * smoothing))\n\n\n for tag in (list(trans_counts.keys()) + [END_TAG]):\n weights[START_TAG, tag] = -np.inf\n weights[tag, END_TAG] = -np.inf\n\n return weights", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n\n for element in d1:\n total += d1[element]\n\n for item in d2:\n if item in d1:\n score += math.log(d1[item]/total) * (d2[item])\n else:\n score += math.log(0.5/total) * (d2[item])\n return score", "def calEntropy(vList):\n from collections import 
Counter\n counter = Counter(vList)\n entropy, N = 0, len(vList)\n for v in counter:\n p = counter[v] / N\n entropy += - p * np.log(p)\n return entropy", "def _compute_set_overhead_for_ns(set_stats, ns):\n\n if not ns or not set_stats or isinstance(set_stats, Exception):\n return 0\n\n overhead = 0\n for _k, stats in set_stats.iteritems():\n if not stats or isinstance(stats, Exception):\n continue\n\n ns_name = util.get_value_from_second_level_of_dict(stats, (\"ns\", \"ns_name\"), default_value=None,\n return_type=str).values()[0]\n if ns_name != ns:\n continue\n\n set_name = util.get_value_from_second_level_of_dict(stats, (\"set\", \"set_name\"), default_value=\"\",\n return_type=str).values()[0]\n objects = sum(util.get_value_from_second_level_of_dict(stats, (\"objects\", \"n_objects\"), default_value=0,\n return_type=int).values())\n overhead += objects * (9 + len(set_name))\n\n return overhead", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, target, source, others)\n return uniques", "def create_costs():\n infinity = float(\"inf\")\n costs = {}\n costs['biysk'] = 0\n costs['barnaul'] = infinity\n costs['novosibirsk'] = infinity\n costs['belokurikha'] = infinity\n costs['tomsk'] = infinity\n costs['krasnoyarsk'] = infinity\n costs['omsk'] = infinity\n return costs", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = one_way_skar(d, source, target, others)\n return uniques", "def calculate_training_cost(soldier_list: List[Soldier]):\n total_cost = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"INF\":\n cost = 2.5 * soldier.weapon + 1.0 * soldier.armor\n elif soldier.typecode == \"ARC\":\n cost = 1.5 * soldier.weapon + 3.0 * soldier.armor\n elif soldier.typecode == \"CVL\":\n cost = 4.0 * soldier.weapon + 6.0 * soldier.armor\n if soldier.vitality < 0.35:\n cost *= 0.5\n total_cost += cost\n ##################################################################################\n return total_cost", "def cost(self):\n cost = {}\n if len(self.nodes) == 0:\n return cost\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.cost[r] for n in self.nodes]\n estimator = AvgAggregatorEstimator(values)\n cost[r] = estimator\n return cost", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs" ]
[ "0.58399343", "0.5823463", "0.5779527", "0.55471617", "0.5469961", "0.5425537", "0.53996956", "0.5383586", "0.533916", "0.5299643", "0.52919436", "0.5257763", "0.52473646", "0.52160746", "0.52141124", "0.52082145", "0.51415884", "0.5134601", "0.5126979", "0.5097489", "0.50955534", "0.5092949", "0.50773215", "0.507489", "0.5071505", "0.50704443", "0.5062674", "0.5060795", "0.5043839", "0.5036701" ]
0.6280166
0
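A hedged, self-contained sketch of how a split cost like the one in the row above could be evaluated end to end. The split_information_gain helper below is a hypothetical stand-in (its definition is not included in the dataset row); it computes the usual entropy-based information gain over a list of per-child label-count dictionaries.

import math
from collections import defaultdict

def _entropy(counts):
    # Shannon entropy of a {label: count} dictionary (hypothetical helper).
    total = sum(counts.values())
    if total == 0:
        return 0.0
    return -sum((c/total) * math.log(c/total) for c in counts.values() if c > 0)

def split_information_gain(label_count_list):
    # Assumed definition: parent entropy minus the size-weighted child entropies.
    merged = defaultdict(int)
    for counts in label_count_list:
        for label, c in counts.items():
            merged[label] += c
    total = sum(merged.values())
    children = sum(sum(c.values())/total * _entropy(c)
                   for c in label_count_list if sum(c.values()) > 0)
    return _entropy(merged) - children

def split_cost(label_count_list):
    # Negative information gain, as in the row's document snippet.
    return -split_information_gain(label_count_list)

# A perfectly separating split has the largest gain, hence the lowest (most negative) cost.
print(split_cost([{'a': 5}, {'b': 5}]))                  # ~ -0.693
print(split_cost([{'a': 3, 'b': 2}, {'a': 2, 'b': 3}]))  # ~ -0.020 (weak split)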
Given a list of values and associated labels, optimizes the best split threshold z where dividing the values into z has the lowest split cost. Returns a pair (z,cost) where cost is the split_cost of the threshold z. If nonelabels is given, this indicates the labels of missing values that must be passed down to both subtrees. This does not affect the output z but it does affect the output cost value.
def best_split(values,labels,nonelabels=None):
    assert len(values) >= 2
    assert len(values) == len(labels)
    N = len(values)
    ilist = sorted((v,l) for (v,l) in zip(values,labels))
    leftcount = defaultdict(int)
    rightcount = defaultdict(int)
    for v,l in ilist:
        rightcount[l] += 1
    bestindex = -1
    bestcost = split_cost([leftcount,rightcount])

    cost = bestcost
    #costs = [cost]
    #print "Split costs:"
    for i in xrange(len(ilist)):
        v,l = ilist[i]
        rightcount[l] -= 1
        leftcount[l] += 1
        if i+1 >= len(ilist) or v == ilist[i+1][0]:
            #no splits when v is equal to the next value
            continue
        cost = split_cost([leftcount,rightcount])
        #print " ",v,leftcount.values(),rightcount.values(),cost
        #costs.append(cost)
        if cost < bestcost:
            bestcost = cost
            bestindex = i
    #raw_input()
    if bestindex < 0:
        #no split found... try splitting in half
        splitval = (ilist[0][0]+ilist[-1][0])*0.5
    else:
        splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5
    if nonelabels is None:
        return (splitval,bestcost)
    #reevaluate counts
    leftcount = defaultdict(int)
    rightcount = defaultdict(int)
    for l in nonelabels:
        leftcount[l] += 1
        rightcount[l] += 1
    for v,l in ilist:
        if v <= splitval:
            leftcount[l] += 1
        else:
            rightcount[l] += 1
    return splitval,split_cost([leftcount,rightcount])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def get_best_thresholds(labels, test_y, outputs, plot=False):\n t_max = [0] * len(labels)\n f_max = [0] * len(labels)\n\n for i, label in enumerate(labels):\n ts = []\n fs = []\n\n for t in np.linspace(0.1, 0.99, num=50):\n p, r, f, _ = precision_recall_fscore_support(test_y[:,i], 
np.where(outputs[:,i]>t, 1, 0), average='micro')\n ts.append(t)\n fs.append(f)\n if f > f_max[i]:\n f_max[i] = f\n t_max[i] = t\n\n if plot:\n print(f'LABEL: {label}')\n print(f'f_max: {f_max[i]}')\n print(f't_max: {t_max[i]}')\n\n plt.scatter(ts, fs)\n plt.show()\n \n return t_max, f_max", "def heuristic2_label_OBD(n, P, label, critical=None):\n print \"trying to label \" + str(n) + \" with \" + str(label)\n nodes_labeled = []\n if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :\n print \"FAIL on critical and not the same label.\"\n return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[n]['OBDlabel'] = label\n nodes_labeled.append(n) # this is a list that gets passed through recursions\n if critical == True:\n P.node[n]['critical'] = True\n # labeling part done\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > new_label:\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now the recursive step - labeling all these nodes\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n print \"trying perm: \" + str(perm)\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n break\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n print \"removing label of \" + str(nn)\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n print \"Win in labeling neighbors of \" + str(n)\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n print \"FAIL of all permutations from \" + str(n)", "def heuristic2B_label_OBD(n, P, label, critical=None):\n nodes_labeled = []\n\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n \n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys(): # if it has a label\n if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we 
label them all with that\n \n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n \n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now labeling all these nodes\n \n for neigh in neighbors_to_label:\n if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :\n return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[neigh]['OBDlabel'] = new_label\n nodes_labeled.append(neigh) # this is a list that gets passed through recursions\n if flag_critical == True:\n P.node[neigh]['critical'] = True\n # labeling part done\n \n # and now recursive step - going into each neighbor to continue, in any order if necessary\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):\n nc = prediction.shape[2] - 5\n xc = prediction[..., 4] > conf_thres\n min_wh, max_wh = 2, 4096\n max_det = 300\n max_nms = 30000\n time_limit = 10.0\n redundant = True\n multi_label &= nc > 1\n merge = False\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5]\n v[:, 4] = 1.0\n v[range(len(l)), l[:, 0].long() + 5] = 1.0\n x = torch.cat((x, v), 0)\n if not x.shape[0]:\n continue\n x[:, 5:] *= x[:, 4:5]\n box = xywh2xyxy(x[:, :4])\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else:\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n n = x.shape[0]\n if not n:\n continue\n elif n > max_nms:\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = torchvision.ops.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det:\n i = i[:max_det]\n if merge and 1 < n < 3000.0:\n iou = box_iou(boxes[i], 
boxes) > iou_thres\n weights = iou * scores[None]\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)\n if redundant:\n i = i[iou.sum(1) > 1]\n output[xi] = x[i]\n if time.time() - t > time_limit:\n None\n break\n return output", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def _fit_split_(self, dataset, targets, val_set, val_targets, checkpoints):\n dir = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16))\n dir = '.tmp' + dir + '/'\n os.mkdir(dir)\n grid = self.grid\n if self.folds is not None or self.folds != 0:\n if self.task == 'Classification':\n if self.folds > 1:\n sf = 
StratifiedKFold(n_splits=self.folds, shuffle=True, random_state=0)\n elif 0 <= self.folds < 1:\n sf = StratifiedShuffleSplit(n_splits=1, test_size=self.folds, random_state=0)\n elif self.task == 'Regression':\n folds, dataset, targets = self.split_regression(dataset, targets)\n results = []\n for params in grid:\n try:\n nn = NeuralNetwork()\n for i in range(len(params['layers'])):\n if i == 0:\n nn.add_layer('dense', params['layers'][i], params['activation'], dataset.shape[1])\n else:\n if i == len(params['layers']) - 1 and self.task == 'Regression':\n nn.add_layer('dense', params['layers'][i], 'linear')\n else:\n nn.add_layer('dense', params['layers'][i], params['activation'])\n curr_res = {'params': params,\n 'metric_stats': [],\n 'test_stats': [],\n 'vl_stats': [],\n 'tr_stats': []}\n\n if self.task == 'Classification':\n folds = sf.split(dataset, targets)\n for train_index, test_index in folds:\n X_train, X_test = dataset[train_index], dataset[test_index]\n Y_train, Y_test = targets[train_index], targets[test_index]\n nested_best = None\n nested_best_metric = None\n nested_tr_pred = None\n nested_vl_pred = None\n for i in range(self.restarts):\n nn.compile(task=self.task,\n loss=self.loss_name,\n l2_lambda=params['l2_lambda'],\n dropout=params['dropout'],\n optimizer=SGD(lr_init=params['lr'],\n momentum=params['momentum'],\n nesterov=params['nesterov'],\n lr_sched=StepDecayScheduler(drop=params['lr_sched'][0],\n epochs_drop=params['lr_sched'][1])))\n\n curr_model, curr_metric, best_epoch = nn.fit(X_train, Y_train,\n val_set=val_set, val_targets=val_targets,\n batch_size=params['batch_size'],\n test_size=params['test_size'],\n epochs=params['epoch'],\n patience=params['patience'],\n save_pred=dir + 'tmp_gs',\n save_model=None)\n\n nested_best_metric = metrics.metric_improve(self.metric, nested_best_metric, curr_metric)\n if nested_best_metric[1]:\n nested_tr_pred = np.load(dir + 'tmp_gs_tr_predictions.npy')[best_epoch]\n nested_vl_pred = np.load(dir + 'tmp_gs_vl_predictions.npy')[best_epoch]\n nested_best = copy.deepcopy(curr_model)\n if nested_best_metric[2]:\n break\n\n Y_pred = nested_best.predict(X_test)\n if self.metric == 'loss':\n curr_metric = np.sum(self.loss(Y_test, Y_pred), axis=0) / len(Y_test)\n else:\n curr_metric = metrics.metric_computation(self.metric, Y_test, Y_pred)\n\n curr_res['metric_stats'].append(curr_metric)\n tr_stats = []\n vl_stats = []\n test_stats = []\n for stat in self.statistics:\n if stat == 'loss':\n\n tr_stats.append(np.mean(self.loss(nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:])))\n vl_stats.append(np.mean(self.loss(nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:])))\n test_stats.append(np.mean(self.loss(Y_test, Y_pred)))\n else:\n tr_stats.append(metrics.metric_computation(stat,\n nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:]))\n vl_stats.append(metrics.metric_computation(stat,\n nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:]))\n test_stats.append(metrics.metric_computation(stat, Y_test, Y_pred))\n curr_res['tr_stats'].append(tr_stats)\n curr_res['vl_stats'].append(vl_stats)\n curr_res['test_stats'].append(test_stats)\n\n results.append(curr_res)\n if checkpoints is not None:\n with open(checkpoints + '.pkl', 'wb') as output:\n pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)\n\n except NesterovError:\n continue\n shutil.rmtree(dir)\n return results", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= 
self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def _fit_no_split(self, dataset, targets, val_set, val_targets, checkpoints):\n dir = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16))\n dir = '.tmp' + dir + '/'\n os.mkdir(dir)\n grid = self.grid\n results = []\n for params in grid:\n try:\n nn = NeuralNetwork()\n for i in range(len(params['layers'])):\n if i == 0:\n nn.add_layer('dense', params['layers'][i], params['activation'], dataset.shape[1])\n else:\n if i == len(params['layers']) - 1 and self.task 
== 'Regression':\n nn.add_layer('dense', params['layers'][i], 'linear')\n else:\n nn.add_layer('dense', params['layers'][i], params['activation'])\n curr_res = {'params': params,\n 'vl_stats': [],\n 'tr_stats': []}\n\n nested_best_metric = None\n nested_tr_pred = None\n nested_vl_pred = None\n for i in range(self.restarts):\n nn.compile(task=self.task,\n loss=self.loss_name,\n l2_lambda=params['l2_lambda'],\n optimizer=SGD(lr_init=params['lr'],\n momentum=params['momentum'],\n nesterov=params['nesterov'],\n lr_sched=StepDecayScheduler(drop=params['lr_sched'][0],\n epochs_drop=params['lr_sched'][1])))\n\n curr_model, curr_metric, best_epoch = nn.fit(dataset, targets,\n val_set=val_set, val_targets=val_targets,\n batch_size=params['batch_size'],\n test_size=params['test_size'],\n epochs=params['epoch'],\n patience=params['patience'],\n save_pred=dir + 'tmp_gs',\n save_model=None)\n\n nested_best_metric = metrics.metric_improve(self.metric, nested_best_metric, curr_metric)\n if nested_best_metric[1]:\n nested_tr_pred = np.load(dir + 'tmp_gs_tr_predictions.npy')[best_epoch]\n nested_vl_pred = np.load(dir + 'tmp_gs_vl_predictions.npy')[best_epoch]\n if nested_best_metric[2]:\n break\n\n tr_stats = []\n vl_stats = []\n for stat in self.statistics:\n if stat == 'loss':\n tr_stats.append(np.mean(self.loss(nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:])))\n vl_stats.append(np.mean(self.loss(nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:])))\n else:\n tr_stats.append(metrics.metric_computation(stat,\n nested_tr_pred[:, :targets.shape[1]],\n nested_tr_pred[:, targets.shape[1]:]))\n vl_stats.append(metrics.metric_computation(stat,\n nested_vl_pred[:, :targets.shape[1]],\n nested_vl_pred[:, targets.shape[1]:]))\n curr_res['tr_stats'].append(tr_stats)\n curr_res['vl_stats'].append(vl_stats)\n\n results.append(curr_res)\n if checkpoints is not None:\n with open(checkpoints + '.pkl', 'wb') as output:\n pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)\n\n except NesterovError:\n continue\n shutil.rmtree(dir)\n return results", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] for id in ids])\n else:\n self.value = None\n return", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def minimization(current_tally, group_labels=None, seed=None):\n if seed is not None:\n random.seed(seed)\n n_treatments = len(current_tally)\n if n_treatments < 2:\n raise ValueError('current_tally must be a list of lists whose length is greater than 2.')\n target_length = None\n for tally in current_tally:\n if target_length is None:\n target_length = len(tally)\n else:\n if target_length != len(tally):\n raise ValueError('Each list in current_tally must be the same length.')\n\n if group_labels is not None:\n 
if len(group_labels) != target_length:\n raise ValueError('group_labels must be {} long'.format(target_length))\n\n sums = [0] * n_treatments\n for idx, tally in enumerate(current_tally):\n sums[idx] = sum(tally)\n if sum(sums) == 0:\n # No assignment made yet, so make one at random\n idx = random.randint(0, n_treatments - 1)\n else:\n min_value = min(sums)\n groups = [i for i, j in enumerate(sums) if j == min_value]\n if len(groups) > 1:\n idx = random.choice(groups)\n else:\n idx = groups[0]\n\n if group_labels:\n print(group_labels)\n group = group_labels[idx]\n else:\n group = idx + 1\n print(group)\n return group", "def get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold", "def binary_fairness(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n task: Literal[\"demographic_parity\", \"equal_opportunity\", \"all\"] = \"all\",\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n if task not in [\"demographic_parity\", \"equal_opportunity\", \"all\"]:\n raise ValueError(\n f\"Expected argument `task` to either be ``demographic_parity``,\"\n f\"``equal_opportunity`` or ``all`` but got {task}.\"\n )\n\n if task == \"demographic_parity\":\n if target is not None:\n rank_zero_warn(\"The task demographic_parity does not require a target.\", UserWarning)\n target = torch.zeros(preds.shape)\n\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n if task == \"demographic_parity\":\n return _compute_binary_demographic_parity(**transformed_group_stats)\n\n if task == \"equal_opportunity\":\n return _compute_binary_equal_opportunity(**transformed_group_stats)\n\n if task == \"all\":\n return {\n **_compute_binary_demographic_parity(**transformed_group_stats),\n **_compute_binary_equal_opportunity(**transformed_group_stats),\n }\n return None", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n 
noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. 
class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def split_by_cost(cost_list, \n comm=None, \n return_work=False,\n return_all=False):\n if comm == None:\n comm = MPI.COMM_WORLD\n \n size = comm.Get_size()\n rank = comm.Get_rank()\n\n ### Total cost of job_list\n total = np.sum(cost_list) \n ### Ideal work for each rank\n max_work = (total / size)*1.01\n \n ### Preparing indices that each rank will use\n work_idx = [[] for x in range(size)]\n work_sum = [0 for x in range(size)]\n current_worker = 0\n withheld_idx_list = []\n withheld_value_list = []\n for idx,value in enumerate(cost_list):\n ## Decide whether to withhold value\n if work_sum[current_worker] + value > max_work*1.05:\n withheld_idx_list.append(idx)\n withheld_value_list.append(value)\n continue\n \n work_idx[current_worker].append(idx)\n work_sum[current_worker] += value\n if work_sum[current_worker] > max_work:\n current_worker += 1\n \n withheld_idx_list = np.array(withheld_idx_list)\n withheld_value_list = np.array(withheld_value_list)\n withheld_sort_idx = np.argsort(withheld_idx_list)\n withheld_idx_list = withheld_idx_list[withheld_sort_idx]\n withheld_value_list = withheld_value_list[withheld_sort_idx]\n for idx,withheld_idx in enumerate(withheld_idx_list):\n min_idx = np.argmin(work_sum)\n work_sum[min_idx] += withheld_value_list[idx]\n work_idx[min_idx].append(withheld_idx)\n \n my_list = work_idx[rank]\n \n if not return_all:\n if not return_work:\n return my_list\n else:\n return my_list,work_sum[rank]\n else:\n if not return_work:\n return work_idx\n else:\n return work_idx,work_sum", "def get_min_across_splits_continuous(\n arr: np.ndarray, y: np.ndarray, splits: np.ndarray, eval_func: Callable\n ):\n n = len(splits)\n if n > 500:\n # If many split points, use some threading\n with multiprocessing.Pool(processes=8) as p:\n # Get evaluation scores across all the splits\n post_split_evals = dict(\n zip(\n range(len(splits)),\n 
p.starmap(\n BaseTree.get_split_goodness_fit_continuous,\n zip([arr] * n, [y] * n, splits, [eval_func] * n),\n ),\n )\n )\n p.close()\n else:\n # If not too many split points, get scores across all splits\n post_split_evals = dict(\n zip(\n range(len(splits)),\n map(\n lambda x: BaseTree.get_split_goodness_fit_continuous(*x),\n zip([arr] * n, [y] * n, splits, [eval_func] * n),\n ),\n )\n )\n # Get the minimum split based on gain ratio\n min_eval = min(\n post_split_evals,\n key=lambda x: pipe(\n post_split_evals.get(x),\n lambda results: results[0] / results[1], # entropy / intrinsic value\n ),\n )\n\n # Return the best split and the splits scores\n return (splits[min_eval], *post_split_evals.get(min_eval))", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def clean_labels(labels):\n\n llabels, slabels = list(labels), set(labels)\n \n for l in slabels:\n if llabels.count(l) <2 and l != max(slabels):\n llabels[llabels.index(l)] = l+1\n return clean_labels(llabels)\n elif llabels.count(l) <2 and l == max(slabels):\n llabels[llabels.index(l)] = l-1\n return clean_labels(llabels)\n else:\n return np.array(llabels)", "def fit(self, data, targets):\n # update these three\n self.idx = 0\n self.val = None\n self.left = None\n self.right = None\n ### YOUR CODE HERE\n # i have added a slow and a fast version\n \n num_points, num_features = data.shape\n # print('num points, num_features', num_points, num_features)\n \n def feat_score(feat_idx):\n feat = data[:, feat_idx].copy()\n perm = np.argsort(feat)\n s_feat = feat[perm]\n s_targets = targets[perm]\n target_var = ((s_targets - s_targets.mean())**2).sum()\n s_left, s_right = sum_squares(s_targets)\n def score(idx, _vals):\n ## slow version\n #left = _vals[0:idx]\n #right = _vals[idx:]\n #assert len(left) + len(right) == len(_vals), (len(left), len(right), len(_vals))\n #left_mean = np.mean(left)\n #right_mean = np.mean(right)\n #left_error = np.sum((left-left_mean)**2)\n #assert np.allclose(left_error, s_left[idx]) \n #right_error = np.sum((right-right_mean)**2)\n #assert np.allclose(right_error, s_right[idx])\n # return left_error+right_error\n # fast version\n return s_left[idx] + s_right[idx]\n # score for every split\n scores = np.array([score(x, s_targets) for x in range(0, num_points)])\n assert scores.min() <= target_var, target_var\n best_score_idx = np.argmin(scores)\n best_score = scores[best_score_idx]\n val = s_feat[best_score_idx]\n # print('best score', feat_idx, best_score, best_score_idx, val, s_feat[best_score_idx+1])\n \n return best_score, {'val': val, \n 'left': np.mean(s_targets[:best_score_idx]), \n 'right': np.mean(s_targets[best_score_idx:])\n } \n\n split_scores = []\n for f in range(0, num_features):\n total_score, _params = feat_score(f)\n split_scores.append(total_score)\n # print('score of {0} - {1}'.format(feat_names[f], total_score))\n # print('feature scores:', np.array(split_scores))\n best_feat = np.argmin(split_scores)\n best_score = split_scores[best_feat]\n # print('Best Feature idx: {0} - Best Cost: {1}'.format(best_feat, best_score))\n score_again, params = feat_score(best_feat)\n # print('double check score', score_again, 
best_score)\n self.idx = best_feat\n self.val = params['val']\n self.left = params['left']\n self.right = params['right']\n print(\"idx={}, val={}, left={}, right={}\".format(self.idx, self.val, self.left, self.right))\n assert not np.isnan(self.left)\n assert not np.isnan(self.right)\n ### END CODE", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def flatten_binary_scores(self, scores, labels, ignore=None):\n scores = scores.view(-1)\n labels = labels.view(-1)\n if ignore is None:\n return scores, labels\n valid = (labels != ignore)\n vscores = scores[valid]\n vlabels = labels[valid]\n return vscores, vlabels" ]
[ "0.6308325", "0.60905", "0.586987", "0.5617052", "0.5437539", "0.5403514", "0.5329129", "0.52851343", "0.52182657", "0.51694614", "0.5163954", "0.5138482", "0.5117913", "0.5107134", "0.5100632", "0.5094282", "0.50677717", "0.50626785", "0.50522244", "0.5046674", "0.50465786", "0.5039915", "0.49960294", "0.49689108", "0.4961738", "0.49561733", "0.49556974", "0.49542636", "0.4953122", "0.49509645" ]
0.7924888
0
Looks up the leaf node corresponding to the given entry. Does not handle missing values.
def lookup(self,entry): if self.type == 'v': return self v = entry[self.feature] assert v != None if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best return c.lookup(entry) elif self.type == 'i': if v <= self.value: return self.children[0].lookup(entry) else: return self.children[1].lookup(entry) raise RuntimeError("Invalid DecisionTreeNode type?")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_parent_node(self, entry, key):\r\n parent = entry\r\n keys = key.split(\".\")\r\n for k in keys:\r\n try:\r\n parent = parent[k]\r\n except:\r\n raise Exception(\"key \\\"\" + key + \"\\\" was not found in HAR file\")\r\n return parent", "def get(self, entry: ConfigEntry) -> any:\n value = self.root\n if value is None:\n return None\n\n for key in entry.key_path:\n if self.ignore_case_in_keys:\n key = key.lower()\n value = value.get(key)\n if value is None:\n return entry.value\n\n return value", "def lookupVal(self, val):\n pybtlib.lookupVal.restype = ctypes.c_int\n pybtlib.lookupVal.argtypes = [ctypes.POINTER(Tree), ctypes.c_int]\n return pybtlib.lookupVal(ctypes.byref(self), val)", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def check_leaf(leaf_value, dic, entry_list, messages, current_elem):\n value = traverse_dict(dic, entry_list)\n default_value = leaf_value['default']\n required_type = type(default_value)\n required = leaf_value['required']\n # messages.append(\"Checking leaf \" + str(entry_list))\n if required and value is None:\n add_message(\n messages, current_elem, \"The required value in \" + str(entry_list) + \" cannot be found!\"\n )\n if value is not None and not isinstance(value, required_type):\n add_message(\n messages,\n current_elem,\n \"The required value in \"\n + str(entry_list)\n + \" doesn't match expected type \"\n + str(required_type),\n )", "def get_entry(self, entry: str) -> Optional[Union['Directory', NormalFile, VirusFile, Entry]]:\n for e in self.get_entries():\n if e.get_name() == entry:\n return e", "def _extract_leaf(leaf):\n try:\n return re.match(r'leaf-(\\d+)', leaf).group(1)\n except:\n return None", "def find_path(t, entry):\n if t.entry == entry:\n return [t.entry]\n else:\n branches = [find_path(branch, entry) for branch in t.branches]\n for branch in branches:\n if branch:\n return [t.entry] + branch\n return False", "def lookup(self, key):\n k = self.get_position(key)\n\n if self.keys[k] == key:\n return node.values[k]\n\n # Lookup in the child node.\n if self.refs[k+1] == None:\n return None\n return self.refs[k+1].lookup(key)", "def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return", "def _lookup(self, data):\n parent, current = None, self.root\n while current:\n if current < data: # data should be in right\n parent, current = current, current.right\n elif current > data: # data should be in left\n parent, current = current, current.left\n else: # equals\n return parent, current\n return parent, current", "def get_leaf(self, descr):\n matches = [x for x in self.leaves if x.descr == descr]\n if matches == []:\n raise RuntimeError(f\"Did not find any leaves matching 
'{descr}'\")\n if len(matches) > 1:\n raise RuntimeError(f\"Found multiple matching leaves: {matches}\")\n return matches[0]", "def lookup(self, key):\n # check that this tree actually has a root node\n debug.printMsg(\"Call made to Lookup\")\n debug.printMsg(\"checking if we have a BST\")\n if self.root:\n debug.printMsg(\"Calling Recursive Lookup\")\n (result, err) = self.recursiveLookup(key, self.root)\n # if we did not find anything\n if err: \n debug.printMsg(\"Oops, we couldn't find anything\")\n return None\n else: \n # we found a result\n debug.printMsg(\"we found: \")\n return result\n else:\n debug.printMsg(\"Oops, the BST seems to not exist\")\n # root doesnt exist\n return None", "def _lookup(self, key):\n key_hash = self.first_hash(key)\n entry = self.table[key_hash]\n if entry.key is None or entry is key:\n return entry\n free = None\n if entry.key is dummy:\n free = entry\n elif compare(entry.hash, key_hash) and key == entry.key:\n return entry\n\n i = key_hash\n while True:\n i += self.second_hash(key)\n i = i % self.size\n entry = self.table[i]\n if entry.key is None:\n return entry if free is None else free\n if entry.key is key or \\\n (compare(entry.hash, key_hash) and key == entry.key):\n return entry\n elif entry.key is dummy and free is None:\n free = dummy\n\n assert False, \"not reached\"", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def lookup(self, val):\n if val < self.val:\n if self.left is None:\n return None, None\n return self.left.lookup(val)\n elif val > self.val:\n if self.right is None:\n return None, None\n return self.right.lookup(val)\n else:\n return self", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def leaf(self, value, depth, available):\n method_name = 'leaf_' + value.__class__.__name__\n method = getattr(self, method_name, self.generic_leaf)\n return method(value, depth, available)", "def get_entry(self, entry_name):\n if entry_name in self.entries: # Don't invoke constructor if not needed\n return self.entries[entry_name]\n return self.entries.setdefault(entry_name, PathElement(self.file_name, self.namespaces))", "def labelRoot(lut, label):\n result = lut[label]\n if lut[result] != result:\n result = labelRoot(lut, result)\n lut[label] = result\n return result", "def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value", "def get_node(self, key: str) -> Optional[Node]:", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def _get_leaf(leaf, d, pattern):\n xleaf = d.rsplit('/', 1)[-1].strip()\n check_pattern = re.match('\\*(\\.[a-zA-Z0-9]+)$', pattern)\n if check_pattern:\n xten = check_pattern.groups()[0]\n if xleaf[-len(xten):] == xten:\n xleaf = xleaf[:-len(xten)].strip()\n if xleaf.find(ROOT_LEAF_PREFIX) == 0:\n return leaf\n elif leaf.strip():\n return '{0}.{1}'.format(leaf, xleaf)\n else:\n return xleaf", "def lookup(self, key):\n return self.root.lookup(key)", "def 
predict(self,entry):\n assert self.root is not None,\"Decision tree is not initialized\"\n return self.root.predict(entry)", "def _find_node(self, item):\n # Start with the root node\n node = self.root\n # Loop until we descend past the closest leaf node\n while node is not None:\n # TODO: Check if the given item matches the node's data\n if ...:\n # Return the found node\n return node\n # TODO: Check if the given item is less than the node's data\n elif ...:\n # TODO: Descend to the node's left child\n node = ...\n # TODO: Check if the given item is greater than the node's data\n elif ...:\n # TODO: Descend to the node's right child\n node = ...\n # Not found\n return None", "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def recursiveLookup(self, key, curr):\n # basically repeat insert\n debug.printMsg(\"Entered recursiveLookup\")\n # if we found a match break\n debug.printMsg('Checking base condition: ' + key + ' = ' + curr.key)\n if key == curr.key:\n debug.printMsg(\"Success, found\")\n return (curr, None)\n # if the key is larger than curr\n elif key > curr.key:\n debug.printMsg(\"Nope, now checking if we should go right\")\n debug.printMsg(\"yep\")\n debug.printMsg(\"Check if we still have room to search\")\n if curr.hasRightChild():\n debug.printMsg(\"Moving further right\")\n # move onto the next node along the search path\n return self.recursiveLookup(key, curr.right)\n else:\n debug.printMsg(\"Nope, ran out of search path. bummer\")\n # hit the end and there was no match\n return (None, True)\n else:\n debug.printMsg(\"Nope, we're going left\") \n debug.printMsg(\"Check if we still have room to search\") \n if curr.hasLeftChild():\n debug.printMsg(\"Moving further left\")\n return self.recursiveLookup(key, curr.left)\n\n else:\n debug.printMsg(\"Shit balls, we ran out of search path\")\n return (None, True)", "def tree_to_leaf(self,\n x_row):\n node = self.tree[0]\n while True:\n if node.is_leaf:\n return node\n val = x_row[node.label]\n if np.isnan(val):\n node = self.tree[node.id_null]\n elif val <= node.cutoff:\n node = self.tree[node.id_lower]\n elif val >= node.cutoff:\n node = self.tree[node.id_higher]\n else:\n raise NameError" ]
[ "0.6321155", "0.62434244", "0.61522406", "0.6018858", "0.601176", "0.6007655", "0.59877974", "0.5881265", "0.5834447", "0.5798371", "0.57872856", "0.5778088", "0.57707995", "0.5761493", "0.57495886", "0.57285845", "0.57001275", "0.56570214", "0.56385493", "0.5637989", "0.560824", "0.560087", "0.5568936", "0.5553265", "0.55228597", "0.55168426", "0.55156344", "0.55140805", "0.55074525", "0.5483108" ]
0.6669498
0
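As a brief illustrative aside on the lookup routine in the entry above: it descends the tree by node type ('v' leaf, 's' discrete split with a nearest-seen-value fallback for unseen categories, 'i' inequality split). A minimal Python 3 sketch of that traversal, assuming a hypothetical Node class and numeric category codes:

```python
# Minimal sketch of the leaf-lookup idea above (Python 3).
# Node layout is hypothetical: 'v' = leaf, 's' = discrete split, 'i' = inequality split.
class Node:
    def __init__(self, kind, feature=None, value=None, children=None):
        self.kind = kind          # 'v', 's', or 'i'
        self.feature = feature    # index of the feature tested at this node
        self.value = value        # leaf label, or threshold for 'i' nodes
        self.children = children or {}

    def lookup(self, entry):
        """Return the leaf reached by entry; assumes no missing values."""
        if self.kind == 'v':
            return self
        v = entry[self.feature]
        if self.kind == 's':
            child = self.children.get(v)
            if child is None:
                # unseen category: fall back to the nearest seen value,
                # mirroring the KeyError branch in the original
                child = min(self.children.items(), key=lambda kv: abs(kv[0] - v))[1]
            return child.lookup(entry)
        # 'i': numeric threshold split, child 0 is <= value, child 1 is > value
        return self.children[0 if v <= self.value else 1].lookup(entry)

leaves = {0: Node('v', value='yes'), 1: Node('v', value='no')}
root = Node('i', feature=0, value=2.5, children=leaves)
print(root.lookup([3.1]).value)  # -> 'no'
```

The nearest-value fallback only covers unseen discrete values; a policy for missing features would still be needed, which the original explicitly does not handle.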
Given an indexed database db, a list of labels (one for each id), and a list of ids to test, sets this node to the best label.
def pick_best_label(self,db,labels,ids): self.type = 'v' if len(labels) > 0: self.value = vote([labels[id] for id in ids]) else: self.value = None return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n 
else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print 
\"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def identify_examples(self,db,labels,node):\n path = []\n while node.parent != None:\n nkey = None\n for (k,c) in node.parent().children.iteritems():\n if c is node:\n nkey = k\n break\n assert nkey != None\n path.append((node.parent(),nkey))\n node = node.parent()\n path = path[::-1]\n nids = len(labels)\n ids = []\n for id in xrange(nids):\n valid = True\n for n,ckey in path:\n f = n.feature\n val = featureMatrix[f,id]\n if val is None:\n #it's a None value, just continue on\n continue\n else:\n key = None\n if n.type == 'i':\n key = (0 if val <= n.value else 1)\n else:\n key = val\n if key != ckey:\n valid = False\n break\n if valid:\n ids.append(id)\n return ids", "def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def _compute_relevance_map(self, labels):\n\n ds_labels = np.zeros(self.ds_size)\n ds_relevance_map = 0\n for i in np.unique(labels):\n if i != 0:\n # 2.1- Compute the coarse label image\n y, x, z = np.where(labels == i)\n ds_labels[np.int32(y * self.full_to_ds_ratio[0]),\n np.int32(x * self.full_to_ds_ratio[1]), z] = i\n # 2.2- Compute the energy map\n M = np.ones_like(ds_labels)\n M[ds_labels == i] = 0\n distance_map = distance_transform_edt(M)\n ds_relevance_map += distance_map\n\n # 2.3- Normalize the energy map and compute the ROI\n ds_relevance_map = ds_relevance_map / ds_relevance_map.max()\n return ds_labels, ds_relevance_map", "def project(database, frequent_nodes, minsup, freq_labels, length, H, L, L_hat, n_graphs, n_pos, n_neg, pos_index, class_index, neg_index, graph_id_to_list_id, mapper, labels, model, constraints):\n\t# Declaring globals for recursive pattern mining\n\tglobal __subgraph_count\n\tglobal __positive_index\n\tglobal 
__n_pos\n\tglobal __n_graphs\n\tglobal __dataset\n\tglobal __pattern_set\n\tglobal __cl_constraints\n\tglobal __ml_constraints\n\tglobal __negative_index\n\tglobal __graph_id_to_list_id\n\tglobal __min_threshold\n\tglobal __min_index\n\n\t__graph_id_to_list_id = graph_id_to_list_id\n\t__ml_constraints = [c for c in constraints[0] if c[0] < n_graphs and c[1] < n_graphs]\n\t__cl_constraints = [c for c in constraints[1] if c[0] < n_graphs and c[1] < n_graphs]\n\t__positive_index = pos_index\n\t__negative_index = neg_index\n\t__n_pos = n_pos\n\t__n_graphs = n_graphs\n\t__H = H\n\t__L = L\n\t__L_hat = L_hat\n\t__dataset = []\n\t__pattern_set = []\n\t__subgraph_count = 0\n\t__min_threshold = sys.maxint\n\t__min_index = 0\n\tdfs_codes = []\n\tprojection_map = {}\n\tfeature_selection_model = None\n\n\tif model == \"top-k\":\n\t\tfeature_selection_model = TopKModel()\n\telif model == \"greedy\":\n\t\tfeature_selection_model = GreedyModel(__n_graphs, __positive_index)\n\telif model == \"gMGFL\":\n\t\tfeature_selection_model = GMGFLModel(__L, __L_hat)\n\telif model == \"gMLC\":\n\t\tfeature_selection_model = GMLCModel(__L, __H)\n\telse:\n\t\tlogging.log(logging.ERROR, \"Model %s not recognized\" %(model))\n\t\texit(0)\n\n\t# TODO: evaluate\n\t\"\"\"\n\tOnly constraints for current binary split\n\tfor con in __ml_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__ml_constraints.remove((con[0], con[1]))\n\n\tfor con in __cl_constraints:\n\t\tif not labels[con[0]][class_index] == 1 and not labels[con[1]][class_index] == 1:\n\t\t\t__cl_constraints.remove((con[0], con[1]))\n\t\"\"\"\n\n\t# clean constraints from not applicable ones\n\tfor i, con in enumerate(__ml_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__ml_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__ml_constraints[i] = (list_id1, list_id2)\n\t\texcept KeyError:\n\t\t\t__ml_constraints.remove(con)\n\n\tfor i, con in enumerate(__cl_constraints):\n\t\tif con[0] >= n_graphs or con[1] >= n_graphs:\n\t\t\t__cl_constraints.remove(con)\n\t\t\tcontinue\n\t\ttry:\n\t\t\tlist_id1 = __graph_id_to_list_id[con[0]]\n\t\t\tlist_id2 = __graph_id_to_list_id[con[1]]\n\t\t\t__cl_constraints[i] = (list_id1, list_id2)\n\t\texcept KeyError:\n\t\t\t__cl_constraints.remove(con)\n\n\t# TODO: Is this needed?\n\tfor l in frequent_nodes:\n\t\t__subgraph_count += 1\t\t\n\n\tfor g in database:\n\t\tfor n in g.nodes:\n\t\t\tedges = get_forward_init(n, g)\n\t\t\tif len(edges) > 0:\n\t\t\t\t for e in edges:\n\t\t\t\t\tnf = g.nodes[e.fromn]\n\t\t\t\t\tnt = g.nodes[e.to]\n\t\t\t\t\tdfsc = dfs_code(0,1,nf.label,e.label,nt.label)\n\t\t\t\t\tpdfs = pre_dfs(g.id,e,None)\n\t\t\t\t\t# because this is a root --> append the predecesspr dfs code (graph id, edge, None)\n\t\t\t\t\tif dfsc in projection_map:\n\t\t\t\t\t\tprojection_map[dfsc].append(pdfs)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprojection_map[dfsc] = [pdfs,]\n\n\t# Start Subgraph Mining\n\tthreshold = 0 \t# initial threshold for first length 1 subgraph\n\tfor pm in reversed(sorted(projection_map, key=dfs_code_compare)):\t# sorted by highest fromnode label (order is important)\n\t\tif len(projection_map[pm]) < minsup: # number of graphs, this initial pattern occurs (root patterns)\n\t\t\tcontinue\n\t\tdfs_codes.append(dfs_code(0,1,pm[2],pm[3],pm[4]))\t# initial pattern for this projection is always local 0, 1)\n\t\tdfs_codes = mine_subgraph(database, 
projection_map[pm],\n\t\t\t\t\t\t\tdfs_codes, minsup, length, mapper, feature_selection_model)\n\t\tdfs_codes.pop()\t# dfs_codes is a list of all projections for this initial pattern\n\treturn __dataset, __pattern_set", "def select_node_by_label(conn, label):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes WHERE label=?\", (label,))", "def update_database(JobData, Labels):\n\n DB = boto3.resource('dynamodb')\n DBTable = DB.Table(os.environ['DB_TABLE_NAME'])\n\t\n DBData = {}\n DBData['VideoID'] = JobData['JobId']\n DBData['VideoName'] = JobData['Video']['S3ObjectName']\n DBData['S3Bucket'] = JobData['Video']['S3Bucket']\n DBData['Labels'] = []\n\n print('Total number of labels detected was {}'.format(len(Labels)))\n\t\n\t# Now want to create a list of unique labels, number of occurrences, time of occurrences and average confidence\n for Label in Labels:\n if len(DBData['Labels']) == 0: # Populate the first item\n add_new_label(DBData['Labels'], Label)\n else:\n FoundMatch = False\n for UniqueLabel in DBData['Labels']:\n if Label['Label']['Name'] == UniqueLabel['LabelName']:\n update_label(UniqueLabel, Label)\n FoundMatch = True\n break\n # If we haven't found a match, need to add another unique label\n if not FoundMatch: add_new_label(DBData['Labels'], Label)\n\n # Now put this into the database. DynamoDB doesn't support Python float format so fix this\n DBData = FixFloats(DBData)\n DBTable.put_item(Item = DBData)\n\n return", "def choose_ltv(self, label):\n tids = self.node_tids[label]\n vals = self.node_vals[label]\n losses = [self.tid_losses_dct[tid] for tid in tids]\n\n # -- try to return the value corresponding to one of the\n # trials that was previously chosen\n tid_set = set(tids)\n for tid in self.best_tids:\n if tid in tid_set:\n idx = tids.index(tid)\n rval = losses[idx], tid, vals[idx]\n break\n else:\n # -- choose a new best idx\n ltvs = sorted(zip(losses, tids, vals))\n best_idx = int(self.rng.geometric(1.0 / self.avg_best_idx)) - 1\n best_idx = min(best_idx, len(ltvs) - 1)\n assert best_idx >= 0\n best_loss, best_tid, best_val = ltvs[best_idx]\n self.best_tids.append(best_tid)\n rval = best_loss, best_tid, best_val\n return rval", "def update_labels(self, nidxs, y):\n\n y = np.array(y, dtype=bool)\n for n, yi in zip(nidxs, y):\n self.node_labels[n] = [self.labels[i] for i, j in enumerate(yi) if j]\n\n return self", "def load_idx_to_label(dataset_name):\n if dataset_name == 'imagenet':\n path = 'https://gist.githubusercontent.com/yrevar/'\n path += '6135f1bd8dcf2e0cc683/raw/'\n path += 'd133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'\n path += '/imagenet1000_clsid_to_human.pkl'\n idx_to_label = pickle.load(urllib.request.urlopen(path))\n \n elif dataset_name == 'indoor_scenes':\n label_to_idx = {'airport_inside': 0,\n 'bar': 1,\n 'bedroom': 2,\n 'casino': 3,\n 'inside_subway': 4,\n 'kitchen': 5,\n 'livingroom': 6,\n 'restaurant': 7,\n 'subway': 8,\n 'warehouse': 9}\n idx_to_label = {idx: label for label, idx in label_to_idx.items()}\n \n elif dataset_name == 'pubfig10':\n celebs = ['Aaron-Eckhart', 'Adriana-Lima',\n 'Angela-Merkel', 'Beyonce-Knowles', \n 'Brad-Pitt', 'Clive-Owen', \n 'Drew-Barrymore', 'Milla-Jovovich', \n 'Quincy-Jones', 'Shahrukh-Khan']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'pubfig83':\n celebs = ['adam-sandler', 'alex-baldwin', 'angelina-jolie', 'anna-kournikova', 'ashton-kutcher', 'avril-lavigne',\n 'barack-obama', 'ben-affleck', 'beyonce-knowles', 'brad-pitt', 'cameron-diaz', 'cate-blanchett', 
'charlize-theron',\n 'christina-ricci', 'claudia-schiffer', 'clive-owen', 'colin-farell', 'colin-powell', 'cristiano-ronaldo', 'daniel-craig',\n 'daniel-radcliffe', 'david-beckham', 'david-duchovny', 'denise-richards', 'drew-barrymore', 'dustin-hoffman', 'ehud-olmert',\n 'eva-mendes', 'faith-hill', 'george-clooney', 'gordon-brown', 'gwyneth-paltrow', 'halle-berry', 'harrison-ford',\n 'hugh-jackman', 'hugh-laurie', 'jack-nicholson', 'jennifer-aniston', 'jennifer-lopez', 'jennifer-lovehewitt',\n 'jessica-alba', 'jessica-simpson', 'joaquin-phoenix', 'john-travolta', 'julia-roberts', 'jula-stiles', 'kate-moss',\n 'kate-winslet', 'katherine-heigl', 'keira-knightley', 'kiefer-sutherland', 'leonardo-dicaprio', 'lindsay-lohan', 'mariah-carey',\n 'martha-stewart', 'matt-damon', 'meg-ryan', 'meryl-streep', 'michael-bloomberg', 'mickey-rourke', 'miley-cyrus',\n 'morgan-freeman', 'nicole-kidman', 'nicole-richie', 'orlando-bloom', 'reese-witherspoon', 'renee-zellweger', 'ricky-martin',\n 'robert-gates', 'sania-mirza', 'scarlett-johansson', 'shahrukh-khan', 'shakira', 'sharon-stone', 'silvio-berlusconi',\n 'stephen-colbert', 'steve-carell', 'tom-cruise', 'uma-thurman', 'victoria-beckham', 'viggo-mortensen', 'will-smith', 'zac-efron']\n idx_to_label = { i: celebs[i] for i in range(len(celebs)) }\n\n elif dataset_name == 'vggface2':\n path = \"../utils/vggface2_80_to_complete.pkl\"\n with open(path, 'rb') as file:\n idx_to_label = pickle.load(file)\n\n else:\n raise NotImplementedError\n \n return idx_to_label", "def test_label_anonymizing(self):\n class User(Base):\n @property\n def prop_score(self):\n return sum([tag.prop_score for tag in self.tags])\n\n class Tag(Base):\n @property\n def prop_score(self):\n return self.score1 * self.score2\n \n for labeled, labelname in [(True, 'score'), (True, None), (False, None)]:\n clear_mappers()\n \n tag_score = (tags_table.c.score1 * tags_table.c.score2)\n user_score = select([func.sum(tags_table.c.score1 *\n tags_table.c.score2)],\n tags_table.c.user_id == users_table.c.id)\n \n if labeled:\n tag_score = tag_score.label(labelname)\n user_score = user_score.label(labelname)\n else:\n user_score = user_score.as_scalar()\n \n mapper(Tag, tags_table, properties={\n 'query_score': column_property(tag_score),\n })\n\n\n mapper(User, users_table, properties={\n 'tags': relation(Tag, backref='user', lazy=False), \n 'query_score': column_property(user_score),\n })\n\n session = create_session()\n session.save(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)]))\n session.save(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)]))\n session.flush()\n session.clear()\n\n def go():\n for user in session.query(User).all():\n self.assertEquals(user.query_score, user.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n\n\n # fails for non labeled (fixed in 0.5):\n if labeled:\n def go():\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n self.assert_sql_count(testing.db, go, 1)\n else:\n u = session.query(User).filter_by(name='joe').one()\n self.assertEquals(u.query_score, u.prop_score)\n \n for t in (tags_table, users_table):\n t.delete().execute()", "def test_dbscan_feature():\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n metric = 'euclidean'\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = 
dbscan(X, metric=metric,\n eps=eps, min_samples=min_samples)\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=metric)\n labels = db.fit(X, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)", "def parse_first_database(db, percentage_ids, alignment_lengths):\n #@@@ Try blast parser object\n results = MinimalBlastParser9(db)\n\n #@@@ cogent.util.transform.cartesian_product\n options = [(p,a) for p in percentage_ids for a in alignment_lengths]\n\n best_hits = {}\n for total_queries, (metadata, hits) in enumerate(results):\n fields = [i.strip() for i in metadata['FIELDS'].split(',')]\n name = metadata['QUERY']\n percentage_id = fields.index('% identity')\n bit_score = fields.index('bit score')\n alg_length = fields.index('alignment length')\n evalue = fields.index('e-value')\n subject_id = fields.index('Subject id')\n\n if not hits: \n continue\n\n best_hits[name] = []\n for p,a in options:\n # best bit score\n bbs = 0\n result = None\n\n for h in hits:\n h[percentage_id] = float(h[percentage_id])\n h[alg_length] = float(h[alg_length])\n h[bit_score] = float(h[bit_score])\n\n if h[percentage_id]>=p and h[alg_length]>=a and h[bit_score]>bbs:\n result = { 'a': { 'subject_id': h[subject_id],\n 'percentage_id': h[percentage_id],\n 'bit_score': h[bit_score],\n 'alg_length': int(h[alg_length]),\n 'evalue': float(h[evalue]) },\n 'b': { 'subject_id': None, \n 'bit_score': -1 } }\n bbs = h[bit_score]\n best_hits[name].append(result)\n\n return total_queries+1, best_hits", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n\n pool = Subset(train_data, subsample_idx)\n labelled = Subset(train_data, labelled_idx)\n # get ranking of datapoints\n idx_to_add = self.score(model, pool, labelled)\n\n # choose top scoring datapoints to label\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n\n return new_labelled_idx, new_unlabelled_idx", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n \n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. 
Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get ranking of unlabelled datapoints from batchBALD\n ranking = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = ranking[:num_query] # take in order given\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def label_index2node(label_index, labels):\n hi_pairs, med_pairs = labels\n if label_index < len(hi_pairs):\n return hi_pairs[label_index][0]\n else:\n error_msg = \"there is no node with label \"+str(label_index)\n assert label_index-len(hi_pairs) < len(med_pairs), error_msg\n return med_pairs[label_index-len(hi_pairs)][0]", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n # TODO get some metrics on the scores/plot?\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def parse_second_database(db, best_hits, percentage_ids_other,\n alignment_lengths_other):\n results = MinimalBlastParser9(db)\n\n #@@@ create function to return results\n for metadata, hits in results:\n fields = [i.strip() for i in metadata['FIELDS'].split(',')]\n name = metadata['QUERY']\n percentage_id = fields.index('% identity')\n bit_score = fields.index('bit score')\n alg_length = fields.index('alignment length')\n evalue = fields.index('e-value')\n subject_id = fields.index('Subject id')\n\n if name in best_hits:\n for i,(p,a) in enumerate([(p,a) for p in percentage_ids_other \\\n for a in alignment_lengths_other]):\n if not best_hits[name][i]:\n continue\n\n # best bit score\n bbs = 0\n result = None\n for h in hits:\n h[percentage_id] = float(h[percentage_id])\n h[alg_length] = float(h[alg_length])\n h[bit_score] = float(h[bit_score]) \n if h[percentage_id]>=p and h[alg_length]>=a and h[bit_score]>bbs:\n result = { 'subject_id': h[subject_id],\n 'percentage_id': h[percentage_id],\n 'bit_score': h[bit_score],\n 'alg_length': int(h[alg_length]),\n 'evalue': float(h[evalue]) }\n bbs = h[bit_score]\n if result:\n best_hits[name][i]['b'] = result", "def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None", "def set_label(self, labels_set=None):\n for pos in labels_set:\n self._q_bnn_circ.x(self.outputs[int(pos)])", "def put(self, id):\n context = request.environ.get('context')\n resp = dbapi.netdevices_labels_update(context, id, request.json)\n response = {\"labels\": list(resp.labels)}\n return response, 200, None", "def get_query(self, model, train_data, labelled_idx, unlabelled_idx):\n self.num_steps += 1\n # if this is the first step, then just return the seed set\n if self.num_steps == 1:\n return labelled_idx, unlabelled_idx\n\n if 
self.num_subsample is not None:\n num_subsample = min(self.num_subsample, len(unlabelled_idx))\n subsample_idx = random.sample(unlabelled_idx, k=num_subsample)\n else:\n subsample_idx = unlabelled_idx\n # initialise dataloader. Loads data in order of unlabelled idx\n pool = Subset(train_data, subsample_idx)\n\n # get scores on unlabelled datapoints\n scores = self.score(model, pool)\n\n # choose top scoring datapoints to label\n num_query = min(self.num_query, len(subsample_idx))\n idx_to_add = np.argsort(scores)[-num_query:]\n new_labelled_idx = labelled_idx + [subsample_idx[i] for i in idx_to_add]\n new_unlabelled_idx = [j for j in range(len(train_data)) if j not in new_labelled_idx]\n return new_labelled_idx, new_unlabelled_idx", "def _determine_index(self, id):\n\n return bisect.bisect_left(self._max_node_ids, id)" ]
[ "0.68891954", "0.6300339", "0.58898443", "0.5635267", "0.5226151", "0.51870143", "0.51595086", "0.50911933", "0.50677323", "0.49870756", "0.4969618", "0.4949305", "0.4914531", "0.49075586", "0.48753136", "0.48677775", "0.48287308", "0.47792596", "0.47701105", "0.47633627", "0.47311106", "0.47189683", "0.47187024", "0.4717691", "0.471671", "0.47091928", "0.47000366", "0.46964186", "0.46963125", "0.46918386" ]
0.73168194
0
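The negatives in the row above are active-learning query strategies: subsample the unlabelled pool, score each point with the model, then move the top-scoring points into the labelled set. The sketch below is a minimal, self-contained version of that top-k acquisition step; the function name and the toy scores are assumptions for illustration, not code taken from the dataset.

```python
import numpy as np

def acquire_top_k(scores, subsample_idx, labelled_idx, n_total, k):
    """Move the k highest-scoring pool points from the unlabelled to the labelled set.

    scores[i] is the acquisition score of pool point subsample_idx[i].
    """
    k = min(k, len(subsample_idx))
    top = np.argsort(scores)[-k:]  # positions of the k largest scores
    new_labelled = labelled_idx + [subsample_idx[i] for i in top]
    new_unlabelled = [j for j in range(n_total) if j not in new_labelled]
    return new_labelled, new_unlabelled

# Toy usage: 10 datapoints, 2 already labelled, a 4-point pool, acquire 2.
labelled = [0, 1]
pool = [3, 5, 7, 9]
scores = [0.1, 0.9, 0.4, 0.8]
print(acquire_top_k(scores, pool, labelled, n_total=10, k=2))
# -> ([0, 1, 9, 5], [2, 3, 4, 6, 7, 8])
```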
Given an index database db, a list of labels (one for each id), and a list of ids to train on, computes the optimal split value. It modifies this node to have the optimal split type and value, and then returns the quality of the split as computed by the split_cost function. If features != None, it is a list of available feature indices to use in this split, or a function of 0 arguments that can be called to get a list of features.
def pick_best_split(self,db,labels,ids,features=None): idlabels = [labels[id] for id in ids] if misclassification_error(idlabels) == 0: #base case: no misclassifications self.type = 'v' self.value = idlabels[0] return 0 best = None bestCost = 0 splitval = None discrete = True if features == None: if len(ids) < db.numFeatures(): #look at all present features in the training set features = db.getPresentFeatures(ids) #print len(features),"of",db.numFeatures(),"features selected" else: features = range(db.numFeatures()) elif callable(features): features = features() for i in features: if len(db.entryLists[i]) == 0: continue idiscrete = db.discreteFeature[i] if idiscrete: #count number of labels of a certain value splitter = defaultdict(lambda:defaultdict(int)) #count of labels for missing values nmissing = defaultdict(int) for id in ids: val = db[i,id] if val is None: #missing values go down to all splits nmissing[labels[id]] += 1 continue splitter[val][labels[id]] += 1 if len(splitter) > continuous_variable_threshold: #print "Determined to be a continuous variable" idiscrete = False break if idiscrete: if len(splitter) <= 1: #only a single value continue #count number of missing values in all splits cmax = 0 for k in splitter: for l,v in nmissing.iteritems(): splitter[k][l] += v cmax = max(cmax,sum(splitter[k].values())) #shrink by fraction of (# of ids - largest child)/(# of ids) scale = (1.0-float(cmax)/float(len(ids)))*len(splitter) #evaluate cost cost = split_cost(splitter.values())*scale #print "Split on",i,"information gain",-cost,splitter.values() else: #continuous, need to learn the best split vals = [] presentlabels = [] nonelabels = [] for id in ids: val = db[i,id] if val is None: nonelabels.append(labels[id]) continue vals.append(val) presentlabels.append(labels[id]) if len(vals) <= 1: print "No values for feature",i,"?" print vals continue #print "Considering continuous split on",i s,cost = best_split(vals,presentlabels,nonelabels) scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2 cost *= scale #print "Result",s,"Information gain",-cost if cost < bestCost: best = i bestCost = cost discrete = idiscrete if not idiscrete: splitval = s if best is None: self.type = 'v' if len(ids) > 0: self.value = vote(idlabels) return misclassification_error(idlabels) else: self.value = None return 0 else: self.feature = best #discrete or inequality split if discrete: self.type = 's' else: self.type = 'i' self.value = splitval return bestCost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = 
{0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def best_split(values,labels,nonelabels=None):\n assert len(values) >= 2\n assert len(values) == len(labels)\n N = len(values)\n ilist = sorted((v,l) for (v,l) in zip(values,labels))\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for v,l in ilist:\n rightcount[l] += 1\n bestindex = -1\n bestcost = split_cost([leftcount,rightcount])\n\n cost = bestcost\n #costs = [cost]\n #print \"Split costs:\"\n for i in xrange(len(ilist)):\n v,l = ilist[i]\n rightcount[l] -= 1\n leftcount[l] += 1\n if i+1 >= len(ilist) or v == ilist[i+1][0]:\n #no splits when v is equal to the next value\n continue\n cost = split_cost([leftcount,rightcount])\n #print \" \",v,leftcount.values(),rightcount.values(),cost\n #costs.append(cost)\n if cost < bestcost:\n bestcost = cost\n bestindex = i\n #raw_input()\n if bestindex < 0:\n #no split found... try splitting in half\n splitval = (ilist[0][0]+ilist[-1][0])*0.5\n else:\n splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5\n if nonelabels is None:\n return (splitval,bestcost)\n #reevaluate counts\n leftcount = defaultdict(int)\n rightcount = defaultdict(int)\n for l in nonelabels:\n leftcount[l] += 1\n rightcount[l] += 1\n for v,l in ilist:\n if v <= splitval:\n leftcount[l] += 1\n else:\n rightcount[l] += 1\n return splitval,split_cost([leftcount,rightcount])", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to 
low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. 
Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def best_cutoff(self,\n split_label):\n split_args = self.sub_split_args[split_label]\n split_data = self.sub_split_data[split_label]\n # This criterion for the use_scipy flag is arbitrary and needs\n # further testing\n n_unique = len(np.unique(split_data[~np.isnan(split_data)]))\n use_scipy = True\n if n_unique > len(split_data)/1000:\n use_scipy = False\n idxcut_below, effects_below, rstats_below, ndata_below =\\\n self.u_data(split_label, use_scipy=use_scipy)\n idxcut_above, effects_above, rstats_above, ndata_above =\\\n self.u_data(split_label, above=True, use_scipy=use_scipy)\n\n # Default cutoff is min(split_data) - 1\n cutoff = split_data[split_args[0]] - 1\n value = 0\n # If no cutoff was possible\n if len(idxcut_below) == 0 or len(idxcut_above) == 0:\n return cutoff, value\n\n # All idx_cutoffs and values for cutoffs, for debugging\n for idx in range(len(idxcut_above)):\n idxcut = idxcut_above[idx]\n if idxcut != idxcut_below[idx]:\n raise NameError('Code error, invalid split')\n value_temp = (abs(effects_above[idx] -\n effects_below[idx]) *\n rstats_above[idx] *\n rstats_below[idx] *\n min(ndata_above[idx]) *\n min(ndata_below[idx]))\n if value_temp > value:\n cutoff = (split_data[split_args[int(idxcut)]] +\n split_data[split_args[int(idxcut)+1]])/2\n value = value_temp\n return cutoff, value", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. 
gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def pick_best_label(self,db,labels,ids):\n self.type = 'v'\n if len(labels) > 0:\n self.value = vote([labels[id] for id in ids])\n else:\n self.value = None\n return", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, 
regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def feature_subset(self,node,db,labels,ids):\n return None", "def get_best_split_all(x, y) -> Tuple[int, float, float]:\n m = x.shape[1]\n col_best_gin = np.ones(shape=m)\n col_best_val = np.ones(shape=m)\n for c in range(m):\n best = 1\n best_x = 0\n for i in np.unique(x[:, c]):\n gini = Tree.split(x[:, c], y, i)\n if gini < best:\n best = gini\n best_x = i\n col_best_gin[c] = best\n 
col_best_val[c] = best_x\n\n # Select best feature to split on\n col_idx = np.argmin(col_best_gin)\n # Convert to bool index\n col_idx = np.array(range(x.shape[1])) == col_idx\n\n return col_idx, col_best_val[col_idx], col_best_gin[col_idx]", "def _find_split(self, X, y, n_features):\r\n splits_info = []\r\n\r\n # Select features to consider\r\n features = self._feature_selection.get_features(n_features, self._feature_prob)\r\n\r\n # Get candidate splits\r\n for feature_id in features:\r\n for split_value in compute_split_values(X[:, feature_id]):\r\n splits_info.append(\r\n compute_split_info(self._split_criterion, X, y, feature_id, split_value, self._min_samples_leaf))\r\n\r\n splits = []\r\n for split_info in splits_info:\r\n if split_info is not None:\r\n gain, feature_id, split_value = split_info\r\n split = Split(feature_id, value=split_value, gain=gain)\r\n splits.append(split)\r\n else:\r\n continue\r\n\r\n selected_split = self._split_chooser.get_split(splits)\r\n return selected_split", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, 
model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def __find_best_split_in_feature(self, feature_and_class):\n\n # sort the feature and class and use changes in the class to reduce\n # number of potential split info gain calculations\n sorted_data = feature_and_class[\n feature_and_class[:, 0].astype(np.int).argsort()]\n potential_splits = self.__find_integers_with_class_change(sorted_data)\n info_gains = self.__info_gain_from_splits(potential_splits,\n sorted_data)\n\n # returning nothing in no information gains are found\n if len(info_gains) == 0:\n return None, None\n\n index = info_gains.index(max(info_gains))\n return info_gains[index], potential_splits[index]", "def find_split(x, y):\n\n # Need the starting entropy so we can measure improvement...\n start_entropy = calculate_entropy(y)\n\n # Best thus far, initialised to a dud that will be replaced immediately...\n best = {'infogain': -np.inf}\n\n # Randomly allocate the splits to be traversed (without replacement)\n feature_total = x.shape[1]\n feature_subset_count = int(np.sqrt(feature_total))\n feature_subset = np.random.permutation(feature_total)[:feature_subset_count]\n\n # Loop every possible split of every feature...\n for feature_index in feature_subset:\n for split in np.unique(x[:, feature_index]):\n\n left_indices = []\n right_indices = []\n\n # Get index of rows where x[row_index,feature_index] <= split\n for row_index,row in enumerate(x):\n left_indices.append(row_index) if x[row_index,feature_index] <= split else right_indices.append(row_index)\n\n left_ys = y[left_indices]\n right_ys = y[right_indices]\n\n nleft = len(left_ys)\n nright = len(right_ys)\n ntotal = nleft + nright\n infogain = start_entropy - (nleft / ntotal) * calculate_entropy(left_ys) - (\n nright / ntotal) * calculate_entropy(right_ys)\n\n if infogain > best['infogain']:\n best = {'feature': feature_index,\n 'split': split,\n 'infogain': infogain,\n 'left_indices': left_indices,\n 'right_indices': right_indices}\n return best", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def getBestDBScanModel ( features):\r\n\r\n\tprint(\"DBScan model\")\r\n\tmodels = []\r\n\tfor nbSamples in range( 2, len(features)//4):\r\n\t\tnbSamples *= 2\r\n\t\tfor distance in range( 1, 26):\r\n\t\t\tdistance /= 50\r\n\t\t\tmodels.append( st.getFittedDBScanModel( features, distance, nbSamples))\r\n\tbestModel, bestScore = st.getBestFittedModel( models, features)\r\n\tif not bestModel:\r\n\t\tprint(\"Regected all models\")\r\n\t\treturn False, -1\r\n\tprint(\"Score:\", bestScore)\r\n\tprint(\"Number of clusters:\", st.getNbClusters(bestModel))\r\n\tprint(\"Max distence:\", bestModel.get_params()[\"eps\"])\r\n\tprint(\"Min number of samples\", bestModel.get_params()[\"min_samples\"])\r\n\treturn bestModel, bestScore", "def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = 
get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question", "def _backward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = tuple(features)\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f not in k:\n continue\n candidate_features = tuple([x for x in k if x != f])\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def choose_best_feature(data_set):\n feature_size = len(data_set[0]) - 1\n base_entropy = calc_entropy(data_set)\n best_info_gain = 0.0; best_feature = -1\n for i in xrange(feature_size):\n feat_list = [eg[i] for eg in data_set]\n unique_values = set(feat_list)\n new_entropy = 0.0\n for value in unique_values:\n sub_ds = splite_dataset(data_set, i, value)\n prob = len(sub_ds) / float(len(data_set))\n new_entropy += prob * calc_entropy(sub_ds)\n info_gain = base_entropy - new_entropy\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n\n return best_feature", "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))", "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def compute_splits(self, G, nw_name='test', train_frac=0.51, split_alg='spanning_tree', owa=True, fe_ratio=1,\n split_id=0, verbose=False):\n # Compute train/test split\n if split_alg == 'random':\n tr_E, te_E = stt.rand_split_train_test(G, train_frac)\n train_E, test_E, G, mp = 
pp.relabel_nodes(tr_E, te_E, G.is_directed())\n elif split_alg == 'naive':\n train_E, test_E = stt.naive_split_train_test(G, train_frac)\n elif split_alg == 'spanning_tree':\n train_E, test_E = stt.split_train_test(G, train_frac)\n elif split_alg == 'fast':\n train_E, test_E = stt.quick_split(G, train_frac)\n train_E_false, test_E_false = stt.quick_nonedges(G, train_frac, fe_ratio)\n elif split_alg == 'timestamp':\n train_E, test_E, G = stt.timestamp_split(G, train_frac)\n train_E = set(zip(train_E[:, 0], train_E[:, 1]))\n test_E = set(zip(test_E[:, 0], test_E[:, 1]))\n else:\n raise ValueError('Split alg. {} unknown!'.format(split_alg))\n\n # Compute non-edges\n if split_alg != 'fast':\n num_fe_train = len(train_E) * fe_ratio\n num_fe_test = len(test_E) * fe_ratio\n if owa:\n train_E_false, test_E_false = stt.generate_false_edges_owa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n else:\n train_E_false, test_E_false = stt.generate_false_edges_cwa(G, train_E, test_E,\n num_fe_train, num_fe_test)\n\n # Set class attributes to new values\n self.set_splits(train_E, train_E_false, test_E, test_E_false, directed=G.is_directed(), nw_name=nw_name,\n split_id=split_id, split_alg=split_alg, owa=owa, verbose=verbose)\n\n return train_E, train_E_false, test_E, test_E_false", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def find_split(self, X, y):\n choices = y.size\n if choices <= 1:\n return None, None\n\n # find the number of each option in the current node.\n options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]\n\n # find the gini of current node.\n best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)\n best_idx, best_split = None, None\n\n # loop through the features to get splits and options.\n for idx in range(self.num_features):\n splits, 
options = zip(*sorted(zip(X[:, idx], y)))\n\n num_left = [0] * self.num_outcomes\n num_right = options_parent.copy()\n for i in range(1, choices):\n c = options[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n\n gini = (i * gini_left + (choices - i) * gini_right) / choices\n\n if splits[i] == splits[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_split = (splits[i] + splits[i - 1]) / 2\n\n return best_idx, best_split", "def load_data(dataset_str, random_split, split_sizes, random_split_seed, add_val, add_val_seed, p_val, active_learning):\n if dataset_str == 'polblogs':\n features, labels, adj = load_polblogs()\n features = sp.lil_matrix(features)\n n, d = features.shape\n idx_train = None\n idx_val = None\n idx_test = None\n else:\n features, labels, adj, n, idx_train, idx_val, idx_test = load_base_data(dataset_str)\n\n train_mask, val_mask, test_mask = get_training_masks(n, features, labels, random_split, split_sizes, random_split_seed,\n add_val, add_val_seed, p_val, idx_train, idx_val, idx_test)\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n if active_learning:\n t = adj.toarray()\n sg = list(nx.connected_component_subgraphs(nx.from_numpy_matrix(t)))\n vid_largest_graph = sg[np.argmax([nx.adjacency_matrix(g).shape[0] for g in sg])].nodes()\n adj = t[vid_largest_graph,:]; adj = adj[:, vid_largest_graph]\n return sp.csr_matrix(adj), sp.csr_matrix(features.toarray()[vid_largest_graph,:]), labels[vid_largest_graph]\n else:\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask" ]
[ "0.59855705", "0.57539624", "0.5735463", "0.57259667", "0.5551452", "0.55483556", "0.5174804", "0.5155585", "0.5107157", "0.50866544", "0.50679135", "0.5035721", "0.50326467", "0.5029078", "0.50207394", "0.4985602", "0.49575225", "0.4956578", "0.4938984", "0.49286735", "0.490701", "0.49064377", "0.4853137", "0.48498058", "0.4830959", "0.4826124", "0.4825611", "0.48152936", "0.48011008", "0.47958982" ]
0.7983181
0
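The row above pairs a docstring about picking an optimal split with a full decision-tree implementation that weighs discrete and continuous splits through a split_cost function. As a much smaller illustration of the same idea — a hedged sketch, not the pick_best_split method itself — the code below scans candidate thresholds for a single continuous feature and returns the one with the lowest size-weighted misclassification error.

```python
from collections import Counter

def misclassification(labels):
    """Fraction of labels that disagree with the majority label."""
    if not labels:
        return 0.0
    return 1.0 - Counter(labels).most_common(1)[0][1] / len(labels)

def best_threshold(values, labels):
    """Return (threshold, cost): the midpoint split of one continuous feature
    that minimises the size-weighted misclassification error of the two sides."""
    pairs = sorted(zip(values, labels))
    best_t, best_cost = None, float("inf")
    for i in range(len(pairs) - 1):
        if pairs[i][0] == pairs[i + 1][0]:
            continue  # no usable threshold between two equal values
        t = (pairs[i][0] + pairs[i + 1][0]) / 2.0
        left = [l for v, l in pairs if v <= t]
        right = [l for v, l in pairs if v > t]
        cost = (len(left) * misclassification(left)
                + len(right) * misclassification(right)) / len(pairs)
        if cost < best_cost:
            best_t, best_cost = t, cost
    return best_t, best_cost

print(best_threshold([1.0, 2.0, 3.0, 4.0], ["a", "a", "b", "b"]))  # (2.5, 0.0)
```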
Learns from a Database instance. Each entry is given a label.
def learn(self,db,labels): self.keys = db.keys[:] labelindex = -1 if isinstance(labels,str): labelindex = db.keys.index(labels) assert labelindex >= 0,"label does not exist in database keys" labels = db.get_column(labelindex) elif isinstance(labels,int): labelindex = labels labels = db.get_column(labelindex) else: assert len(labels) == len(db.entries) self.root = DecisionTreeNode() if labelindex >= 0: raise NotImplementedError("Ooops, taking out indexed label broken") entries = np.delete(entries,labelindex,1) db = IndexedDatabase(db) if self.maxnodes != None: return self.greedy_learn_search(db,labels) else: self.deepest = 0 return self.greedy_learn(self.root,db,labels,range(len(labels)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n (entry))\n return", "def get_by_label(self, label, table, verbose=True):\n assert (self.connected)\n \n theId = -1\n GET_BY_LABEL_COMMAND = \"SELECT id,label FROM {0} WHERE samples.label = \\\"{1}\\\"\".format(table, label)\n \n \n self.cursor.execute(GET_BY_LABEL_COMMAND)\n \n for row in self.cursor:\n theId = row[0]\n break\n \n if verbose and theId != -1: \n print(\"Item with id {0} and label '{1}' retrieved.\".format(theId, label))\n elif verbose: \n print(\"No item in the table '{0}' with the label '{1}' was found.\".format(table, label))\n \n return int(theId)", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? (0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def fetchall(self, databaseName):\n pass", "def retrieve_labels(user_id: int) -> dict:\n user_label_table = dict()\n cur.execute('''SELECT USER_ID, NAME, CONTENT FROM \"labels\"''')\n rows = cur.fetchall()\n for row in rows:\n if user_id == row[0]:\n user_label_table[row[1]] = row[2]\n return user_label_table", "def get_labels():\n return if_found(dao.get_labels())", "def load_pdbbind_labels(labels_file):\n # Some complexes have labels but no PDB files. Filter these manually\n missing_pdbs = [\"1d2v\", \"1jou\", \"1s8j\", \"1cam\", \"4mlt\", \"4o7d\"]\n contents = []\n with open(labels_file) as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n else:\n # Some of the ligand-names are of form (FMN ox). 
Use regex\n # to merge into form (FMN-ox)\n p = re.compile('\\(([^\\)\\s]*) ([^\\)\\s]*)\\)')\n line = p.sub('(\\\\1-\\\\2)', line)\n elts = line.split()\n # Filter if missing PDB files\n if elts[0] in missing_pdbs:\n continue\n contents.append(elts)\n contents_df = pd.DataFrame(\n contents,\n columns=(\"PDB code\", \"resolution\", \"release year\", \"-logKd/Ki\", \"Kd/Ki\",\n \"ignore-this-field\", \"reference\", \"ligand name\"))\n return contents_df", "def find_percepts_to_label(backend, db_name, domain, limit=10):\n\ttimer = DebugTimer('~~~~\tfinding percepts to label')\n\tbackend._ensure_db_exists(db_name)\n\ttimer.tick('ensured db exists')\n\tpercepts = []\n\twith backend.dbs[db_name].get_session(commit=False) as session:\n\t\ttimer.tick('got session')\n\t\traw_sql = \"\"\"\n\t\t\tSELECT percept.id\n\t\t\tFROM percept\n\t\t\tWHERE NOT EXISTS (\n\t\t\t\tSELECT * FROM annotation\n\t\t\t\tWHERE annotation.domain = '{}'\n\t\t\t\tAND annotation.percept_id = percept.id\n\t\t\t)\n\t\t\tORDER BY random()\n\t\t\tLIMIT :limit;\n\t\t\"\"\".format(domain)\n\t\tcol_names = 'id'.split()\n\t\tparams = dict(limit=limit)\n\t\trows = session.execute(text(raw_sql), params=params)\n\t\ttimer.tick('executed sql')\n\t\tpercept_group = dict()\n\t\tthis_percept_id = None\n\t\tfor row in rows:\n\t\t\trowdict = dict(zip(col_names, row))\n\t\t\tpercepts.append(rowdict)\n\t\ttimer.tick('enumerated {} rows'.format(len(percepts)))\n\ttimer.tick('closed session')\n\ttimer.end()\n\treturn percepts", "def fetch_fromDB(self, searchPhrase):\n pass", "def select_node_by_label(conn, label):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes WHERE label=?\", (label,))", "def lookup(conn, language_code, graphic, phonetic, restrictions):\n c = conn.cursor()\n entry_ids = tuple(c.execute('SELECT entry_id FROM lemmas WHERE language = ? AND graphic = ? 
and phonetic = ?', (language_code, graphic, hiragana_to_katakana(phonetic))))\n return tuple(Lexeme(conn, language_code, entry_id, restrictions) for (entry_id,) in entry_ids)", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def _db_store(self, labels: Sequence[Tuple[int, np.ndarray]], table: str) -> None:\r\n # Labels are expected to be\r\n # [\r\n # (class, points),\r\n # (class, points)\r\n # .\r\n # .\r\n # .\r\n # ]\r\n # Where points are np.arrays\r\n # There should also always be one fish in the scene => len(labels) >= 1\r\n\r\n n_points = np.prod(labels[0][1].shape)\r\n\r\n gen = ((self.n, class_, *points.ravel().round(3)) for class_, points in labels)\r\n\r\n # First two \"?\" are for image id and class respectively, rest are for points\r\n sql_command = (\r\n f'INSERT INTO {table} VALUES {(\"?\",\"?\",*[\"?\" for i in range(n_points)])}'\r\n ).replace(\"'\", \"\")\r\n\r\n self.cursor.executemany(sql_command, gen)", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def select(self, labels):\n indexs = []\n \n for i in range(len(labels)):\n indexs.append(self.column_labels.index(labels[i]))\n new_rows = []\n for x in self.rows:\n new_row = []\n for index in indexs:\n new_row.append(x[index])\n new_rows.append(new_row)\n\n\n\n new_Table = T88ble(new_rows, labels)\n\n return new_Table", "def _get_ids_from_label(self, label):\r\n keys = self.list_keys()\r\n results = []\r\n for key in keys:\r\n if key['label'] == label:\r\n results.append(key['id'])\r\n return results", "def lookup(name, db):\n database = load(db)\n matches = [ key for key in database if name in key ]\n if len(matches):\n for name in matches:\n print(\"%s (%s)\" % (name, database[name]))\n else:\n print(\"0 results found\")", "def retrieve_from_db(self):\n pass", "def load(self, theList: DoubleLinkList):\n nextId = self.loadHeadId()\n while nextId:\n rec = self.db.selectById(self.tableName, nextId)\n theList.addNode(appendIt=True, nodeId=rec['nodeId'], childId=rec['childId'],\n label=rec['label'])\n nextId = rec['nextId']", "def acquire_label_by_name(app_label, label_name, obj=None):\n if JeevesLib.doesLabelExist(label_name):\n return JeevesLib.getLabel(label_name)\n else:\n label = JeevesLib.mkLabel(label_name, uniquify=False)\n model_name, field_name, jeeves_id = label_name.split('__')\n\n # Get the model that corresponds to the application label and\n # model name.\n # TODO: Make get_model faster?\n model = apps.get_model(app_label, model_name)\n\n # Gets the current row so we can feed it to the policy.\n # TODO: Figure out why we need the faceted value here...\n obj = model.objects.get(use_base_env=True, jeeves_id=jeeves_id)\n\n restrictor = getattr(model, 'jeeves_restrict_' + field_name)\n JeevesLib.restrict(label, lambda ctxt: restrictor(obj, ctxt), True)\n return label", "def db_values(self, db):", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def gracedb_add_label(gracedb_id, label):\n\n # begin GraceDB API\n client = gracedb_rest.GraceDb()\n\n # append comment to GraceDB entry\n out = 
client.writeLabel(gracedb_id, label)", "def load_database(self):\n # If there is already data, do not load\n if self:\n raise DatabaseError('Data already loaded!')\n\n # Gather all data from the table\n data = self.cursor.execute(\n 'SELECT unique_id, name, wins, time_stamp, '\n 'last_win FROM gungame_winners'\n )\n data = data.fetchall()\n\n # Are there no winners to add?\n if not data:\n return\n\n # Loop through all the past winners and their data\n for unique_id, name, wins, time_stamp, last_win in data:\n\n # Add the current winner to the database\n instance = self[unique_id]\n instance.name = name\n instance.wins = int(wins)\n instance.time_stamp = float(time_stamp)\n instance.last_win = float(last_win)", "def load_data(database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('Disasters', engine)\n X = df['message']\n Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n category_names = Y.columns\n return X, Y, category_names", "def makeDatabaseNamesList(n, ):", "def getLabels(df, eps=3, min_samples=100):\n #instantiate dbscan\n db = DBSCAN(eps=eps, \n min_samples=min_samples, \n metric='euclidean', \n n_jobs=-1\n )\n \n #fit and predict to data\n db.fit_predict(df[['x', 'y']])\n \n #Returns the sorted unique elements of an array\n labels_unique = np.unique(db.labels_)\n #drop the -1 labels which are unlabeled\n labels_unique = labels_unique[labels_unique != -1]\n \n \n return db.labels_, labels_unique", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def loadFromDatabase(self, verbose=True):\n #Connect to database\n conn = sqlite3.connect(self.dbname)\n c = conn.cursor()\n\n if verbose:\n print \"loadFromDatabase: Connected to database '%s' and established cursor\" % \\\n self.dbname\n\n #First, populate list of simulations\n c.execute(\"SELECT * FROM tamas\")\n propList = c.fetchall()\n self.simulations = {}\n if verbose: print \"SELECT answer:\", propList\n \n for prop in propList:\n uid = prop[0]\n pw = prop[2]\n prop = list(prop)\n prop[5] = bool(prop[5]) #sick should be a bool!\n self.simulations[uid] = TamaSimulation(uid, pw)\n self.simulations[uid].readDBValues(prop)\n \n if verbose:\n print \"self.simulations: %s\" % str(self.simulations)\n \n #Load up all items to all tamas\n c.execute(\"SELECT * FROM has\")\n hasList = c.fetchall()\n if verbose: print \"has SELECT answer:\", hasList\n for has in hasList:\n uid, name, amount = has\n for _ in range(amount):\n self.simulations[uid].inventory.append(name)\n\n #Load up all relationships\n c.execute(\"SELECT * FROM knows\")\n knowsList = c.fetchall()\n if verbose: print \"knows SELECT answer:\", knowsList\n for knows in knowsList:\n print \"knows:\", knows\n self.simulations[knows[0]].knows[knows[1]] = knows[2]" ]
[ "0.607027", "0.5603955", "0.52142614", "0.51333624", "0.51184493", "0.5110511", "0.50444543", "0.50385123", "0.50106204", "0.50006604", "0.49866673", "0.49164563", "0.48594052", "0.48593736", "0.48549092", "0.48429736", "0.4827033", "0.48192468", "0.4811291", "0.48048383", "0.47987357", "0.47962224", "0.47835875", "0.47828573", "0.4782045", "0.4780754", "0.4780069", "0.47745526", "0.4748405", "0.47475928" ]
0.6163972
0
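Each row in this dump carries a triplet objective over (query, document, negatives) plus the retrieval scores behind its document_rank of 0. As a hedged illustration of how such a row might be consumed — the field names and helpers below are assumptions, not part of the dataset — one can expand a row into training triplets and sanity-check that the positive document outscores every mined negative.

```python
def row_to_triplets(row, max_negatives=3):
    """Expand one (query, positive document, negatives) row into
    (anchor, positive, negative) triplets for a triplet-style loss."""
    return [(row["query"], row["document"], neg)
            for neg in row["negatives"][:max_negatives]]

def positive_ranks_first(document_score, negative_scores):
    """True when the positive document outscores every negative,
    i.e. the row's document_rank would be 0."""
    return all(document_score > float(s) for s in negative_scores)

row = {
    "query": "Learns from a Database instance. Each entry is given a label.",
    "document": "def learn(self, db, labels): ...",
    "negatives": ["def save_data_to_db(labelled): ...", "def get_labels(): ..."],
}
print(len(row_to_triplets(row)))                                   # 2
print(positive_ranks_first(0.6163972, ["0.607027", "0.5603955"]))  # True
```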
Given an indexed database, greedily and recursively learns the split value for the subtree of the indicated node. The return value is the number of mistakes made by the decision tree. Missing values are handled properly as indicating a 'don't care' value that gets passed down to both sides of the tree.
def greedy_learn(self,node,db,labels,ids): if node.depth >= self.maxdepth or len(ids) <= self.minexamples: #terminate recursion node.pick_best_label(db,labels,ids) err = misclassification_error([labels[id] for id in ids]) if err > 0: print "Reached a leaf and had to make some sacrifices, cost",err print " depth",node.depth print " labels",[labels[id] for id in ids] return err features = self.feature_subset(node,db,labels,ids) cost = node.pick_best_split(db,labels,ids,features) #do a split if node.type == 'v': #base case: no misclassifications """ if cost>0: print "greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero" print "cost=",cost,"misclassification=",misclassification_error([labels[id] for id in ids]) print "# of ids:",len(ids) for i in ids: print "id",i,",", for k in range(db.numFeatures()): if db[k,i] != None: print k,"=",db[k,i],",", print "label",labels[i] raw_input() """ return 0 elif node.type == 's': #print "Picked feature",node.feature,"split" #do a discrete split node.children = dict() #select sub-indices Eids = defaultdict(list) noneids = [] for id in ids: v = db[node.feature,id] if v is None: #item doesn't exist, it's a missing value noneids.append(id) else: Eids[v].append(id) #print " split sizes:",[len(x) for x in Eids.values()] #print " None ids:",len(noneids) ids = None errors = 0 for v,vids in Eids.iteritems(): #recurse c = DecisionTreeNode(node) #print "Recursing on value",v #print " ids:",vids errors += self.greedy_learn(c,db,labels,vids+noneids) node.children[v] = c if c.depth > self.deepest: self.deepest = c.depth print "Decision tree learner: Reached node with depth",self.deepest return errors else: #do an inequality split assert node.type == 'i' #print "Picked feature",node.feature,"inequality value",node.value,"cost",cost leftids = [] rightids = [] for id in ids: if db[node.feature,id] is not None: if db[node.feature,id] <= node.value: leftids.append(id) else: rightids.append(id) else: leftids.append(id) rightids.append(id) if len(rightids) == len(ids) or len(leftids) == len(ids): #due to missing values, this split is useless errors = misclassification_error([labels[id] for id in ids]) print "useless split on feature",node.feature,"value",node.value,"misclassification error",errors print "Left size",len(leftids),"right size",len(rightids) raw_input() node.pick_best_label(db,labels,ids) return errors #clear memory associated with ids list del ids[:] ids = None #print "Left size",len(leftids),"right size",len(rightids) c1 = DecisionTreeNode(node) c2 = DecisionTreeNode(node) #left side errors = self.greedy_learn(c1,db,labels,leftids) #right side errors += self.greedy_learn(c2,db,labels,rightids) #restore index node.children = {0:c1,1:c2} if c1.depth > self.deepest: self.deepest = c1.depth print "Decision tree learner: Reached node with depth",self.deepest return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", "def _split_threshold(self, node):\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # split only if min(children scores) > node.score\n force_split = False\n best_score = node.score\n else:\n # force split: just take the best (even if children are worse)\n force_split = True\n best_score = None\n\n left, right = None, None\n\n # iterate over embedding dimensions (first ones are more reliable)\n # up to max_n_vec (included), until we found an improving split\n for _vec in range(self.n_vec):\n\n # get the candidate thresholds along this dimension\n threshs = self._get_candidate_thresholds(node, _vec)\n\n # look for an improving best split along this eigenvector\n for _t in threshs:\n # compute the split\n below_thresh = self.E[node.ids, _vec] < _t\n _lids = node.ids[below_thresh]\n _rids = node.ids[np.logical_not(below_thresh)]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size\n if is_valid:\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = 
self.get_tube_score(_rids)\n # get the score of this split\n split_score = min(_sl, _sr)\n if best_score is None or split_score > best_score:\n # better split\n best_score = split_score\n node.has_children = True\n node.thresh = _t\n left = SpectralNode(\n _lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, _vec, score=_sr, name=node.name + \"1\")\n\n # check stopping criterion\n if node.has_children:\n # we found an improving split\n if _vec > 0 or not force_split:\n # found an improving non-forced split: stop here\n break\n\n return left, right", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n 
self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def data_split(df, best_feature, info_gain_dict, dt_dict,\r\n curr_node, depth, continous = False):\r\n \r\n depth -= 1\r\n # decrease the depth count\r\n no_data = False\r\n # default flag for data check\r\n match_threshold_df = df[df[best_feature] == info_gain_dict[best_feature][0]]\r\n # subset the data if threshold is matched\r\n if not len(match_threshold_df):\r\n # no more data points\r\n no_data = True\r\n match_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n \r\n mismatch_threshold_df = df[df[best_feature] != info_gain_dict[best_feature][0]]\r\n # subset the data if there is a mismatch\r\n if not len(mismatch_threshold_df):\r\n # if no more data points\r\n no_data = True\r\n mismatch_threshold_df = df\r\n # go back to prev dataframe\r\n else:\r\n pass\r\n decision_tree(match_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"equal\", depth=depth, no_data = no_data)\r\n # function call to grow tree on the left side\r\n decision_tree(mismatch_threshold_df, dt_dict, curr_node, best_feature,\r\n align_dir = \"not_equal\", depth=depth, no_data = no_data)\r\n # function call to grow the tree on the right side\r", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true 
branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertAlmostEqual(self.decision_tree.get_root_node().node_split.criterion_value,\n 2. 
* -0.3 * math.log2(0.3) - 0.4 * math.log2(0.4))", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def leafScore(self) :\n return 0", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def split_count(self) -> int:\n return int(self.graph_tuple_stats.split_count or 0)", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def 
testTreeF(node, test):\n total = len(test)\n success = 0\n for d in test:\n i = searchTreeF(node, d)\n if i == d[-1]:\n success += 1\n return success / total", "def get_next_split ( self, feature_matrix: np.ndarray, target_array: np.ndarray, tree_split: TreeSplits):\n # If only 1 y value, make a leaf node\n if len ( set ( target_array ) ) == 1:\n tree_split.updateTreeValues (\n feature_column = None,\n feature_value = None,\n node_type = None,\n nodes = {},\n children = target_array,\n )\n return tree_split\n\n # Get the presplit entropy\n presplit_entropy = self.evaluate_function ( target_array )\n\n column_values = {}\n for k, v in self.map_column_node_type.items():\n # If there's only one value in feature matrix \"X\", set the split value to infinity\n if len ( set ( feature_matrix [ :, k ] ) ) == 1:\n value = np.inf\n split = None\n class_ratios = 1\n elif v == \"continuous\":\n # Get the best possible continuous split for the column\n split, value, class_ratios = self.get_optimal_continuous_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n else:\n # Get the split value for the discrete column\n value, class_ratios = self.get_optimal_discrete_feature_split (\n feature_matrix = feature_matrix, target_array = target_array, feature_column = k\n )\n split = None\n\n column_values [ k ] = ( split, value, class_ratios )\n\n # Get the column with the largest gain ratio\n col_idx_with_min_value = max (\n column_values,\n key = lambda x: ( presplit_entropy - column_values.get ( x ) [ 1 ] )\n / column_values.get ( x ) [ 2 ],\n )\n\n # If stopping criteria are met or all splits are infinite, terminate the process\n if (\n self.early_stopping_comparison (\n column_values.get ( col_idx_with_min_value ) [ 1 ], self.early_stopping_value\n )\n ) or not np.isfinite ( column_values.get ( col_idx_with_min_value ) [ 1 ] ):\n self.get_terminal_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n node = tree_split,\n feature_matrix = feature_matrix ,\n target_array = target_array,\n )\n return tree_split\n\n # If the best split is continuous, add a continuous node\n if self.map_column_node_type.get ( col_idx_with_min_value ) == \"continuous\":\n return self.get_continuous_node (\n feature_column = col_idx_with_min_value,\n feature_value = column_values [col_idx_with_min_value ] [ 0 ],\n feature_matrix = feature_matrix,\n target_array = target_array,\n node = tree_split,\n )\n\n # Otherwise, add a discrete node.\n else:\n return self.get_discrete_node (\n feature_matrix = feature_matrix,\n target_array = target_array,\n feature_value = column_values [ col_idx_with_min_value ] [ 0 ],\n feature_column = col_idx_with_min_value,\n node = tree_split,\n )\n # End get_next_split", "def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def test_gain_ratio(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n 
min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def _fetch(tree, impurity_crit, dataSet, saved):\n\t\tif tree.cut_off is None:\n\t\t\treturn len(dataSet)*impurity_crit(dataSet), 1\n\n\t\telse:\n\t\t\tD1, D2 = DecisionTree._binarySplit(dataSet, *tree.cut_off)\n\t\t\tleft_impurity, left_leaves = DecisionTree._fetch(tree.left, impurity_crit, D1, saved)\n\t\t\tright_impurity, right_leaves = DecisionTree._fetch(tree.right, impurity_crit, D2, saved)\n\n\t\t\t# find node and set\n\t\t\tsaved.setdefault('node',[]).append(tree)\n\t\t\tsaved.setdefault('set', []).append(dataSet)\n\t\t\t# calculate g(t) for current TreeNode\n\t\t\tg = (len(dataSet)*impurity_crit(dataSet)-left_impurity-right_impurity) / \\\n\t\t\t\t(left_leaves + right_leaves - 1)\n\t\t\tsaved.setdefault('G',[]).append(g)\n\t\t\t\n\t\treturn left_impurity+right_impurity, left_leaves+right_leaves", "def test_depth_returns_correct_value_left_balanced_tree(bst_all_to_left):\n assert bst_all_to_left.depth() == 3", "def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n 
self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 1.0)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def findLeftContext(tree, start, ignore):\t\n nrOfClosingBrs = 0\n nrOfOpeningBrs = 0\n firstPass = True\n for currentIndex in range(start-1,-1,-1):\n if tree[currentIndex].symbol in ignore:\n continue\n elif tree[currentIndex].symbol == \"[\":\n if not firstPass:\n nrOfOpeningBrs = nrOfOpeningBrs + 1\n elif tree[currentIndex].symbol == \"]\":\n nrOfClosingBrs = nrOfClosingBrs + 1\n elif nrOfClosingBrs == nrOfOpeningBrs:\n return(tree[currentIndex])\n firstPass = False\n return(emptyModule())", "def test_tree_4_nodes_left_unbalanced_return_1(balanced_3_nodes):\n balanced_3_nodes.insert(8)\n assert balanced_3_nodes.balance() == 1", "def lmbd(self, lamb):\n\t n = self.nodes\n\n\t \t# The top_k_nodes is a list of all nodes in descending\n\t # order of influence\n\t top_k_nodes = self.top_k(self.nodes)\n\t for i in range(n):\n\t\t\tself.deactivate_all()\n\t\t\tinitially_active = top_k_nodes[:i]\n\n\t\t\ttotal_contrib = i + 1\n\t\t\tfor node in initially_active:\n\t\t\t\ttotal_contrib += self.v(node)\n\n\t\t\tcoverage = total_contrib*1.00/n\n\t\t\tif coverage >= lamb:\n\t\t\t\treturn top_k_nodes[:i]", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev 
node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinstance(labels,int):\n labelindex = labels\n labels = db.get_column(labelindex)\n else:\n assert len(labels) == len(db.entries)\n self.root = DecisionTreeNode()\n if labelindex >= 0:\n raise NotImplementedError(\"Ooops, taking out indexed label broken\")\n entries = np.delete(entries,labelindex,1)\n db = IndexedDatabase(db)\n if self.maxnodes != None:\n return self.greedy_learn_search(db,labels)\n else:\n self.deepest = 0\n return self.greedy_learn(self.root,db,labels,range(len(labels)))" ]
[ "0.55779785", "0.5408219", "0.5375125", "0.52919394", "0.5285259", "0.5265397", "0.523007", "0.5206214", "0.5131763", "0.5119118", "0.5102969", "0.50822806", "0.50254524", "0.50055486", "0.5000578", "0.498862", "0.49706918", "0.4968559", "0.49658915", "0.49245366", "0.48860818", "0.48827252", "0.4872405", "0.48620558", "0.48172337", "0.4815391", "0.4813557", "0.4813403", "0.4786351", "0.47734585" ]
0.5963019
0
Identifies the list of example indices that would follow the decision tree to the given node.
def identify_examples(self,db,labels,node): path = [] while node.parent != None: nkey = None for (k,c) in node.parent().children.iteritems(): if c is node: nkey = k break assert nkey != None path.append((node.parent(),nkey)) node = node.parent() path = path[::-1] nids = len(labels) ids = [] for id in xrange(nids): valid = True for n,ckey in path: f = n.feature val = featureMatrix[f,id] if val is None: #it's a None value, just continue on continue else: key = None if n.type == 'i': key = (0 if val <= n.value else 1) else: key = val if key != ckey: valid = False break if valid: ids.append(id) return ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def labeled_indices(self):\n return self._labeled_indices", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def inspectedIndex(self):\n if self.inspectedNodeIsVisible:\n return self.createIndex(0, 0, self._inspected_item)\n else:\n return self.rootIndex()", "def ligand_idxs(self):\n return self._ligand_idxs", "def get_indications(self):\n indications = np.zeros_like(self.predictions)\n for i in range(self.predictions.shape[0]):\n ind = np.where(self.predictions[i, :] - self.labels != 0.0)[0]\n indications[i, ind] = 1.0\n\n return indications", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def output_node_ids(self):\n return [\n i\n for i in range(\n self.n_inputs + self.n_hidden,\n self.n_inputs + self.n_hidden + self.n_outputs,\n )\n ]", "def input_node_ids(self):\n return [i for i in range(self.n_inputs)]", "def _get_child_indices(self, current_index: int) -> List[int]:\n multiplier = current_index * 2\n left_index = multiplier + 1\n right_index = multiplier + 2\n\n return [left_index, right_index]", "def get_child_indices(idx: int):\n return 2 * idx + 1, 2 * idx + 2", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def index(self):\n return self._ll_tree.get_index()", "def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi", "def get_relevant_indices(dataset, classes, target_classes):\n indices = []\n for i in range(len(dataset)):\n # Check if the label is in the target classes\n label_index = dataset[i][1] # ex: 3\n label_class = classes[label_index] # ex: 'cat'\n if label_class in target_classes:\n indices.append(i)\n return indices", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def py_enumerate_list_index_target():\n target = [None]\n for target[0],k in enumerate(range(1,5)):\n print(target, k)", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def mainIndices(self):\n return self.i1, self.i2", "def hidden_node_ids(self):\n return [i for i in range(self.n_inputs, self.n_inputs + self.n_hidden)]", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def get_custom_indices():\n return [i for i, val in enumerate(all_topics) if val[2] == \"1\"]", "def childWellIndices(self):\n return self._wellIndices", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + 
self.n_hidden + self.n_outputs)]" ]
[ "0.65589315", "0.63866156", "0.6244448", "0.6186481", "0.60354745", "0.5978617", "0.5964173", "0.5956777", "0.589274", "0.5866264", "0.58425367", "0.58398837", "0.58153033", "0.58101755", "0.58045644", "0.57759035", "0.57590544", "0.57380056", "0.57340384", "0.5718878", "0.57145804", "0.5710891", "0.5710064", "0.5708704", "0.56982917", "0.56888497", "0.56849015", "0.56807256", "0.56720734", "0.5664907" ]
0.68075645
0
Same as greedy_learn, but with a maximum number of nodes. Rather than a DFS, this uses a priority queue that at each step splits the node with the maximum improvement in misclassification error. At most maxnodes are in the resulting tree, and the depth is limited to maxdepth. Returns the total number of misclassifications of the training set. There is a low-memory mode when self.lowmem == True, or when self.lowmem == 'auto' and the number of saved ids at a node grows beyond a certain number (self.lowmem_threshold, 10m by default). In low-memory mode, the subset of examples at a given node is determined dynamically, which incurs an O(|D|d) cost per node, where d is the depth of the node. Overall this raises running time by a factor of approximately O(|D| log_2 |D|).
def greedy_learn_search(self,db,labels): queue = PriorityQueue() dolowmem = (self.lowmem == True) numidsets = 0 root_ids = range(len(labels)) queue.push((self.root,root_ids),len(labels)) numnodes = 1 deepest = 0 err = 0 while len(queue) > 0 and numnodes+2 <= self.maxnodes: #print "%d nodes, priority %d"%(numnodes,queue.nextkey()) nerr = queue.nextkey() (node,trainingset) = queue.pop() #print "Greedy learn",len(trainingset) if trainingset is None: trainingset = self.identify_examples(db,labels,node) if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples: #print " Hit depth or training set limit" node.pick_best_label(db,labels,trainingset) err += misclassification_error([labels[id] for id in trainingset]) continue features = self.feature_subset(node,db,labels,trainingset) cost = node.pick_best_split(db,labels,trainingset,features) numidsets -= len(trainingset) #do a split if node.type == 'v': continue elif node.type == 's': #discrete split node.children = dict() #select sub-indices Eids = defaultdict(list) noneids = [] for id in trainingset: v = db[node.feature,id] if v is None: #item doesn't exist, it's a missing value noneids.append(id) else: Eids[v].append(id) #determine whether to switch to low-memory mode if not dolowmem and self.lowmem=='auto': for v,vids in Eids.iteritems(): numidsets += len(vids)+len(noneids) if numidsets > self.lowmem_threshold: print "Decision tree learner switching to low-memory mode" dolowmem = True trainingset = None numnodes += len(Eids) #print "Split sizes",[len(v) for v in Eids.itervalues()] #print "None size",len(noneids) for v,vids in Eids.iteritems(): #print "->",len(vids),"+",len(noneids) #recurse c = DecisionTreeNode(node) node.children[v] = c err = misclassification_error([labels[id] for id in vids+noneids]) cids = (None if dolowmem else vids+noneids) queue.push((c,cids),err) if c.depth > deepest: deepest = c.depth print "Decision tree learner: Reached node with depth",deepest else: #do an inequality split assert node.type == 'i',"Got a weird type? "+str(node.type) leftids = [] rightids = [] for id in trainingset: val = db[node.feature,id] if val is not None: if val <= node.value: leftids.append(id) else: rightids.append(id) else: leftids.append(id) rightids.append(id) if len(leftids)==0 or len(rightids)==0: print "node feature "+str(node.feature)+" doesn't have a valid split value "+str(node.value) vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None] print "min,max of training set:",min(vals),max(vals) print "cost is",cost raw_input() assert len(leftids) > 0 and len(rightids) > 0 if not dolowmem and self.lowmem=='auto': numidsets += len(leftids) + len(rightids) if numidsets > self.lowmem_threshold: print "Decision tree learner switching to low-memory mode" dolowmem = True trainingset = None numnodes += 2 c1 = DecisionTreeNode(node) c2 = DecisionTreeNode(node) node.children = {0:c1,1:c2} #print "->",len(leftids) #print "->",len(rightids) err1 = misclassification_error([labels[id] for id in leftids]) err2 = misclassification_error([labels[id] for id in rightids]) if dolowmem: leftids = None rightids = None queue.push((c1,leftids),err1) queue.push((c2,rightids),err2) if c1.depth > deepest: deepest = c1.depth print "Decision tree learner: Reached node with depth",deepest #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes if len(queue) > 0: print "%d nodes remaining in queue, setting to leaves"%(len(queue),) for (node,trainingset) in queue: node.pick_best_label(db,labels,trainingset) err += misclassification_error([labels[id] for id in trainingset]) return err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def total_max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_max_node_count\")", "def num_actual_nodes(tree):\n return (tree.n_node_samples > 0).sum()", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if 
c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts", "def __count_errors(node, testSet, res):\n training_results = __get_results(node) #Get a dictionary of labels and counts for the *training* data which made it to this node\n leaf_label = None #Initialize a label for this leaf\n majority_count = 0 #Initialize a variable to track the number of observations for the label with the most observations\n #Note that the steps below do not handle ties of the majority count in a nice way.\n for label, count in training_results.items(): #iterate through each pair of labels and counts from the training set\n if count > majority_count: #find the label with the highest count\n leaf_label = label #the label for the leaf is the label with the highest count\n majority_count = count #keep track of the count for the leaf_label\n \n wrong_labels = testSet[res].unique().tolist() #initialize wrong_labels to be all labels in the testSet\n if leaf_label in wrong_labels: #If the leaf label is in the list of labels for the part of the test set that got to this node\n wrong_labels.remove(leaf_label) #remove the leaf_label so that all which remains are incorrect labels\n \n wrong_count = 0 #Initialize a count of how many testSet observations will be classified incorrectly\n testCounts = testSet.groupby(res).size() #Get a series of the testSet labels and how many observations pertain to each label\n for label in wrong_labels: #Iterate over all the labels not equal to the leaf_label\n wrong_count += testCounts[label] #Sum up all of the observations with a label not equal to the leaf_label\n return wrong_count", "def n_trees(self):\n return len(self.data_kd)", "def data_flow_positive_node_count_max(self) -> Optional[int]:\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def max_node_size(self):\n return self.max_node_capacity", "def get_n_best(self):\n pass", "def number_of_nodes(self):\n return int(self._data['number_of_nodes'])", "def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] 
!= -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def min_node_count(self) -> int:\n return pulumi.get(self, \"min_node_count\")", "def essential_node_count(self) -> int:\n return sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def _num_nodes(self):\n return int(self._node_map[-1])", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def Nnodes(self):\n return len(self.nodes)" ]
[ "0.6156422", "0.6155967", "0.59827256", "0.5911448", "0.5853999", "0.583166", "0.5827011", "0.5772355", "0.5760408", "0.570417", "0.5690565", "0.5642428", "0.55889016", "0.556948", "0.5526726", "0.5512847", "0.5484053", "0.54723763", "0.54671174", "0.5442169", "0.54306436", "0.54249865", "0.54195684", "0.54076916", "0.5398807", "0.5395521", "0.53886503", "0.5386159", "0.5379833", "0.53706694" ]
0.62972176
0
Initializes the list. If entries is given, this initializes the entries of the list. If memoized = True, any lazily evaluated entries are saved after their first evaluation.
def __init__(self,entries=None,memoized=False): if entries is not None: self.entries = entries[:] else: self.entries = [] self.memoized = memoized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def init_all_entries(self) -> bool:\n raise NotImplementedError", "def __init__(self, owner, entries=None):\n\n self.owner = owner\n # self.entries = EntriesDict({})\n self.entries = EntriesDict(self)\n\n if entries is None:\n return\n\n # self.add_entries(entries)", "def __init__(self, list_of_entry_names, screen_size, width=100, height=30, x_offset=0, y_offset=0):\n self.entry_names = list_of_entry_names\n self.screen_size = screen_size\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.main_list = ListObj(list_of_entry_names, screen_size, width=width, height=height, x_offset=x_offset,\n y_offset=y_offset)\n self.entry_value_map = dict()\n for _ in list_of_entry_names:\n self.entry_value_map[_] = 0\n self.values_list = None\n self.update_values()", "def __init__(self, contents=()):\n self. data = [ self._Item(k,v) for k,v in contents ] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def __init__(self, entries=[]):\n\n for item in entries:\n self.append(_WebObject(item))", "def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):\n init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,\n dtype=dtypes.float32,\n shape=[num_signatures])\n self._temp_cache_var[graph] = [\n init_value for _ in range(num_traced_tensors)]", "def __init__(self, items=None):\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def __init__(self, init_size=8):\n # Create a new list (used as fixed-size array) of empty linked lists\n self.buckets = [LinkedList() for _ in range(init_size)]", "def __init__(self):\n self.hashmap = [[[],[]] for _ in range(self.N)]", "def init_items(self, lazy=False):\n return []", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def __init__(self, values=None):\n self.values = list()\n self.populate(values)", "def __init__(self, cache=None, num_args=None):\n self.cache = cache if cache is not None else {}\n self.num_args = num_args", "def init_cache(self):\n self.left_lane_cache = list()\n self.right_lane_cache = list()", "def __init__(self, items=None):\n # type: (Optional[List[object]]) -> _WeakList\n list.__init__(self, self._refs(items or []))", "def initialize(self):\n self.assmts = {}\n\n offset = 0\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.bit = 1 << offset\n assmts.mask = assmts.bit\n self.assmts[entry] = assmts\n offset += 1\n\n for block in self.blocks:\n block.stats = block.phis.values() + block.stats\n for stat in block.stats:\n if isinstance(stat, (PhiNode, NameAssignment)):\n stat.bit = 1 << offset\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= stat.bit\n offset += 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bound:\n block.i_kill |= 
self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def test_cache_init(case, method):\n if method == \"init\":\n cache = CacheDict(case.init, cache_len=case.cache_len)\n elif method == \"assign\":\n cache = CacheDict(cache_len=case.cache_len)\n for (key, val) in case.init:\n cache[key] = val\n else:\n assert False\n\n # length is max(#entries, cache_len)\n assert cache.__len__() == case.len\n\n # make sure the first entry is the one ejected\n if case.cache_len > 1 and case.init:\n assert \"one\" in cache.keys()\n else:\n assert \"one\" not in cache.keys()", "def __init__(self, initial_length=2, resizing_factor=2):\n\n # Initialise underlying list, with no elements\n self.main_list = [None] * initial_length\n\n # Initialise variable to store number of elements inserted into\n # main_list, which will always be less than or equal to list length\n self.num_elements = 0\n\n self.resizing_factor = resizing_factor", "def initialize(self):\n self.assmts = {}\n\n bit = 1\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.mask = assmts.bit = bit\n self.assmts[entry] = assmts\n bit <<= 1\n\n for block in self.blocks:\n for stat in block.stats:\n if isinstance(stat, NameAssignment):\n stat.bit = bit\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= bit\n bit <<= 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bounded:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.values():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __init__(self, items=[]):\n self.set = dict((item, True) for item in items)\n self.heap = self.set.keys()\n heapq.heapify(self.heap)", "def __init__(self, iterable=None):\n self._seen = set()\n self._list = []\n if iterable is not None:\n self.extend(iterable)", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.push(item)", "def __init__(self, inference_state, lazy_value_list):\n super(_FakeSequence, self).__init__(inference_state)\n self._lazy_value_list = lazy_value_list", "def __init__(self):\n        self.list=[]\n        self.hashmap={}\n        ", "def _populate(self):\n if not self._populated:\n logging.debug(\"Populating lazy list %d (%s)\" % (id(self), self.__class__.__name__))\n self.populate()\n self._populated = True", "def initialize(self):\r\n for cell in self.free_cell_list:\r\n cell.unlock()\r\n self.add_cell(cell)\r\n self.free_cell_list.clear()", "def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}" ]
[ "0.5535386", "0.54829353", "0.5454826", "0.53897613", "0.5307603", "0.5187028", "0.5180785", "0.514409", "0.50772923", "0.5034808", "0.5021943", "0.5018495", "0.49956906", "0.49621394", "0.49150985", "0.49012715", "0.4892579", "0.48834223", "0.48831782", "0.48757192", "0.485907", "0.48379377", "0.48367697", "0.48344493", "0.4796964", "0.47895494", "0.47819415", "0.4774125", "0.47710106", "0.47707924" ]
0.7950624
0
Given a training and testing dataset, builds a decision tree and tests it
def test_decision_tree(train,test,maxnodes=None): tree = DecisionTree() tree.maxnodes = maxnodes errors = tree.learn(train,'label') print "Decision tree makes",errors,"errors" print "Depth",tree.depth(),"nodes",tree.numNodes() if tree.numNodes() < 100: tree.pprint() if errors > 0: print "Training errors:" for id,e in enumerate(train.entries): res = tree.predict(e[:-1]) if res != e[-1]: if len(e[:-1]) > 10: print " Error on",id,"prediction",res else: print " Error on",e[:-1],"prediction",res print "Testing error:" tp,tn,fp,fn = 0,0,0,0 for e in test.entries: res = tree.predict(e[:-1]) if res and e[-1]: tp += 1 elif res and not e[-1]: fp += 1 elif not res and e[-1]: fn += 1 else: tn += 1 Ntest = len(test.entries) print "True +: %g, True -: %g"%(float(tp)/Ntest,float(tn)/Ntest) print "False -: %g, False +: %g"%(float(fn)/Ntest,float(fp)/Ntest) print "Overall error: %g"%(float(fn+fp)/Ntest,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_and_evaluate_decision_tree(X_train, y_train, X_test, y_test):\n model = DecisionTreeClassifier(criterion='entropy')\n model.fit(X_train, y_train)\n y_pred = model.predict(X_train)\n y_heldPred = model.predict(X_test)\n acc_train = accuracy_score(y_train, y_pred)\n acc_heldOut = accuracy_score(y_test, y_heldPred)\n return acc_train, acc_heldOut", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def run_train_test(training_file, testing_file):\n\n training = parse_file(training_file)\n training = np.array(training)\n\n X_train = training[:,:4]\n Y_train = training[:,4]\n\n testing = parse_file(testing_file)\n testing = np.array(testing)\n\n X_test = testing[:,:4]\n Y_test = testing[:,4]\n\n gini_clf = DecisionTreeClassifier(random_state=0)\n gini_clf.fit(X_train, Y_train)\n gini_Y_hat = gini_clf.predict(X_test)\n gini_tp, gini_tn, gini_fp, gini_fn, gini_err = eval_results(Y_test, gini_Y_hat)\n\n entropy_clf = DecisionTreeClassifier(criterion=\"entropy\", random_state=0)\n entropy_clf.fit(X_train, Y_train)\n entropy_Y_hat = entropy_clf.predict(X_test)\n entropy_tp, entropy_tn, entropy_fp, entropy_fn, entropy_err = eval_results(Y_test, entropy_Y_hat)\n\n return {\n \"gini\":{\n 'True positives': gini_tp,\n 'True negatives': gini_tn,\n 'False positives': gini_fp,\n 'False negatives': gini_fn,\n 'Error rate': gini_err\n },\n \"entropy\":{\n 'True positives': entropy_tp,\n 'True negatives': entropy_tn,\n 'False positives': entropy_fp,\n 'False negatives': entropy_fn,\n 'Error rate': entropy_err\n }\n }", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def create_tree(f_train, f_test, l_train, l_test):\n # initialize model\n model = DecisionTreeClassifier(max_depth=2)\n\n # train it on training data\n model.fit(f_train, l_train)\n\n # gather the model's predictions for train\n train_predictions = model.predict(f_train)\n\n # gather the model's predictions for test\n test_predictions = model.predict(f_test)\n\n # calculate accuaracy of train\n print('Tree Train Accuracy: ', accuracy_score(l_train, train_predictions))\n\n # calculate accuracy of test\n print('Tree Test Accuracy: ', accuracy_score(l_test, test_predictions))\n\n return model", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n 
file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def main():\n \n # 1. Learn a decision tree from the data in training.txt\n print \"--Building trees--\"\n train_examples = read_file('training.txt')\n print(train_examples)\n attrs = range(len(train_examples[0])-1)\n rand_tree = decision_tree_learning(train_examples, attrs, use_gain=False)\n gain_tree = decision_tree_learning(train_examples, attrs, use_gain=True)\n print \"--Done building--\"\n print\n\n # 2. Document the tree you got\n print \"--Random tree--\"\n print_tree(rand_tree)\n print\n print \"--Learn tree--\"\n print_tree(gain_tree)\n print\n\n # 3. Classify all examples in the test-set\n test_examples = read_file('test.txt')\n print \"--Testing random tree--\"\n test(rand_tree, test_examples, attrs)\n print\n print \"--Testing information gain tree--\"\n test(gain_tree, test_examples, attrs)\n print \"--Done testings--\"", "def train_and_evaluate_decision_stump(X_train, y_train, X_test, y_test):\n model = DecisionTreeClassifier(criterion='entropy', max_depth=4)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_train)\n y_heldPred = model.predict(X_test)\n acc_train = accuracy_score(y_train, y_pred)\n acc_heldOut = accuracy_score(y_test, y_heldPred)\n return acc_train, acc_heldOut", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n 
file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([2]), set([0, 1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:1, 1:1, 2:0})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.48)", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def test_machine_learning():", "def __init__(self, data, features, resulting_feature, criterion='entropy'):\n self.__train_data = data\n self.__features = features\n self.__resulting_feature = resulting_feature\n self.__criterion = criterion\n self.__tree = self.__built_tree(self.__train_data, self.__features, self.__resulting_feature, None)\n self.accuracy_of_previous_test = 0", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test 
score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def decision_tree(self, min_impurity_splits = None, is_voice_data = True):\n title = \"Learning Curves (Decision Tree - voice dataset)\"\n if not is_voice_data:\n title = \"Learning Curves (Decision Tree - EEG dataset)\"\n estimators = []\n for min_impurity_split in min_impurity_splits:\n estimator = tree.DecisionTreeClassifier(criterion=\"entropy\", \\\n min_impurity_split = min_impurity_split)\n estimators.append(estimator)\n\n # set colors: r -red, g- green, b - blue, m - magenta\n colors = [(\"r\", \"g\"), (\"b\", \"m\")] \n labels = [(\"Training accuracy (unpruned tree)\", \n \"Cross-validation accuracy (unpruned tree)\"),\n (\"Training accuracy (pruned tree)\", \n \"Cross-validation accuracy (pruned tree)\")]\n \n # Cross validation with 100 iterations to get smoother mean test and train\n # score curves, each time with 30% data randomly selected as a validation set.\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n self.plot_learning_curve(estimators, title, labels, colors, self.X, self.y, \\\n cv=cv, n_jobs=4)\n \n # plot validation curve\n estimator_val = tree.DecisionTreeClassifier (criterion=\"entropy\") \n param_name = \"min_impurity_split\"\n x_label = \"Number of nodes in decision tree\"\n val_title = \"Validation Curve with Decision Tree (voice dataset)\"\n params =[i/100.0 for i in range(1,50)]\n if not is_voice_data:\n val_title = \"Validation Curve with Decision Tree 
(EEG dataset)\"\n params = np.logspace(-0.25, 0, 50)\n number_of_nodes = []\n for param in params:\n clf = tree.DecisionTreeClassifier(criterion=\"entropy\", min_impurity_split = param)\n clf.fit(self.X, self.y)\n number_of_nodes.append(clf.tree_.node_count)\n print number_of_nodes\n self.plot_validation_curve(estimator_val, params, param_name, self.X, \n self.y, val_title, xtricks = number_of_nodes, x_label = x_label,\n cv=cv, n_jobs = 4)\n plt.show()", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc = dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)", "def test_train_dataset(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n expected = [\n {'alpha': 0.6931471805599453,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 1.3},\n {'alpha': 0.9729550745276565,\n 'dim': 1,\n 'inequal': 'lt',\n 'threshold': 1.0},\n {'alpha': 0.8958797346140273,\n 'dim': 0,\n 'inequal': 'lt',\n 'threshold': 0.90000000000000002}\n ]\n self.assertEqual(classifiers, expected)", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 
'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree", "def test_training(self):\n\t\tpass", "def buildDecisionTree(self, data):\n self.data = data\n self.decisionTree = self.buildTree(self.data, self.listAttributes)\n with open(\"decision_tree_model\", \"wb\") as f:\n pickle.dump(self.decisionTree, f, pickle.HIGHEST_PROTOCOL)\n return self.decisionTree", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n 
example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] 
Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True", "def generate_train_test_data(data_dir = '../../att_faces'):\n\n train_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(1, 36)]\n test_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(36, 41)]\n \n true_combinations_train = generate_true_combinations(train_data)\n false_combinations_train = generate_false_combination(train_data, int(len(true_combinations_train) / len(train_data)), 10)\n \n true_combinations_test = generate_true_combinations(test_data)\n false_combinations_test = generate_false_combination(test_data, int(len(true_combinations_test) / len(test_data)), 10)\n \n return prepare_to_classifier(true_combinations_train, false_combinations_train, true_combinations_test, false_combinations_test)", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n 
filep.write(line + \",\\n\")\n return predictions" ]
[ "0.71724963", "0.6940502", "0.6876765", "0.6875179", "0.68419737", "0.68007624", "0.671924", "0.6693489", "0.6642988", "0.6619835", "0.657808", "0.6437057", "0.6418784", "0.63988245", "0.6392599", "0.63923985", "0.6373904", "0.63466936", "0.6346446", "0.6345242", "0.63415146", "0.6334272", "0.63155454", "0.63086283", "0.62700933", "0.6267807", "0.6244437", "0.6231396", "0.6211399", "0.6206716" ]
0.6959578
1
Test the template tag js_settings
def test_js_settings(mocker, rf): mocker.patch( "mitxpro.templatetags.js_interop.get_js_settings", return_value={"data": "value"}, ) request = rf.get("/") context = Context({"request": request}) template = Template(("{% load js_interop %}" "{% js_settings %}")) rendered_template = template.render(context) assert ( rendered_template == """<script type="text/javascript"> var SETTINGS = {"data": "value"}; </script>""" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jssettings(self):\n self.update()\n return \"var %s = %s\" % (self.js_var_settings_name,\n json.dumps(self.settings))", "def test_jssettings(self):\n settings_fullpath = os.path.join(dirs.get_main_js_dir(), \"mediabrute-settings.js\")\n \n if os.path.isfile(settings_fullpath):\n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n call_command(\"mediabrute_jssettings\")\n self.assertTrue(os.path.isfile(settings_fullpath))\n \n os.unlink(settings_fullpath) \n self.assertFalse(os.path.isfile(settings_fullpath))\n \n custom_filename = \"heyo.js\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n custom_filename = \"heyo\"\n custom_fullpath = os.path.join(dirs.get_main_js_dir(), \"heyo.js\")\n \n if os.path.isfile(custom_fullpath):\n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))\n \n call_command(\"mediabrute_jssettings\", custom_filename)\n self.assertTrue(os.path.isfile(custom_fullpath))\n \n os.unlink(custom_fullpath) \n self.assertFalse(os.path.isfile(custom_fullpath))", "def test_js_url(self):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"js\"))\n \n with self.settings(MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.STATIC_URL, \"heyo/yoyo\"))\n \n with self.settings(MEDIABRUTE_USE_STATIC=False, MEDIABRUTE_JS_URL_PATH=\"heyo/yoyo\"):\n self.assertEquals(dirs.get_js_url(), \"%s%s\" % (settings.MEDIA_URL, \"heyo/yoyo\"))", "def module_use_template_javascript(self):\n return False", "def module_use_template_javascript(self):\n return False", "def angular_js_tests(request):\n return locals()", "def get_js_file(self):\n return 'placeholder'", "def check_jsable_context(self, context):\n pass", "def test_js_source(self):\n actual = is_js_source(self.view)\n\n self.assertTrue(actual)", "def test_never_load_jquery_setting(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n result = find_jquery_link(NO_JQUERY)\n self.assertEqual(result, True)", "def test_default_url(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n pattern, url = process_jquery_setting()\n self.assertEqual(\n url,\n 'http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js')", "def test_media_includes_jsi18n(self):\n form = self.form_class(choices={'replacements': self.model.objects.all()})\n self.assertIn(reverse('admin:jsi18n'), form.media._js)", "def test_none_pattern(self):\n with patch_settings(LIVETRANSLATION_JQUERY=None):\n pattern, url = process_jquery_setting()\n self.assertEqual(pattern, None)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]", "def test_js(self, 
model_value):\n return render_to_string(\"autodot/tester.js\", dict(\n test_data = json.dumps(model_value),\n model_name=self.model_name, \n hash=self.hash))", "def test_string_pattern(self):\n with patch_settings(LIVETRANSLATION_JQUERY=u'/jquery.js'):\n pattern, url = process_jquery_setting()\n self.assertEqual(pattern, ur'<script\\s[^>]*src=\"\\/jquery\\.js\"')", "def settings(request):\n gauges = Gauge.objects.all()\n return render_to_response('dashboard/settings.js',{'gauges': gauges} )", "def test_get_mt_settings(self):\n pass", "def enable_javascript(self):\n return self._enable_javascript", "def setJavaScriptMode(self,javaScriptMode):\n self.PDFreactorConfiguration.in1[\"javaScriptMode\"] = javaScriptMode", "def test_get_cached_js(self):\n self.assertEquals(len(api.get_cached_js()), 1)", "def test_searchjs_is_available(self):\n portal = self.layer['portal']\n resreg = getattr(portal, 'portal_registry')\n from Products.CMFPlone.interfaces import IResourceRegistry\n resources_ids = resreg.collectionOfInterface(\n IResourceRegistry, prefix=\"plone.resources\").keys()\n self.assertTrue(\n 'resource-search-js' in resources_ids)", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def test_get_value_json(self):\n val = self.setting_json.get_value()\n self.assertEqual(val, {'Testing': 'good'})", "def test_config(self):\n self.assertEqual(self.view.template_name, \"resources/templanguage_admin.html\")", "def test_string_url(self):\n with patch_settings(LIVETRANSLATION_JQUERY=u'/jquery.js'):\n pattern, url = process_jquery_setting()\n self.assertEqual(url, '/jquery.js')", "def get_filter():\n return render_template(\"filter_js.html\")", "def get_default_javascript():\n return [\"_static/require.js\"]" ]
[ "0.6923433", "0.6532566", "0.6531576", "0.6457588", "0.6457588", "0.6105654", "0.6031209", "0.60102826", "0.5945879", "0.5761625", "0.57324207", "0.5658938", "0.5633526", "0.5626261", "0.5626261", "0.55889726", "0.5578439", "0.5576125", "0.5567976", "0.5524449", "0.54723006", "0.5431144", "0.542574", "0.5400255", "0.5359174", "0.5359174", "0.5351283", "0.5345427", "0.53056467", "0.5301526" ]
0.8117115
0
This function computes the fundamental matrix by computing the SVD of Ax = 0 ; 8point algorithm
def computeFundamentalMatrix(pts1, pts2): A = np.empty((8, 9)) for i in range(len(pts1)-1): x1 = pts1[i][0] x2 = pts2[i][0] y1 = pts1[i][1] y2 = pts2[i][1] A[i] = np.array([x1 * x2, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1]) # Compute F matrix by evaluating SVD U, S, V = np.linalg.svd(A) F = V[-1].reshape(3, 3) # Constrain the F matrix to rank 2 U1, S1, V1 = np.linalg.svd(F) # print('Old S', S) # S[2] = 0 S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]]) # print('New S', S) F = np.dot(np.dot(U1, S2), V1) return F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def invert_L1_svd():", "def visualize_svd():", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return u, s, v", "def svd(self, X): # [5pts]\n N,D = X.shape[0],X.shape[1]\n if X.ndim == 3:\n U = np.zeros((N,N,3))\n S = np.zeros((min(N,D),3))\n V = np.zeros((D,D,3))\n for i in range(3):\n U_temp,S_temp,V_temp = np.linalg.svd(X[:,:,i],compute_uv=True, full_matrices=True,hermitian=False)\n U[:,:,i] = U_temp\n S[:,i] = S_temp\n V[:,:,i] = V_temp\n else:\n U,S,V = np.linalg.svd(X,compute_uv=True,full_matrices=True, hermitian=False)\n return U,S,V", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def eight_points_algorithm(x1, x2, normalize=True):\n N = x1.shape[1]\n\n if normalize:\n # Construct transformation matrices to normalize the coordinates\n T1 = get_normalization_matrix(x1)\n T2 = get_normalization_matrix(x2)\n\n # Normalize inputs\n x1 = T1 @ x1\n x2 = T2 @ x2\n\n # Construct matrix A encoding the constraints on x1 and x2\n A = np.stack((x2[0, :] * x1[0, :],\n x2[0, :] * x1[1, :],\n x2[0, :],\n x2[1, :] * x1[0, :],\n x2[1, :] * x1[1, :],\n x2[1, :],\n x1[0, :],\n x1[1, :],\n np.ones((N,))), 1)\n\n # Solve for f using SVD\n U, S, V = np.linalg.svd(A)\n F = V.T[:, 8].reshape(3, 3)\n\n # Enforce that rank(F)=2\n U, S, V = np.linalg.svd(F)\n S[2] = 0\n F = (U[:, :len(S)] * S) @ V\n\n # Transform F back\n if normalize:\n F = T2.T @ F @ T1\n\n return F", "def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V", "def invert_L2_svd():\n print('Starting SVD 
inversion')\n\n pix2avevel = np.nans(ts.size)\n pix2cumdef = np.nans(ts.size)\n\n for i in np.range(ts.WIDTH):\n print('column {0}'.format(i))\n pix2date = np.zeros(ts.LENGTH, ts.DATES)\n pix2model = np.zeros(ts.LENGTH, ts.DT)\n colPix = np.zeros(ts.LENGTH, ts.IGRAMS)\n\n # concatenate same column from each interferogram into an array\n for j, ig in enumerate(ts):\n column = np.fromfile(ig.NAME, dtype=float16, size=ts.LENGTH)\n colPix[:,j] = column\n\n pix2igram = np.isfinite(colPix)\n coverage = np.fromfile(coverage) #laod DQmap\n iterPixels = np.where(coverage >= ts.igthresh)\n\n #preform pixel-by-pixel inversion\n for k, pixel in enumerate(iterPixels):\n indIG = find(pix2igram[pixel,:])==1\n indDate = unique(ts.timeIndex[indIG,:])\n dtVector = np.diff(ts.Serial(indDate)) / 365.242 #convert years to days\n\n # Set up B matrix\n B = np.zeros(len(indIG), len(dtVector))\n\n print('Done')", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):\n N, _N = a.shape\n assert N == _N, \"Matrix is not square!\"\n # get the eigen-decomposition\n # w, v = np.linalg.eigh(a)\n v, w, u = np.linalg.svd(a)\n sort_index = np.argsort(w)\n w = w[sort_index]\n v = v[:,sort_index]\n # check positive-definiteness\n ev_min = w.min()\n if ev_min <= 0:\n msg = \"Matrix is not positive-definite: min ev = {0}\"\n raise IndefiniteError(msg.format(ev_min))\n # check stability of eigen-decomposition\n if check_stability:\n # XXX use a preconditioner?\n if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):\n raise NumericalError(\n \"Instability in eigh (condition number={:g})\".format(\n (w.max() / w.min())))\n\n # invert the \"large enough\" part of s\n cutoff = rcond * w.max()\n for i in range(N):\n if w[i] > cutoff:\n if square_root:\n # square root of the pseudo-inverse\n w[i] = np.sqrt(1. / w[i])\n else:\n w[i] = 1. 
/ w[i]\n else:\n w[i] = 0.\n # compute the pseudo-inverse (using broadcasting)\n res = np.real(np.dot(v, w[:, np.newaxis] * v.T))\n # check stability of pseudo-inverse\n if check_stability:\n if square_root:\n pa = np.dot(res, res)\n approx_a = np.dot(a, np.dot(pa, a))\n msg = \"Instability in square-root of pseudo-inverse\"\n else:\n approx_a = np.dot(a, np.dot(res, a))\n msg = \"Instability in pseudo-inverse\"\n if not np.allclose(a, approx_a):\n # be a bit laxist by looking at the Mean Squared Error\n mse = np.mean((a - approx_a) ** 2)\n if mse > 1e-16:\n raise NumericalError(\"{} (MSE={:g})\".format(msg, mse))\n return res", "def truncated_svd(A,k=None):", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def compact_svd(A, tol=1e-6):\n #Compute eigenvalues/vectors\n lam, V = la.eig((A.conj().T @ A))\n sig = np.sqrt(lam)\n \n #Sort results\n argB = np.argsort(sig)\n arg = []\n for i in range(0, len(argB)):\n arg.append(argB[len(argB)-1-i])\n sig = sig[arg]\n V = V[:,arg]\n #How many non-zero positive\n r = 0\n for j in range(0, len(sig)):\n if abs(sig[j]) >= tol:\n r += 1\n \n sig1 = sig[:r]\n V1 = np.array(V[:,:r])\n \n# print(np.shape(A))\n# print(np.shape(V1))\n U1 = A@V1\n U1 = U1/sig1\n \n #Return answers\n return U1, sig1, V1.conj().T\n\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def nullOld(A, eps=1e-14):\n\t# Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix\n\tu, s, vh = la.svd(A)\n\tnull_mask = (s <= eps)\n\tnull_space = scipy.compress(null_mask, vh, axis=0)\n\treturn scipy.transpose(null_space)", "def get_stain_matrix(I):", "def visualize_svd():\n A=np.array([[3,1],[1,3]])\n U,s,Vh=truncated_svd(A)\n \n twopi=np.linspace(0,2.*np.pi,360)\n one=np.reshape(np.linspace(0,1,100),(1,100))\n zeros=np.zeros((1,100))\n S=np.vstack((np.reshape(np.cos(twopi),(1,360)),np.reshape(np.sin(twopi),(1,360))))\n e1=np.vstack((zeros,one))\n e2=e1[::-1] \t\n \n s1S=Vh.dot(S)\n s1e1=Vh.dot(e1)\n s1e2=Vh.dot(e2)\n\n s2S=np.diag(s).dot(s1S)\n s2e1=np.diag(s).dot(s1e1)\n s2e2=np.diag(s).dot(s1e2)\n \n s3S=U.dot(s2S)\n s3e1=U.dot(s2e1)\n s3e2=U.dot(s2e2)\n \n \n \n \n\n \n \n plt.subplot(221)\n plt.plot(S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n \n plt.subplot(222)\n plt.plot(s1S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s1e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s1e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n plt.subplot(223)\n plt.plot(s2S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s2e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s2e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.subplot(224) \n \n plt.plot(s3S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s3e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s3e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.show()", "def test_svd(self):\n eigenvectors, eigenvalues = self.svd.run(self.test_matrix)\n\n self.assertEqual(eigenvectors.shape, (100, 100))\n self.assertEqual(eigenvalues.shape, (100,))", "def matrix_svd(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. 
Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n\n # SVD each sector at a time.\n # While doing so, also keep track of a list of all singular values, as\n # well as a heap that gives the negative of the largest singular value\n # in each sector. These will be needed later when deciding how to\n # truncate the decomposition.\n svds = {}\n dims = {}\n minus_next_sings = []\n all_sings = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=self.dtype)\n s = np.empty((m,), dtype=np.float_)\n v = np.empty((m, shp[1]), dtype=self.dtype)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n u, s, v = spsla.svds(\n v, k=maxchi, return_singular_vectors=True\n )\n order = np.argsort(-s)\n u = u[:, order]\n s = s[order]\n v = v[order, :]\n else:\n u, s, v = np.linalg.svd(v, full_matrices=False)\n svd = (s, u, v)\n svds[k] = svd\n dims[k] = 0\n sings = svd[0]\n all_sings.append(sings)\n if 0 not in sings.shape:\n heapq.heappush(minus_next_sings, (-sings[0], k))\n try:\n all_sings = np.concatenate(all_sings)\n except ValueError:\n all_sings = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_sings,\n svds,\n minus_next_sings,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n svds = {k: v for k, v in svds.items() if dims[k] > 0}\n for k, v in svds.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n svds[k] = (v[0][:d], v[1][:, :d], v[2][:d, :])\n else:\n del svds[k]\n\n # Initialize U, S, V.\n d = self.dirs[0]\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=0,\n )\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=np.float_,\n invar=False,\n charge=0,\n )\n V = type(self)(\n [new_dim, self.shape[1]],\n qhape=[new_qim, self.qhape[1]],\n dirs=[d, self.dirs[1]],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=self.charge,\n )\n\n # Set the blocks of U, S and V.\n for k, v in svds.items():\n k_U = (k[0], k[0])\n S[(k[0],)] = v[0]\n U[k_U] = v[1]\n V[k] = v[2]\n\n return U, S, V, rel_err", "def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T", "def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values", "def fit_svd(self):\n\n # U has the eigenvectors of G.Gt as columns ()\n # S has 
square roots of the eigenvalues of G.Gt and Gt.G in its diagonal\n # The square roos of the eigenvalues are called singular values\n # V has the eigenvectors of Gt.G as columns ()\n # full_matrices set to false will set the Vt matrix to a shape m x n\n\n U, S, Vt = linalg.svd(self.norm_matrix, full_matrices=False)\n\n # Compute the eigenvalues\n eig_val = (S ** 2)\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = Vt[:n_components]\n print(\"The principal components have been calculated using svd\", self.components.shape)\n\n return self.components", "def cov_matrix(gx, gy, winsize, alpha):\n\n gx = edge_mirror(gx, winsize)\n gy = edge_mirror(gy, winsize)\n radius_filter = gen_gaussian(winsize)\n radius_filter = numpy.rot90(radius_filter, 2)\n\n lenth = sum(sum(radius_filter))\n\n gx = signal.convolve2d(gx, radius_filter, mode='valid')\n gy = signal.convolve2d(gy, radius_filter, mode='valid')\n\n c11 = numpy.multiply(gx, gx)\n c22 = numpy.multiply(gy, gy)\n c12 = numpy.multiply(gx, gy)\n\n\n # SVD closed form\n lambda1 = (c11 + c22 + numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n lambda2 = (c11 + c22 - numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n numer = c11 + c12 - lambda1\n denom = c22 + c12 - lambda2\n\n ev1 = numpy.zeros_like(numer)\n ev2 = numpy.zeros_like(ev1)\n\n rows, cols = numer.shape\n for r in range(rows):\n for c in range(cols):\n if abs(denom[r, c]) < _opzero:\n if abs(numer[r, c]) < _opzero:\n if abs(denom[r, c]) > abs(numer[r, c]):\n ev1[r, c] = 0\n ev2[r, c] = 1\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n theta = math.atan(-numer[r, c]/denom[r, c])\n ev1 = math.sin(theta)\n ev2 = math.cos(theta)\n\n sv1 = math.sqrt(abs(lambda1[r, c]))\n sv2 = math.sqrt(abs(lambda2[r, c]))\n p = ((sv1 * sv2 + _epsa) / lenth)**alpha\n s1 = (sv1 + 1) / (sv2 + 1)\n s2 = 1. / s1\n c11[r, c] = p * (s1 * ev2 ** 2 + s2 * ev1 ** 2)\n c22[r, c] = p * (s1 * ev1 ** 2 + s2 * ev2 ** 2)\n c12[r, c] = p * (s1 - s2) * ev1 * ev2\n\n c11 = edge_mirror(c11, winsize)\n c12 = edge_mirror(c12, winsize)\n c22 = edge_mirror(c22, winsize)\n\n return c11, c12, c22", "def normalize(self, matrix):\n eigvals, eigvecs = np.linalg.eig(matrix)\n Sdiag = np.diagonal(np.linalg.inv(eigvecs)@matrix@eigvecs)\n S12diag = Sdiag**-.5\n S12 = np.zeros((len(S12diag), len(S12diag)))\n np.fill_diagonal(S12, S12diag)\n return S12", "def Sa(self, x_surface, geom):\n\n return np.zeros((0, 0), dtype=float)", "def calculate_k_SVD(smooth_spreadsheet_matrix, k):\n U_unitary_matrix, singular_value, V_unitary_matrix = linalg.svd(smooth_spreadsheet_matrix)\n S_full_squared_matrix = np.zeros((k, k))\n np.fill_diagonal(S_full_squared_matrix, np.sqrt(singular_value[:k]))\n U_unitary_matrix = U_unitary_matrix[:, :k]\n return U_unitary_matrix, S_full_squared_matrix", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs" ]
[ "0.66048837", "0.6466162", "0.6259937", "0.6250825", "0.62505597", "0.62274474", "0.6104567", "0.6089218", "0.6025379", "0.5982765", "0.597328", "0.590215", "0.58907986", "0.58582675", "0.58575904", "0.584388", "0.58408606", "0.58376825", "0.581499", "0.58008623", "0.5792866", "0.57560784", "0.5754681", "0.573956", "0.5721776", "0.5710557", "0.56707674", "0.5659692", "0.5650968", "0.5639219" ]
0.68444854
0
Leverages the 8-point algorithm and implements RANSAC to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2): # global finalFundamentalMatrix iterations = 50 threshold = 0.01 max_count = 0 n = len(pts1) finalFundamentalMatrix = np.zeros((3, 3)) for i in range(iterations): count = 0 idx = random.sample(range(n - 1), 8) left_pts = pts1[idx] right_pts = pts2[idx] F = computeFundamentalMatrix(left_pts, right_pts) left_feature_inlier = [] right_feature_inlier = [] # print("Sample index: ", len(idx)) for j in range(0, n): homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1]) homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1]) fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left)) # print("Fit for iteration ", i," ", np.abs(fit)) if np.abs(fit) < threshold: left_feature_inlier.append(pts1[j]) right_feature_inlier.append(pts2[j]) count = count + 1 # print('Inlier count', count) inlier_Left = np.array(left_feature_inlier) inlier_Right = np.array(right_feature_inlier) if count > max_count: max_count = count finalFundamentalMatrix = F final_inlier_Left = inlier_Left final_inlier_Right = inlier_Right return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothesis(hypothesis_elements)\n badness = np.array([metric(row,H) for row in data])\n inlier_mask = (badness<inlier_thresh)\n inlier_frac = inlier_mask.mean()\n if inlier_frac>best_frac:\n best_frac, best_hypothesis, best_mask = inlier_frac,H,inlier_mask\n # print(H)\n # print(inlier_mask)\n return best_hypothesis, best_mask", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def estimate_F_ransac(corr, num_iter, inlier_thresh):\n _, inlier_mask = ransac(corr, estimate_F, sym_epipolar_dist, 8, num_iter, inlier_thresh)\n # inlier_mask = np.ones(9)\n # inlier_mask[0] = 0\n F = estimate_F(corr[inlier_mask.astype(np.bool)])\n return F", "def ransac(matches, kp1, kp2, s=4, threshold=3, maxIterations=2000, returnMatches=False, inlierRatio=0.05, ransacRatio=0.6):\n\n sizes_kp1 = [kp1[dt[0].queryIdx].size for dt in matches]\n sizes_kp2 = [kp1[dt[0].trainIdx].size for dt in matches]\n tup_matches_kp1 = [kp1[dt[0].queryIdx].pt for dt in matches]\n tup_matches_kp2 = [kp2[dt[0].trainIdx].pt for dt in matches]\n matches_kp1 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp1])\n matches_kp2 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp2])\n\n cnt_matches = len(matches)\n\n max_matches = []\n max_p1, max_p2 = [], []\n max_p1_sizes, max_p2_sizes = [], []\n max_total = 0\n\n for iter in range(maxIterations):\n # Find Homography based on random sample\n data = random.sample(matches, s)\n data_p1 = np.array([matches_kp1[dt[0].queryIdx] for dt in data])\n data_p2 = np.array([matches_kp2[dt[0].trainIdx] for dt in data])\n homography = homomat(data_p1[:, :2], data_p2[:, :2])\n\n # Find P1 projection from the homography matrix\n projected_p2 = np.dot(homography, matches_kp1.transpose())\n projected_p2 = projected_p2[0:3] / projected_p2[2] # make sure w' is 1\n projected_p2 = projected_p2.transpose()\n\n # Initialize Current Matches\n current_matches = []\n current_p1, current_p2 
= [], []\n current_p1_sizes, current_p2_sizes = [], []\n current_total = 0\n\n # Check for inliers and outliers for each matches\n for i, (match) in enumerate(matches):\n # normalize the error\n error = np.linalg.norm(matches_kp2[i] - projected_p2[i])\n\n # Check for inliers\n if error < threshold:\n current_matches.append([cv.DMatch(current_total, current_total, match[0].distance)])\n current_p1.append(matches_kp1[i][0:2])\n current_p2.append(matches_kp2[i][0:2])\n current_p1_sizes.append(sizes_kp1[i])\n current_p2_sizes.append(sizes_kp2[i])\n current_total += 1\n\n # If\n if current_total > max_total and current_total >= np.round(inlierRatio*cnt_matches):\n max_matches = current_matches\n max_p1 = current_p1\n max_p2 = current_p2\n max_p1_sizes = current_p1_sizes\n max_p2_sizes = current_p2_sizes\n max_total = current_total\n\n # # we are done in case we have enough inliers\n if current_total > cnt_matches * ransacRatio:\n break\n\n\n # Re-evaluate the Homography based on the best inliers\n max_homography = homomat(np.array(max_p1), np.array(max_p2))\n\n if returnMatches:\n max_kp1 = [cv.KeyPoint(d[0], d[1], max_p1_sizes[i]) for i, d in enumerate(max_p1)]\n max_kp2 = [cv.KeyPoint(d[0], d[1], max_p2_sizes[i]) for i, d in enumerate(max_p2)]\n return max_homography, max_matches, max_kp1, max_kp2\n\n return max_homography", "def create_cands(data):\n\n best = np.zeros(data.dim+1)\n best[0:data.dim] = data.xbest\n best[data.dim] = 1-np.sum(data.xbest)\n\n # Ncand times the best value\n cp_e = np.kron(np.ones((data.Ncand, 1)), np.asmatrix(best))\n # This generates random perturbations\n # need dim+1 to account for the \"missing\" value\n r = np.random.rand(data.Ncand, data.dim+1)\n a = r < data.pertP\n idx = np.where(np.sum(a, axis=1) == 0)\n for ii in range(len(idx[0])):\n f = np.random.permutation(data.dim+1)\n a[idx[0][ii], f[0]] = True\n randnums = np.random.randn(data.Ncand, data.dim+1)\n randnums[a == False] = 0\n pv = randnums*data.sigma_stdev\n # Create new points by adding random fluctucations to best point\n new_pts = cp_e+pv\n\n # Iterative, column wise procedure to force the randomly\n # sampled point to be in [0,1]\n for ii in range(data.dim+1):\n vec_ii = new_pts[:, ii]\n adj_l = np.where(vec_ii < data.xlow)\n vec_ii[adj_l[0]] = data.xlow + (data.xlow - vec_ii[adj_l[0]])\n adj_u = np.where(vec_ii > data.xup)\n vec_ii[adj_u[0]] = data.xup - (vec_ii[adj_u[0]]-data.xup)\n stillout_u = np.where(vec_ii > data.xup)\n vec_ii[stillout_u[0]] = data.xlow\n stillout_l = np.where(vec_ii < data.xlow)\n vec_ii[stillout_l[0]] = data.xup\n new_pts[:, ii] = copy.copy(vec_ii)\n\n new_pts = new_pts / np.sum(new_pts, axis=1)\n\n cp_e = copy.copy(new_pts)\n rand_pts = np.asmatrix(np.random.uniform(0, 1, [data.Ncand, data.dim + 1]))\n cp_r = rand_pts/np.sum(rand_pts, axis=1)\n\n CandPoint = np.concatenate((cp_e, cp_r), axis=0)\n # return only data.dim candidate points\n CandPoint_out = CandPoint[:, 0:data.dim]\n\n return CandPoint_out", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" 
+ str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def estimateFundamentalMatrix(x1, x2):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. 
Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n \n F = np.dot(U, np.dot(np.diag(S), V))\n return F", "def pareto_frontier(cmrf,featlist) :\n\tQ = []\n\ttaboodict = {}\n\tnStates = len(featlist)\n\tfeat1,feat2 = featlist\n\tEaxa,Xa = cmrf.decode(feat1)\n\tEbxb,Xb = cmrf.decode(feat2)\n\tif Xa == Xb : \n\t\treturn [Xa],[(Eaxa,Ebxb)]\n\tEaxb = cmrf.score(Xb,feat1)\n\tEbxa = cmrf.score(Xa,feat2)\n\tQ.append((Xa,Xb))\n\tfrontier,frontier_energy = [],[]\n\tfrontier.extend([Xa,Xb])\n\tfrontier_energy.extend([(Eaxa,Ebxa),(Eaxb,Ebxb)])\n\ttaboodict[(Eaxa,Ebxa)] = 1;\n\ttaboodict[(Eaxb,Ebxb)] = 1;\n\twhile len(Q) > 0 :\n\t\t### Optimize \n\t\tXa,Xb = Q[0]\n\t\tQ = Q[1:] # Dequeue\n\t\tEaxb = cmrf.score(Xb,feat1)\n\t\tEbxa = cmrf.score(Xa,feat2)\t\n\t\tEaxa = cmrf.score(Xa,feat1)\n\t\tEbxb = cmrf.score(Xb,feat2)\t\n\t\tm = (Ebxa - Ebxb)/(Eaxa-Eaxb)\n\t\tif m > 0 : \n\t\t\t#stop()\n\t\t\tsys.stderr.write(\"### WARNING : Slope > 0. Cvxhull failed\")\n\t\t\treturn frontier,frontier_energy\n\t\tthetaa = -m/(1-m)\n\t\tthetab = 1/(1-m)\n\t\ttmrf = TMRF(cmrf,[thetaa,thetab],[feat1,feat2])\n\t\tXab = tmrf.decode()[1]\n\t\tEaxab = cmrf.score(Xab,feat1)\n\t\tEbxab = cmrf.score(Xab,feat2)\n\t\tif Xab != Xa and Xab != Xb and \\\n\t\t\tnot taboodict.has_key((Eaxab,Ebxab)) :\n\t\t\t# Check almost equal condition\n\t\t\tif any(map(lambda(x):almost_eq(Eaxab,x[0] or \\\n\t\t\t\talmost_eq(Ebxab,x[1])),taboodict.keys())) : \n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tfrontier.append(Xab)\n\t\t\tfrontier_energy.append((Eaxab,Ebxab))\n\t\t\ttaboodict[(Eaxab,Ebxab)]=1\n\t\t\tQ.extend([(Xa,Xab),(Xab,Xb)])\n\t# Calculate energy of frontier elements\t\n\treturn frontier,frontier_energy", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 
two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n 
np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def fit_plane_ransac(pts, neighbors=None,z_pos=None, dist_inlier=0.05, \n min_inlier_frac=0.60, nsample=3, max_iter=100):\n n,_ = pts.shape\n ninlier,models = [],[]\n for i in range(max_iter):\n if neighbors is None:\n p = pts[np.random.choice(pts.shape[0],nsample,replace=False),:]\n else:\n p = pts[neighbors[:,i],:]\n m = fit_plane(p,z_pos)\n ds = np.abs(pts.dot(m[:3])+m[3])\n nin = np.sum(ds < dist_inlier)\n if nin/pts.shape[0] >= min_inlier_frac:\n ninlier.append(nin)\n models.append(m)\n\n if models == []:\n print (\"RANSAC plane fitting failed!\")\n return #None\n else: #refit the model to inliers:\n ninlier = np.array(ninlier)\n best_model_idx = np.argsort(-ninlier)\n n_refit, m_refit, inliers = [],[],[]\n for idx in best_model_idx[:min(10,len(best_model_idx))]:\n # re-estimate the model based on inliers:\n dists = np.abs(pts.dot(models[idx][:3])+models[idx][3])\n inlier = dists < dist_inlier\n m = fit_plane(pts[inlier,:],z_pos)\n # compute new inliers:\n d = np.abs(pts.dot(m[:3])+m[3])\n inlier = d < dist_inlier/2 # heuristic\n n_refit.append(np.sum(inlier))\n m_refit.append(m)\n inliers.append(inlier)\n best_plane = np.argmax(n_refit)\n return m_refit[best_plane],inliers[best_plane]", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def scoreCirc_PassiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_SUHAD(gen, indi)#TODO\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5)# if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping)# if damping < 60 else 0\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n \n score = 10*r + 
g + 10*d\n\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results", "def ws06(adp1, adp2):\n # print sum(adp1[:3])/3. - sum(adp2[:3])/3.\n adp1 = get_matrix(adp1)\n adp2 = get_matrix(adp2)\n adp1i = np.linalg.inv(adp1)\n adp2i = np.linalg.inv(adp2)\n a = 2 ** 1.5\n b = np.dot(adp1i, adp2i)\n c = np.linalg.det(b)\n\n # if c <= 0:\n # c *= -1\n d = c ** 0.25\n up = a * d\n\n x = adp1i + adp2i\n y = np.linalg.det(x)\n # if y <= 0:\n # y *= -1\n z = y ** 0.5\n R = up / z\n return 100 * (1 - R)", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def computeFundamentalMatrix(pts1, pts2):\n A = np.empty((8, 9))\n for i in range(len(pts1)-1):\n x1 = pts1[i][0]\n x2 = pts2[i][0]\n y1 = pts1[i][1]\n y2 = pts2[i][1]\n A[i] = np.array([x1 * x2, x2 * y1, x2,\n y2 * x1, y2 * y1, y2,\n x1, y1, 1])\n # Compute F matrix by evaluating SVD\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # Constrain the F matrix to rank 2\n U1, S1, V1 = np.linalg.svd(F)\n # print('Old S', S)\n # S[2] = 0\n S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])\n # print('New S', S)\n F = np.dot(np.dot(U1, S2), V1)\n\n return F", "def ransac(matches, kp1, kp2, sample_points=4, iterations=5, inlier_tolerance=3, inlier_ratio=0.45, check=True, return_max_x=False):\n\n best_inlier_count = 0\n best_h = None\n best_inlier_indices = None\n\n # Get all the corresponing matching pairs for both the images\n pts1 = np.array([kp1[m.queryIdx].pt for m in matches])\n pts2 = np.array([kp2[m.trainIdx].pt for m in matches])\n\n # Re-usable variables for all iterations\n homogeneous_pts1 = np.hstack((pts1, np.ones((pts1.shape[0], 1)))).T\n indices = np.arange(len(pts1))\n num_pts = pts1.shape[0]\n required_inliers = inlier_ratio * num_pts\n\n # For number of iterations\n for _ in range(iterations):\n\n # Sample a small set of points from the point match pairs\n indices_to_sample = np.random.choice(indices, sample_points)\n pts1_sample = pts1[indices_to_sample]\n pts2_sample = pts2[indices_to_sample]\n\n # Get the homography matrix\n h = get_homography_matrix(pts1_sample, pts2_sample)\n\n # Find the new points using the homography matrix\n transformed_points = np.dot(h, homogeneous_pts1).T\n\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n\n # Find the distance between the actual and the mapped points\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_indices = distance < inlier_tolerance\n inlier_count = inlier_indices.sum()\n\n # Update the best_h if the current h has more inliers\n if inlier_count > best_inlier_count:\n best_h = h\n best_inlier_indices = inlier_indices\n best_inlier_count = inlier_count\n\n # If required inliers is reached break\n if 
inlier_count > required_inliers:\n break\n\n # Verbose mode - Print the number of inliers\n if check:\n transformed_points = np.dot(best_h, homogeneous_pts1).T\n # Convert it to world coordinates\n last_col = np.copy(transformed_points[:, -1])\n last_col = last_col[:, np.newaxis]\n transformed_points /= last_col\n transformed_points = transformed_points[:, :-1]\n distance = np.linalg.norm(pts2 - transformed_points, axis=1)\n inlier_count = len(distance[distance < inlier_tolerance])\n print('%2.2f of the points are inliers' %\n (inlier_count / num_pts * 100))\n\n # If x coordinates are needed\n if return_max_x:\n max_x_inlier_1 = ceil(pts1[best_inlier_indices].max(axis=0)[0])\n max_x_inlier_2 = ceil(pts2[best_inlier_indices].max(axis=0)[0])\n return best_h, max_x_inlier_1, max_x_inlier_2\n return best_h", "def ransac(data, model, n, k, t, d, debug=False, return_all=False):\n iterations = 0\n bestfit = None\n # besterr = np.inf\n best_inlier_idxs = None\n while iterations < k:\n maybe_idxs, test_idxs = random_partition(n, data.shape[0])\n maybeinliers = data[maybe_idxs, :]\n test_points = data[test_idxs, :]\n maybemodel = model.fit(maybeinliers)\n test_err = model.get_error(test_points, maybemodel)\n # select indices of rows with accepted points\n also_idxs = test_idxs[test_err < t]\n alsoinliers = data[also_idxs, :]\n if len(alsoinliers) > d:\n betterdata = np.concatenate((maybeinliers, alsoinliers))\n bestfit = model.fit(betterdata)\n # better_errs = model.get_error(betterdata, bettermodel)\n # thiserr = np.mean(better_errs)\n # if thiserr < besterr:\n # bestfit = bettermodel\n # besterr = thiserr\n best_inlier_idxs = np.concatenate((maybe_idxs, also_idxs))\n break\n iterations += 1\n if bestfit is None:\n raise ValueError(\"did not meet fit acceptance criteria\")\n if return_all:\n return bestfit, {'inliers': best_inlier_idxs}\n else:\n return bestfit", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def _cce(func, s, sf, bl, bu, mask, icall, maxn, alpha, beta, maxit, printit):\n\n \"\"\"\n List of local variables\n sb(.) = the best point of the simplex\n sw(.) = the worst point of the simplex\n w2(.) = the second worst point of the simplex\n fw = function value of the worst point\n ce(.) = the centroid of the simplex excluding wo\n snew(.) = new point generated from the simplex\n iviol = flag indicating if constraints are violated\n = 1 , yes\n = 0 , no\n \"\"\"\n # Assign the best and worst points:\n sb = s[0,:]\n fb = sf[0]\n sw = s[-1,:]\n fw = sf[-1]\n\n # Compute the centroid of the simplex excluding the worst point:\n ce = np.mean(s[:-1,:],axis=0)\n\n # Attempt a reflection point\n snew = ce + alpha*(ce-sw)\n snew = np.where(mask, snew, sb) # sb should have initial params at mask==False\n\n # Check if is outside the bounds:\n ibound = 0\n # s1 = snew-bl\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 1\n if np.ma.any(np.ma.array(snew-bl, mask=~mask) < 0.): ibound = 1\n\n # s1 = bu-snew\n # idx = (s1<0).nonzero()\n # if idx[0].size != 0: ibound = 2\n if np.ma.any(np.ma.array(bu-snew, mask=~mask) < 0.): ibound = 2\n\n if ibound >= 1:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Reflection failed; now attempt a contraction point:\n if fnew > fw:\n snew = sw + beta*(ce-sw)\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # Both reflection and contraction have failed, attempt a random point;\n if fnew > fw:\n snew = _SampleInputMatrix(1,bl,bu,distname='randomUniform')[0]\n snew = np.where(mask, snew, sb)\n fuc = func(snew)\n fnew = -fuc if maxit else fuc\n icall += 1\n if printit==1: print(' i, f, X: ', icall, fnew, snew)\n\n # end of _cce\n return snew, fnew, icall", "def scoreCirc_ActiveFilter_3(circuit, gen, indi, makeRedundancyInMatrix):\n\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n score = 0\n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_2(gen, indi)\n\n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(20 - damping) if damping < 20 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0)# if gain < 10 else 0.01\n \n THD_Lf = np.array(results['THD_Lf']['nominal'], dtype=float)\n if np.isnan(THD_Lf):\n disfCount = disfCount + 1\n thd_lf = 0\n else:\n thd_lf = THD_Lf-1 if THD_Lf > 1 else 0\n \n THD_Hf = np.array(results['THD_Hf']['nominal'], dtype=float)\n if np.isnan(THD_Hf):\n disfCount = disfCount + 1\n thd_hf = 0\n else:\n thd_hf = THD_Hf-1 if THD_Hf > 1 else 0\n \n #RIN = np.array(results['rin_meas']['nominal'], dtype=float) #--------not in use\n #if np.isnan(RIN):\n # disfCount = disfCount + 1\n # rin = 0\n #else:\n # rin = 1/RIN*1e6 if RIN < 1e7 else 0\n\n isLP = np.array(results['is_LP']['nominal'], dtype=float)\n if np.isnan(isLP):\n disfCount = disfCount + 1\n islp = 0\n else:\n islp = 0 if isLP>0 else 100# np.abs(isLP)\n \n #slope = np.array(results['maxDampingSlope']['nominal'], dtype=float)\n #print slope\n #if np.isnan(slope):\n # disfCount = disfCount + 1\n # slo = 0\n #else:\n # slo = 0 if slope>60 else 60-slope\n \n maxSlope = results['maxDampingSlope']['nominal']\n if type(np.nan) == type(maxSlope) or type(None) == type(maxSlope):\n disfCount = disfCount + 2\n slo = 0\n slof = 0 \n else:\n if len(maxSlope)==2:\n\tslo = 0 if maxSlope[0]>60 else 60-maxSlope[0]\n\tslof = np.log10(abs(maxSlope[1]-1000))\n else:\n\tslo = 0\n\tslof = 0\n\tdisfCount = disfCount + 1 \n \n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n #disfCount = disfCount + 1\n bandwidth = 0\n bw = abs(bandwidth-1000)\n \n StaticOut = not results['isOutVNonStationary']['nominal']\n score = 10*slo + 10*r + (100*StaticOut + 10*(thd_lf + thd_hf) + 1*islp + g)#rin!\n\n #print disfCount\n if disfCount > 0:\n score = 0 + np.exp(disfCount) * 1e3\n #print \"disfCount was there\"\n\n #score = score + (IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n print \"\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n #circuit.objectivesScore = copy(score)\t#id does not work with mpirun since mpirun works with copies\n #circuit.matrixDensity = matrixDensity\n return score, matrixDensity, matrixQuaziID, results", "def question26():\n n = 10\n n2 = n**2\n A = construct_matrix_A(n)\n x0 = np.random.randn(n2)\n b = np.random.randn(n2)\n\n # Compute optimal gamma:\n M, N = construct_M_N(n)\n\n # 
Eigenvalues of M and N are the same, so just use M for this now\n mu_max = scipy.sparse.linalg.eigsh(M, k=1, which='LM', return_eigenvectors=False)[0]\n mu_min = scipy.sparse.linalg.eigsh(M, k=1, which='SM', return_eigenvectors=False)[0]\n\n optimal_gamma_theoretical = np.sqrt(mu_min * mu_max)\n\n # We now verify this using our code:\n gamma_search = np.linspace(0.1, 4, 500)\n iters_array = np.zeros(500, dtype=int)\n\n for i, g in enumerate(gamma_search):\n iters_array[i] = alternative_iterative_method(x0, n, g, b)[1]\n\n min_graph = np.argmin(iters_array)\n min_iter = np.min(iters_array)\n min_gamma = gamma_search[min_graph]\n\n fig260 = plt.figure(figsize=(13, 8))\n plt.plot(gamma_search, iters_array)\n plt.plot(min_gamma, min_iter, 'ro',\n label=f\"Theoretical Gamma = {optimal_gamma_theoretical:.3f}\\n\" \\\n f\"Min Iterations at (Gamma={min_gamma:.3f}, Iters={min_iter})\")\n plt.axvline(x=optimal_gamma_theoretical)\n plt.legend()\n plt.grid()\n plt.xlabel(\"Gamma\")\n plt.ylabel(\"Iterations til Convergence\")\n plt.title(\"Figure 260 - Convergence Steps for Varying Gamma (N=10)\")\n plt.savefig(\"figures/figure260.png\")\n plt.show()\n return", "def scoreCirc_ActiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n #FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_SUHAD(gen, indi)#TODO\n \n \n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 10) if gain < 10 else 0\n \n THD = np.array(results['THD']['nominal'], dtype=float)\n if np.isnan(THD):\n disfCount = disfCount + 1\n thd = 0\n else:\n thd = THD-1 if THD > 1 else 0\n\t \n StaticOut = not results['isOutVNonStationary']['nominal']\n \n score = 5*r + 4*d + 2*g + (100*StaticOut + 10*thd)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n \n ##add a little salt!\n #score = score + random.uniform(0.0, 1)\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n return 
score, matrixDensity, matrixQuaziID, results", "def find_inlier(self):\n len_of_matches = len(self.match)\n # The last line of W stores the whole number of consistency of this match\n self.W = np.zeros((len_of_matches+1, len_of_matches))\n for i in np.arange(len_of_matches):\n for j in np.arange(len_of_matches):\n if i >= j:\n continue\n\n # ASSUMPTION : the index of descriptor is the same with the index of image\n wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]\n wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]\n wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]\n wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]\n\n # Compare and complete the matrix W\n if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:\n self.W[i, j] = 1\n self.W[j, i] = 1\n self.W[len_of_matches, j] += 1\n\n # Choose the best inlier features\n self.best_matches = []\n candidate = np.arange(len_of_matches)\n while True:\n best_matchIdx = self.find_most_compatible_match(candidate)\n if not best_matchIdx or best_matchIdx == -1: # in case no best match is found\n break\n else:\n self.best_matches.append(self.match[best_matchIdx])\n candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)", "def getRowHeuristics(matrix):\n row, col = matrix.shape\n rHeuristic = np.zeros((row,2)) # Dos columnas. La primera para indicar la columna la segunda para la Heuristica\n for i in range(0,row):\n rHeuristic[i,0] = int(i)\n #print (i,sum(matrix[:,i]), pesos[i], float(pesos[i]/sum(matrix[:,i])))\n rHeuristic[i,1] = 1/sum(matrix[i,:])\n return rHeuristic[rHeuristic[:,1].argsort()]", "def __finalize(self,final_data):\n\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\tbest_matrix = self.__set_format_info(copy_input_data,0)\n\t\tbest_matrix = self.__fill_data(best_matrix,final_data,0)\n\t\tmin_penalty = lost_point(best_matrix)\n\t\tbest_mask_pattern = 0\n\t\tfor i in range(1,8):\n\t\t\tcopy_input_data = copy.deepcopy(self.matrix)\n\t\t\ttemp_matrix = self.__set_format_info(copy_input_data,i)\n\t\t\ttemp_matrix = self.__fill_data(temp_matrix,final_data,i)\n\t\t\tpenalty = lost_point(temp_matrix)\n\n\t\t\tif penalty < min_penalty:\n\t\t\t\tbest_matrix = copy.deepcopy(temp_matrix)\n\t\t\t\tbest_mask_pattern = i\n\t\t\t\tmin_penalty = penalty\n\n\t\treturn best_matrix,best_mask_pattern", "def ransac(cloud, sacmodel):\n # Create the segmentation object\n seg = cloud.make_segmenter()\n\n # Set the model you wish to fit \n seg.set_model_type(sacmodel)\n seg.set_method_type(pcl.SAC_RANSAC)\n\n # Max distance for a point to be considered fitting the model\n # Experiment with different values for max_distance \n # for segmenting the table\n max_distance = 0.01\n seg.set_distance_threshold(max_distance)\n\n # Call the segment function to obtain set of inlier indices and model coefficients\n inliers, coefficients = seg.segment()\n return inliers, coefficients", "def scoreCirc_PassiveFilter_2(circuit, gen, indi, makeRedundancyInMatrix):\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = 
float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluatePassiveFilter_2(gen, indi)\n \n disfCount = 0\n\n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 0) if gain < 0 else 0\n\n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n\n slope = np.array(results['dumpingSlope']['nominal'], dtype=float)\n if np.isnan(slope):\n disfCount = disfCount + 1\n slo = 0\n else:\n slo = 0 if slope>60 else 60-slope\n \n bandwidth = np.array(results['bw']['nominal'], dtype=float)\n if np.isnan(bandwidth):\n disfCount = disfCount + 1\n bw = 0\n else:\n bw = abs(bandwidth-1000)/100\n \n #THD = np.array(results['THD']['nominal'], dtype=float)\n #if np.isnan(THD):\n # disfCount = disfCount + 1\n # thd = 0\n #else:\n # thd = THD-1 if THD > 1 else 0\n #print 10*r, g, d, slo, bw\n score = 10*r + g + d + slo + bw\n\n if disfCount > 0:\n score += np.exp(disfCount) * 1e3\n\n #score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n return score, matrixDensity, matrixQuaziID, results" ]
[ "0.6203597", "0.5916464", "0.5894118", "0.5867515", "0.5715989", "0.56956524", "0.56905115", "0.5686345", "0.56403846", "0.55984086", "0.5590803", "0.5577823", "0.5559308", "0.5542196", "0.5525735", "0.55202436", "0.55189615", "0.55174667", "0.5481329", "0.5479639", "0.54680353", "0.5448406", "0.54449385", "0.5394492", "0.5389445", "0.53708994", "0.53643274", "0.5359837", "0.53582895", "0.53452563" ]
0.699575
0
This function computes the essential matrix from the fundamental matrix. The E matrix is defined in normalized image coordinates.
def getEssentialMatrix(K, F):
    E = np.dot(K.T, np.dot(F, K))
    u, s, v = np.linalg.svd(E)
    # We correct the singular values of the E matrix
    s_new = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]).reshape(3, 3)
    final_E = np.dot(u, np.dot(s_new, v))
    return final_E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def P(self):\n self.eigenmatrix()", "def Euler2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1=np.cos(x)\n c2=np.cos(y)\n c3=np.cos(z)\n m=np.array([[c1*c2*c3-s1*s3,-c3*s1-c1*c2*s3,c1*s2],\n [c1*s3+c2*c3*s1,c1*c3-c2*s1*s3,s1*s2],\n [-c3*s2,s2*s3,c2]])\n return m", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def mattock(e, E1, A, B, C):\r\n se = np.sign(e)\r\n ae = np.abs(e)\r\n s = se * E1 * ae * (A + (1 - A) / (1 + (B * ae)**C)**(1 / C))\r\n d = E1 * (A + (1 - A) / (1 + (B * ae)**C)**(1 / C)) - E1 * ae * (1 - A) * B * (B * ae)**(C - 1) / (1 + (B * ae)**C)**(1 / C + 1)\r\n return s, d", "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def _image_orthogonal_matrix22_eigvals(M00, M01, M11):\n tmp1 = M01 * M01\n tmp1 *= 4\n\n tmp2 = M00 - M11\n tmp2 *= tmp2\n tmp2 += tmp1\n cp.sqrt(tmp2, out=tmp2)\n tmp2 /= 2\n\n tmp1 = M00 + M11\n tmp1 /= 2\n l1 = tmp1 + tmp2\n l2 = tmp1 - tmp2\n return l1, l2", "def _compute_eigenmatrix(self, p, expand=False, factor=False,\n simplify=False):\n B = [Matrix(SR, [M[i] for M in p]) for i in range(self._.d + 1)]\n V = SR**(self._.d + 1)\n R = [[self._.d + 1, V, [Integer(1)]]]\n for i in range(1, self._.d + 1):\n S = sorted(([k, m, V.subspace_with_basis(b)]\n for k, b, m in B[i].eigenvectors_right()),\n key=lambda kvb: CoefficientList(kvb[0], self._.vars),\n reverse=True)\n j = 0\n while j < len(R):\n m, s, r = R[j]\n h = 0\n 
while h < len(S):\n k, v, b = S[h]\n sb = s.intersection(b)\n d = sb.dimension()\n if d == v:\n del S[h]\n else:\n S[h][1] -= d\n h += 1\n if d == m:\n R[j][1] = sb\n r.append(k)\n break\n elif d > 0:\n R.insert(j, [d, sb, r + [k]])\n j += 1\n m -= d\n R[j][0] = m\n j += 1\n assert len(R) == self._.d + 1 and all(len(r) == self._.d + 1\n for _, _, r in R), \\\n \"failed to compute the eigenmatrix\"\n return Matrix(SR, [r for _, _, r in R])", "def _init_eigenmatrix(self, P):\n self._.d = nrows(P) - 1\n assert all(len(r) == self._.d + 1 for r in P), \\\n \"parameter length mismatch\"\n P = Matrix(SR, P)\n for i, x in enumerate(P[0]):\n P[0, i] = integralize(x)\n self._.n = sum(P[0])\n return P", "def compute_e(f_mat, m_mat):\r\n return m_mat.T @ f_mat @ m_mat", "def calculate_eigenvalues(self):\n self.__eigenvalues = []\n dictionary = np.linalg.eig(np.array(self.__A))\n indicator = True\n sum1 = 0\n for i in range(self.__A.shape[0]):\n if all(self.__A[i, j] == 0 for j in range(self.__A.shape[1])):\n indicator = all(self.__B[i,j] for j in range(self.__B.shape[1]))\n if (indicator):\n sum1 += 1\n \n for val in dictionary[0]:\n if (val != 0):\n self.__eigenvalues.append(complex(val))\n elif (indicator) and (sum1 > 0):\n sum1 -= 1\n self.__eigenvalues.append(complex(val))", "def eigenCheat( Ja, Jf, truncNum = scipy.inf ):\n H = glueEmH( Ja, Jf, truncNum )\n \n return scipy.linalg.eigh( H )", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return w, v", "def get_E_matrix(dR, dt):\n\n E = np.matmul(\n np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),\n dR\n ).reshape(3, 3)\n return E", "def calculate_cep_matrix(molecule):\n num_atoms = molecule.GetNumAtoms()\n charges = [atom.GetAtomicNum() for atom in molecule.GetAtoms()]\n cep_matrix = np.zeros((num_atoms, num_atoms))\n for bond in molecule.GetBonds():\n bond_type = str(bond.GetBondType()).lower()\n bond_order = get_bond_order(bond_type)\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n degree_i = len(molecule.GetAtomWithIdx(i).GetNeighbors())\n degree_j = len(molecule.GetAtomWithIdx(j).GetNeighbors())\n z_i = degree_i * charges[i]\n z_j = degree_j * charges[j]\n weighted_electronic_distance = ((z_i + z_j) / (bond_order * degree_i * degree_j))\n cep_matrix[i, j] = weighted_electronic_distance\n cep_matrix[j, i] = weighted_electronic_distance\n return cep_matrix", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def fit_evd(self):\n\n # EVD only work on square matrices as we need to compute the eigenvalues and eigenvectors\n # For this we compute the covariance matrix K\n # K should be n x n matrix (pixels x pixels)\n\n # The covariance matrix is nxn\n self.cov_matrix = np.zeros(shape=[self.n_features, self.n_features], dtype='uint8')\n\n self.cov_matrix = np.cov(self.norm_matrix, rowvar=False)\n # C is a symmetric matrix and so it can be diagonalized:\n eig_val, eig_vec = linalg.eig(self.cov_matrix)\n\n # Sorting the eigenvectors by decreasing eigenvalues\n # [Start : stop : stepcount] stepcount is reversed\n idx = eig_val.argsort()[::-1]\n eig_val, eig_vec = eig_val[idx], eig_vec[:, idx]\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = 
np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = eig_vec[:n_components]\n print(\"The principal components have been calculated using eigendecomposition\", self.components.shape)\n\n return self.components", "def compute_normalized_image_to_image_matrix(p1, p2, compute_essential=False):\n n = p1.shape[1]\n if p2.shape[1] != n:\n raise ValueError('Number of points do not match.')\n\n # preprocess image coordinates\n p1n, T1 = scale_and_translate_points(p1)\n p2n, T2 = scale_and_translate_points(p2)\n\n # compute F or E with the coordinates\n F = compute_image_to_image_matrix(p1n, p2n, compute_essential)\n\n # reverse preprocessing of coordinates\n # We know that P1' E P2 = 0\n F = np.dot(T1.T, np.dot(F, T2))\n\n return F / F[2, 2]", "def _emiss_ee(self,Eph):\n if self.weight_ee == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_ee(gam,Eph),\n self._gam, axis=0)\n return emiss", "def get_su_eig(self, delcc):\n pc = SimpleNamespace()\n h = self.h\n if self.rbsize:\n self._inv_mrb()\n if h:\n pc.G = h\n pc.A = h * h / 3\n pc.Ap = h / 2\n if self.unc:\n pv = self._el\n else:\n pv = np.ix_(self._el, self._el)\n if self.m is not None:\n self.m = self.m[pv]\n self.k = self.k[pv]\n self.b = self.b[pv]\n self.kdof = self.nonrf[self._el]\n self.ksize = self.kdof.size\n\n self._el = np.arange(self.ksize) # testing ...\n self._rb = np.arange(0)\n\n if self.elsize:\n self._inv_m()\n A = self._build_A()\n eig_info = eigss(A, delcc)\n pc.wn = eig_info.wn\n pc.zeta = eig_info.zeta\n pc.eig_success = eig_info.eig_success\n if h:\n self._get_complex_su_coefs(pc, eig_info.lam, h)\n self._add_partition_copies(pc, eig_info.lam, eig_info.ur, eig_info.ur_inv)\n return pc", "def head2eeg(self): \n LOG.info(\"Computing Head2EEGMat...\")\n h2s_mat = om.Head2EEGMat(self.om_head, self.om_sensors)\n LOG.info(\"head2eeg: %d x %d\" % (h2s_mat.nlin(), h2s_mat.ncol()))\n return h2s_mat", "def E_to_M(E, ecc):\n with u.set_enabled_equivalencies(u.dimensionless_angles()):\n M = _kepler_equation(E, 0.0 * u.rad, ecc)\n return M", "def E(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating R\", file=self.logfile)\n\n\n TAE = toeplitz(self.A*self.e2[:self.P+1], np.zeros(self.P+1))\n TA = toeplitz(self.A, np.zeros(self.P+1))\n M = np.dot(TAE.transpose(), TA)\n res = toeplitz(np.concatenate([M[:,0], np.zeros((self.L_h-self.P-1))]),\n np.concatenate([M[0,:], np.zeros((self.L_h-self.P-1))]))\n res[-self.P:, -self.P:] = M[1:,1:]\n res = res*np.array([self.e2]).transpose()\n self.R = self.la*self.sigma2*np.linalg.inv(self.la*np.eye(self.L_h) + self.sigma2*res)\n\n\n\n print(\"\", file=self.logfile)\n print(\"Updating mu\", file=self.logfile)\n self.mu = np.dot(self.R, self.h)/self.sigma2\n\n\n # Propagate\n self._propagate_mu()\n self._propagate_R()", "def ensemble_determinant(self):\n return np.linalg.det(self.ensemble_transition_matrix)", "def get_F_matrix_from_E(E, K1, K2):\n F = np.matmul(np.linalg.inv(K2), np.matmul(E,np.linalg.inv(K1)))\n\n return F", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def calc_main_axis(self):\n #Clarify 
why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def compute_image_to_image_matrix(x1, x2, compute_essential=False):\n A = correspondence_matrix(x1, x2)\n # compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3, 3)\n\n # constrain F. Make rank 2 by zeroing out last singular value\n U, S, V = np.linalg.svd(F)\n S[-1] = 0\n if compute_essential:\n S = [1, 1, 0] # Force rank 2 and equal eigenvalues\n F = np.dot(U, np.dot(np.diag(S), V))\n\n return F", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e", "def ema(matrix, alpha):\n\n # declare empty EMA numpy array\n e = np.zeros(matrix.shape[0])\n\n # set the value of the first element in the EMA array\n e[0] = matrix[0]\n\n # use the EMA formula to calculate the value of each point in the EMA array\n for t in range(1, matrix.shape[0]):\n e[t] = alpha*matrix[t] + (1 - alpha)*e[t - 1]\n\n return e" ]
[ "0.60159427", "0.6003989", "0.5936057", "0.5899959", "0.58792543", "0.58560514", "0.5814833", "0.58042705", "0.57690716", "0.5768329", "0.57617337", "0.57117414", "0.5709993", "0.5708076", "0.5683127", "0.56726044", "0.56690747", "0.5665278", "0.5657497", "0.56303555", "0.56117654", "0.5608965", "0.558355", "0.5565742", "0.55606663", "0.5553097", "0.5550065", "0.5544799", "0.5516541", "0.5516541" ]
0.6334938
0
Given the essential matrix, we derive the camera position and orientation
def ExtractCameraPose(E):
    u, s, v = np.linalg.svd(E, full_matrices=True)
    w = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]).reshape(3, 3)
    c1 = u[:, 2].reshape(3, 1)
    r1 = np.dot(np.dot(u, w), v).reshape(3, 3)
    c2 = -u[:, 2].reshape(3, 1)
    r2 = np.dot(np.dot(u, w), v).reshape(3, 3)
    c3 = u[:, 2].reshape(3, 1)
    r3 = np.dot(np.dot(u, w.T), v).reshape(3, 3)
    c4 = -u[:, 2].reshape(3, 1)
    r4 = np.dot(np.dot(u, w.T), v).reshape(3, 3)
    if np.linalg.det(r1) < 0:
        c1 = -c1
        r1 = -r1
    if np.linalg.det(r2) < 0:
        c2 = -c2
        r2 = -r2
    if np.linalg.det(r3) < 0:
        c3 = -c3
        r3 = -r3
    if np.linalg.det(r4) < 0:
        c4 = -c4
        r4 = -r4
    cam_center = np.array([c1, c2, c3, c4])
    cam_rotation = np.array([r1, r2, r3, r4])
    return cam_center, cam_rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def camera_matrix(e, p, t):\n # Translates all points such that the camera is centered at the origin.\n T = np.array([[1, 0, 0, -e[0]],\n [0, 1, 0, -e[1]],\n [0, 0, 1, -e[2]],\n [0, 0, 0, 1]])\n\n # Set up orthonormal basis.\n w = e - p\n w = w / np.linalg.norm(w)\n u = np.cross(t, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n\n # Rotate points such that camera is aligned with UVW-axes (g -> -z-axis).\n R = np.array([[u[0], u[1], u[2], 0],\n [v[0], v[1], v[2], 0],\n [w[0], w[1], w[2], 0],\n [ 0, 0, 0, 1]])\n return R.dot(T)", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. * np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = 
float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def pose2mat(pose):\n extrinsic = torch.eye(4)\n extrinsic[:3, :] = pose[:, :4]\n inv_extrinsic = torch.inverse(extrinsic)\n extrinsic = torch.inverse(inv_extrinsic)\n h, w, focal_length = pose[:, 4]\n intrinsic = torch.Tensor([[focal_length, 0, w/2],\n [0, focal_length, h/2],\n [0, 0, 1]])\n\n return extrinsic, intrinsic", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def 
get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_name, self.pose.header.frame_id))\r\n return None\r\n sensor_location = carla.Location(x=self.pose.pose.position.x,\r\n y=-self.pose.pose.position.y,\r\n z=self.pose.pose.position.z)\r\n quaternion = (\r\n self.pose.pose.orientation.x,\r\n self.pose.pose.orientation.y,\r\n self.pose.pose.orientation.z,\r\n self.pose.pose.orientation.w\r\n )\r\n roll, pitch, yaw = euler_from_quaternion(quaternion)\r\n # rotate to CARLA\r\n sensor_rotation = carla.Rotation(pitch=math.degrees(roll)-90,\r\n roll=math.degrees(pitch),\r\n yaw=-math.degrees(yaw)-90)\r\n return carla.Transform(sensor_location, sensor_rotation)", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def intrinsic_matrix_from_camera(w, h, fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def computeOrientation3D(object, P):\n\n # compute rotational matrix around yaw axis\n R = [[np.cos(object.ry), 0, np.sin(object.ry)],\n [0, 1, 0],\n [-np.sin(object.ry), 0, np.cos(object.ry)]]\n\n # orientation in object coordinate system\n orientation_3D = [[0.0, object.l],\n [0.0, 0.0],\n [0.0, 0.0]]\n\n # rotate and translate in camera coordinate system, project in image\n orientation_3D = R * orientation_3D\n orientation_3D[0, :] += object.t[0]\n orientation_3D[1, :] += object.t[1]\n orientation_3D[2, :] += object.t[2]\n\n # vector behind image plane?\n if any(orientation_3D[2, :] < 0.1):\n orientation_2D = []\n else:\n # project orientation into the image plane\n orientation_2D = projectToImage(orientation_3D, P)\n return orientation_2D", "def matrix(self):\n return self._rotation", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def get_matrix(self, transform):\r\n\r\n rotation = transform.rotation\r\n location = transform.location\r\n c_y = np.cos(np.radians(rotation.yaw))\r\n s_y = np.sin(np.radians(rotation.yaw))\r\n c_r = np.cos(np.radians(rotation.roll))\r\n s_r = np.sin(np.radians(rotation.roll))\r\n c_p = np.cos(np.radians(rotation.pitch))\r\n s_p = np.sin(np.radians(rotation.pitch))\r\n matrix = np.matrix(np.identity(4))\r\n matrix[0, 3] = location.x\r\n matrix[1, 3] = location.y\r\n matrix[2, 3] = location.z\r\n matrix[0, 0] = c_p * c_y\r\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\r\n matrix[1, 0] = s_y * c_p\r\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\r\n matrix[2, 0] = s_p\r\n matrix[2, 1] = -c_p * s_r\r\n matrix[2, 2] = c_p * c_r\r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = 
np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):\n rear_size = 1\n rear_depth = 0\n front_size = image.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val)\n y = (point_2d[5] + point_2d[8])//2\n x = point_2d[2]\n \n return (x, y)", "def get_orientation_matrix(self, p):\n for i in range(3):\n for j in range(3):\n self.Umat[i, j] = p['U%d%d' % (i, j)].value", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def _calculate_camera_pose(frame, K, d, corners, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n img = frame.copy()\n axis = np.float32([[grid_size, 0, 0], [0, grid_size, 0],\n [0, 0, -grid_size]]).reshape(-1, 3)*2\n\n objp = np.zeros((np.prod(pattern_shape), 3), np.float32)\n objp[:, :2] = np.mgrid[0:pattern_shape[0],\n 0:pattern_shape[1]].T.reshape(-1, 2) * grid_size\n\n _, rvecs, tvecs = cv2.solvePnP(objp, corners, K, d)\n R, _ = cv2.Rodrigues(rvecs)\n # project 3D points onto image plane\n imgpts, _ = cv2.projectPoints(axis,\n rvecs, tvecs,\n K, d)\n\n canvas = computer_vision.draw_axis(img, corners, imgpts)\n return R, tvecs, canvas" ]
[ "0.6524759", "0.64992476", "0.6400119", "0.62868214", "0.6233258", "0.62323457", "0.6178379", "0.6016908", "0.6012133", "0.60015947", "0.5997646", "0.59892434", "0.5961536", "0.5942139", "0.59395564", "0.5925271", "0.5909638", "0.58591086", "0.58126867", "0.581134", "0.5802404", "0.5793011", "0.5793011", "0.5782746", "0.5780559", "0.577595", "0.5741405", "0.57348615", "0.5727813", "0.5678761" ]
0.7063537
0
This function returns the extrinsic parameter matrix
def getExtrinsicParameter(K, R, C):
    t = np.dot(-R, C)
    homogeneous_matrix = np.hstack((R.reshape(3, 3), t))
    extrinsic_parameter = np.dot(K, homogeneous_matrix)
    return extrinsic_parameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intrinsic_mat(params):\n return np.asarray(\n [\n [params[0], 0.0, params[1]],\n [0.0, params[2], params[3]],\n [0.0, 0.0, 1.0],\n ]\n )", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose", "def extrinsic(self):\n return self._extrinsic", "def matrix_param(self):\n return self.__matrix_param", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def get_params(self) -> np.array:\n pass", "def getMatrix(self) -> CMatrix4:\n ...", "def _get_params(self):\r\n x = np.hstack((self.X.flatten(), self.X_variance.flatten(), SparseGP._get_params(self)))\r\n return x", "def get_parameters(self):\n return self.sess.run(self.A_symm)", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def get_params_array(self):\n return np.array(self.W), np.array(self.b)", "def get_model_params(self):\n\n results = self._model.fit()\n model_params = np.expand_dims(results.params.as_matrix(), 1)\n return model_params", "def matrix(self):\n return self._matrix(*self.parameters)", "def M(self):\n return _hypre.HypreParMatrix_M(self)", "def get_design_matrix(x):\n\tF = np.ones((10, 1))\n\tF = np.hstack((F, x))\n\n\treturn F", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def _get_params(self):\r\n return np.hstack((self.k1._get_params(), self.k2._get_params()))", "def pack_params(K, k, extrinsic_matrices):\n packed_params = []\n\n # Flatten intrinsics\n alpha, beta, gamma, u_c, v_c = K[0,0], K[1,1], K[0,1], K[0,2], K[1,2]\n k1, k2, k3 ,p1, p2 = k\n\n a = [alpha, beta, gamma, u_c, v_c,k1, k2, k3, p1, p2 ]\n\n packed_params.extend(a)\n\n # Flattened extrinsics\n for E in extrinsic_matrices:\n # Convert extrinsics to flattened Rodrigues representation\n R = E[:3, :3]\n t = E[:, 3]\n\n rodrigues = cv2.Rodrigues(R)[0]\n\n rho_x, rho_y, rho_z = rodrigues\n t_x, t_y, t_z = t\n\n e = [rho_x, rho_y, rho_z, t_x, t_y, t_z]\n\n packed_params.extend(e)\n\n packed_params = np.array(packed_params,dtype=object)\n return packed_params", "def get_params(self):\n return self.arr", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def designMatrix(self,x,m):\n\n phi = []\n\n for i in x:\n matric = []\n for j in range(0, m + 1):\n matric.append(np.power(i,j))\n phi.append(matric)\n return np.asarray(phi)", "def _get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat 
= self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def _xyz_matrix():\n fx = 583.0\n fy = 583.0\n cx = 321\n cy = 249\n a = -0.0028300396\n b = 3.1006268\n mat = np.array([[1/fx, 0, 0, -cx/fx],\n [0, -1/fy, 0, cy/fy],\n [0, 0, 0, -1],\n [0, 0, a, b]])\n return mat", "def current_parameters(self):\n current = []\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n current.append(self.q[core_param].vi_return_param(approx_param))\n return np.array(current)", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def create_design_matrix(self):\n self.design_matrix = np.zeros([self.n, self.p])\n self.design_matrix[:,0] = 1.0 #First comlum is 1 (bias term)\n\n for i in range(self.n):\n for j in range(1,self.p):\n self.design_matrix[i,j] = self.phi(self.x[i],j)\n\n self.design_eigvals = np.linalg.eigvals([email protected]_matrix)", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def get_params(self):\n return deepcopy(np.hstack([to_numpy(v).flatten() for v in\n self.parameters()]))" ]
[ "0.7331173", "0.63358027", "0.63124555", "0.6310796", "0.61443675", "0.61443675", "0.61398846", "0.6122645", "0.6090896", "0.59263265", "0.58786243", "0.5862724", "0.5847503", "0.58337194", "0.57778543", "0.5750955", "0.57484686", "0.57484686", "0.5668891", "0.5660093", "0.56556934", "0.5648544", "0.5635101", "0.5631414", "0.56252795", "0.56166553", "0.56088704", "0.5601754", "0.5601537", "0.5598446" ]
0.6725677
1
removes all Hydrogen atoms from instance
def remove_hydrogens(self) -> None:
    for cid, c in self:
        for rid, r in c:
            for aid, a in r:
                if a.element == 'H':
                    print('removing H at %s' % aid)
                    r.remove_atom(a)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n del self.shx.atoms[self.index]", "def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def _removeOcean(self):\r\n\t\tnodesToClean = [CONST.OCEANDISPSHADER, CONST.OCEANANIMSHADER, CONST.OCEAN_ANIM_PREVIEWPLANENAME]\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass", "def remove_dummy(self) -> None:\n\n for i, atom in enumerate(self):\n if isinstance(atom, DummyAtom):\n del self[i]\n return", "def remove(self, atom):\n try:\n self.hutch.remove_atom(atom)\n except:# AttributeError or ValueError:\n pass\n self.atoms.remove(atom)\n self.natoms -= 1\n self.atomtypes[atom.z] -= 1", "def destroy(self):\n self.remove()\n for inst in reversed(self.insts[:]):\n uses = inst.uses()\n for tmp_inst in uses:\n if tmp_inst.op_name == 'OpPhi':\n IRError('Not implemented: remove from phi node') # XXX\n inst.destroy()\n self.module = None", "def reset():\n for hist in (\"Epair_Etagm\", \"Etagm_Epair\", \"ttagm_pair\", \n \"Epair_Etagm_fit\", \"dEpair_Etagm_fit\"):\n h = ROOT.gROOT.FindObject(hist)\n if h:\n h.Delete()", "def delX(self):\n del self.components[0]", "def 
delX(self):\n del self.components[0]", "def remove():", "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def remove(self):", "def destroyGlobalNuclides():\n global instances\n global byName\n global byDBName\n global byLabel\n global byMcc2Id\n global byMcc3Id\n global byMcnpId\n global byAAAZZZSId\n\n instances = []\n byName.clear()\n byDBName.clear()\n byLabel.clear()\n byMcc2Id.clear()\n byMcc3Id.clear()\n byMcnpId.clear()\n byAAAZZZSId.clear()", "def __call__(self, mol):\n return self.remove(mol)", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def remove_atom(atom_list, atom):\n del atom_list[atom.atom_number - 1]\n del atom\n return atom_list", "def clear(self):\n\n\t\tself.atomid = []\n\t\tself.resi = []\n\t\tself.resn = []\n\t\tself.atom = []\n\t\tself.element = []\n\t\tself.chain = []\n\t\tself.type = []\n\t\tself.inverted = False\n\t\tself.atomlist = []\n\t\tself.keeplist = []\n\t\tself.macros = []\n\n\t\tself.invresi = False\n\t\tself.invresn = False\n\t\tself.invatom = False\n\t\tself.invelement = False\n\t\tself.invchain = False\n\t\tself.invtype = False\n\t\tself.invatomid = False", "def cleanup(self):\n for key in list(self.__dict__.keys()):\n delattr(self, key)", "def purgeHis(atoms):\n for a in atoms:\n if getAtype(a) == \"N\" or getAtype(a) == \"NA\":\n found = 0\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = 1\n break\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def removeMySims(self):\n for sim in self.sims:\n try:\n sim.destroy()\n except:\n sim.removeNode()", "def clean_copy(self):\n # this is a stub implementation\n #return Molecule(\"H2O\")\n m = self._gettokens()\n for t in self._gettokens():\n #if there is value errors or key errors, remove the invalid tokens\n if (t.isalpha() and t not in _atomic_mass) or (t not in \"()\" and not t.isalnum()):\n m.remove(t)\n str2 = \"\".join(m) \n return Molecule(str2)", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def clear_quantities(shared):\n\n del_list = [i for i, fm in enumerate(shared.field_mappings)\n if fm.extra is not None]\n for index in reversed(del_list):\n del shared.field_mappings[index]\n\n shared.config.remove_section('extra')\n shared.config.add_section('extra')", "def __del__(self):\n\n # Base class destructor is called ?? 
needed\n sim.Simulation.__del__(self)\n\n if self.verbose:\n print \"Cleaning derived simulation object LAMMPS1\"\n\n del self.pairCoeffDct\n del self.bondCoeffDct", "def remove_atom(self, atom):\n assert atom.altloc == self\n del self[atom.alt_loc]\n atom.altloc = None", "def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err", "def remove_atom(self, atom):\n assert isinstance(atom, Atom)\n assert atom.model_id == self.model_id \n self.chain_dict[atom.chain_id].remove_atom(atom)", "def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)" ]
[ "0.6937832", "0.6847128", "0.6714238", "0.6525774", "0.64063466", "0.63946915", "0.62644595", "0.6262052", "0.6121503", "0.60625106", "0.60625106", "0.6042291", "0.6015206", "0.5961813", "0.5960132", "0.592724", "0.5911329", "0.58740425", "0.5869668", "0.5853029", "0.5847566", "0.5840705", "0.5833174", "0.5787099", "0.5776653", "0.57677335", "0.57639223", "0.57606786", "0.5751832", "0.57430935" ]
0.7476278
0
collect a set of residues with memb_z within [15, 15]
def memb_residues(pdb: MyPDB) -> list():
    result = []
    for ch in pdb.chains.values():
        for res in ch.values():
            if res.memb_z is not None:
                result.append(res)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_mz(self, mz):\n regions = self.boxes_mz.at(mz)\n it = BoxHolder()\n for r in regions:\n box = r.data\n it.add_box(box)\n return it", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\n continue\n newvec = entry[:8]\n citations = entry[8]['citations']\n citations = filter(lambda a: int(a[:4]) <= int(year), citations)\n newvec[2] = len(citations)\n newlist.append(newvec)\n return newlist", "def filter_pores_by_z(network, pores, z=1):\n pores = network._parse_indices(pores)\n Nz = network.num_neighbors(pores=pores)\n orphans = np.where(Nz == z)[0]\n hits = pores[orphans]\n return hits", "def getLigandNbrs(resids: List[Residue], struct:Structure)->List[ResidueDict]:\n\n ns = NeighborSearch(list( struct.get_atoms() ))\n nbrs = []\n\n for r in resids:\n # a ligand consists of residues\n resatoms = r.child_list[0]\n # each residue has an atom plucked at random\n for nbrresidues in ns.search(resatoms.get_coord(), 5,level='R'):\n # we grab all residues in radius around that atom and extend the list of neighbors with those\n nbrs.extend([nbrresidues])\n\n # Filter out the residues that constitute the ligand itself\n filtered = [] \n for neighbor in nbrs:\n present = 0\n for constit in resids:\n if ResidueDict(constit)==ResidueDict( neighbor ):\n present = 1\n if present == 0:\n filtered.append(ResidueDict(neighbor))\n\n return [ * map(lambda x: addBanClass(x) , set(filtered) ) ]", "def create_subsets(x, y):\n # initiate empty list for return variables.\n sets_x = []\n sets_y = []\n indices = []\n\n # iterate through value of PRI_JET_NUM (ranged inclusively from 0 until 3)\n for pri_jet_num_val in np.unique(x[:,22]):\n \n # Find subset which DER_MASS_MMC is not equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] != -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask)\n\n # Find subset which DER_MASS_MMC is equal to -999\n mask = (x[:,22] == pri_jet_num_val) & (x[:,0] == -999)\n x_tmp = x[mask,:]\n y_tmp = y[mask]\n\n # store the subset into list\n sets_x.append(x_tmp)\n sets_y.append(y_tmp)\n indices.append(mask) \n \n # return subsets of x, y, and corresponding indices\n return sets_x, sets_y, indices", "def get_regions_mask(self, input):", "def _find_members(self, given_members):\n if len(list(self.points)) > 3:\n out_mem = [m for m in given_members if\n self.intersects_poly(m.polygon)]\n else:\n out_mem = []\n return out_mem", "def fetchCooler(c, regions, coolerFetch = lambda coo, ext:coo.matrix(balance=True, sparse=True).fetch(ext),\n mask=True, force=False, ):\n regions = [list(i) for i in regions]\n resolution = c.binsize\n\n for i in regions:\n if i[1] == None:\n i[1] = 0 \n if i[2] == None:\n i[2] = c.chromsizes[i[0]]\n\n \n for a in regions: \n if str(a[0]) not in c.chromnames:\n raise ValueError(\"Chromosome {0} from regions not found in cooler\".format(a))\n if (a[1] % resolution) != 0:\n raise ValueError(\"Start of an region should be a multiple fo resolution\")\n \n# bins = c.bins()[:]\n \n# # managing masks \n# if mask is False: \n# bins[\"mask\"] = 1 \n# elif mask is None:\n# assert \"mask\" in bins.columns\n# elif mask is True: \n# pass \n# elif callable(mask):\n# pass \n# else:\n# bins[\"mask\"] = mask \n \n \n for region in regions:\n matrix = coolerFetch(c, region)\n try: # setting matrix nans to zeros.\n matrix.data = np.nan_to_num(matrix.data, copy=False)\n except TypeError: #workaround for old numpy 
versions\n matrix.data = np.nan_to_num(matrix.data)\n# st,end = c.extent(region)\n# subbins = bins[st:end].copy()\n if mask is True: \n newmask = np.array((matrix.sum(axis=0) > 0 ))[0]\n# if callable(mask):\n# new_mask = mask(matrix)\n# subbins[\"mask\"] = newmask \n\n assert len(newmask) == matrix.shape[0]\n\n yield matrix, newmask", "def get_relevant_zones(array,threshold=3):\n\n\treturn [item for item in array if len(item)>3]", "def roi_vecs(layer_coords, vec_coords, region):\n \n if region == 'crown':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n \n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(start_x, stop_x+5))]\n \n return roi_vec_coords\n \n elif region == 'fundus':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n\n # roi_vec_coords = [i for i in vec_coords if i[1][0] in list(range(start_x-10, stop_x+3))]\n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(stop_x, start_x))]\n \n # print(roi_vec_coords)\n return roi_vec_coords", "def bounds(self, resids: NDArray) -> List[Tuple[float, float]]:", "def get_zones(array,kind,relevant=False,threshold=3):\n\n\tresulting_set=[]\n\n\ti=0\n\tif array[i]==kind:\n\t\tcount=1\n\telse:\n\t\tcount=0\n\n\twhile i<len(array):\n\t\t\n\t\tif array[i]==kind:\n\t\t\tcount+=1\n\t\telif array[i]!=kind and array[i-1]==kind:\n\t\t\tresulting_set.append(([kind]*count,i-count))\n\t\t\tcount=0\n\t\telse:\n\t\t\tpass\n\n\t\ti+=1\n\n\tif count>0:\n\t\tresulting_set.append(([kind]*count, i-count))\n\n\tif relevant == False:\n\t\treturn resulting_set\n\telse:\n\t\treturn [item for item in resulting_set if len(item[0])>threshold]", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def get_feats(mz_list,intensity_list,feat_matrix,instance_index,feats,max_dist=275,allowed_c=[]):\n\t# UNCOMMENT var below if standard library combinations is used\n\t#allowed_c = set(allowed_c)\n\t\n\tspectrum = zip(mz_list,intensity_list)\n\tdists_mz = []\n\tdists_mz_intens = []\n\tprev_analyzed = set()\n\t\n\t#Make deepcopy since we are going to change the spectra!\n\tspec_one = copy.deepcopy(spectrum)\n\tspec_two = copy.deepcopy(spectrum)\n\t\n\t#Iterate over the peaks and measure the distance in m/z between all combinations\n\tfor peak_one in spec_one:\n\t\tif len(spec_two) == 1: continue\n\t\tspec_two = spec_two[1:]\n\t\tfor peak_two in spec_two:\n\t\t\tdist_mz = abs(peak_one[0]-peak_two[0])\n\t\t\tif dist_mz > max_dist: 
break\n\t\t\tdists_mz.append(dist_mz)\n\t\t\tdists_mz_intens.append(peak_one[1]+peak_two[1])\n\t\n\t# UNCOMMENT code below if standard library combinations is used\n\t#for c in combinations(spectrum,2):\n\t#\tdist_mz = abs(c[0][0]-c[1][0])\n\t#\tif c[0][0] in prev_analyzed: continue\n\t#\tif dist_mz > max_dist: \n\t#\t\tprev_analyzed.add(c[0][0])\n\t#\t\tcontinue\n\t#\tif len(allowed_c) != 0:\n\t#\t\tif dist_mz not in allowed_c: continue\n\t#\tdists_mz.append(dist_mz)\n\t#\tdists_mz_intens.append(c[0][1]+c[1][1])\n\t\n\t#Digitize the delta m/z; assign bins for all delta m/z s\n\tindex_bins = np.digitize(dists_mz,feats)\n\t\n\t#Iterate over assigned bins and sum the intensity for possible existing values\n\tfor index,intens in zip(index_bins,dists_mz_intens):\n\t\tfeat_matrix[instance_index,index-1] += intens\n\n\treturn(feat_matrix)", "def get_data(n):\n data = pd.read_csv('map_data/lior_results_2.csv')\n data = data.drop(['estimated_mass', 'estimated_pop'], axis=1)\n data = data[data.binomial != 'Sus scrofa'] # Wild Boar\n data = data[data.binomial != 'Ursus maritimus'] # Polar bear\n data = data[data.binomial != 'Sus bucculentus'] # EX\n data = data[data.binomial != 'Melomys rubicola'] # EX\n data = data.assign(total_mass=data.AdultBodyMassG * data.pop_density * data.Range,\n total_mass_density=data.AdultBodyMassG * data.pop_density)\n data = data.sort_values(by='total_mass_density', ascending=False)\n data = data.iloc[0:n - 1]\n geo_data = gpd.read_file('TERRESTRIAL_MAMMALS/TERRESTRIAL_MAMMALS.shp').to_crs(\"EPSG:6933\")\n geo_data = geo_data[geo_data.category != 'EX']\n range_polygons = geo_data.loc[(geo_data['legend'] == 'Extant & Introduced (resident)') |\n (geo_data['legend'] == 'Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Extant & Reintroduced (resident)') |\n (geo_data['legend'] == 'Extant & Vagrant (seasonality uncertain)') |\n (geo_data['legend'] == 'Extant (non breeding)') |\n (geo_data['legend'] == 'Extant (resident)') |\n (geo_data['legend'] == 'Probably Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Probably Extant (resident)') |\n (geo_data['legend'] == 'Reintroduced')]\n range_polygons = range_polygons.merge(data, on='binomial')\n range_polygons = range_polygons.to_crs(\"EPSG:6933\")\n return range_polygons", "def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]", "def check_point(self, mz, rt):\n regions = self.boxes_mz.at(mz)\n hits = set()\n for r in regions:\n if r.data.rt_match(rt):\n hits.add(r.data)\n return hits", "def get_carboxyl_map(atom_list):\n carboxyl_map = [[atom_list[x], atom_list[x+1], atom_list[x+2], atom_list[x+3]] for x in range(len(atom_list)-3) if ((atom_list[x].residue_name == atom_list[x+1].residue_name == atom_list[x+2].residue_name == atom_list[x+3].residue_name == \"C1A\") and (atom_list[x].residue_number == atom_list[x+1].residue_number == atom_list[x+2].residue_number == atom_list[x+3].residue_number) and (atom_list[x].atom_name != \"CY\" != atom_list[x+1].atom_name != atom_list[x+2].atom_name != \"CY\" != atom_list[x+3].atom_name ))]\n return carboxyl_map", "def filter_residues(residues, biomolecule='PROTEIN'):\n biomolecule = biomolecule.strip().upper()\n standard_residues = []\n for res in residues:\n if res.get_resname().strip() in STANDARD_RESIDUES[biomolecule]:\n if not res.id[0].strip(): standard_residues.append(res) # filter out hetro residues\n return standard_residues", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = 
(x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def select_regions(binary,f,min=0,nbest=100000):\n if binary.max() == 1:\n labels,_ = label(binary)\n else:\n labels = binary.astype(uint8)\n objects = find_objects(labels)\n scores = [f(o) for o in objects]\n best = argsort(scores)\n keep = zeros(len(objects)+1,'i')\n if nbest > 0:\n for i in best[-nbest:]:\n if scores[i]<=min: continue\n keep[i+1] = 1\n # print scores,best[-nbest:],keep\n # print sorted(list(set(labels.ravel())))\n # print sorted(list(set(keep[labels].ravel())))\n return keep[labels]", "def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res", "def extract_upstream_for_meme(genomes, locuses, upstream, radius, overlap):\n\n records = []\n for genome in genomes:\n feature_len = len(genome.features)\n\n index = 0\n locations = set()\n for feature in filter(lambda f: f.type == \"CDS\", genome.features):\n locus = feature.qualifiers[\"locus_tag\"][0] \n if locus in locuses:\n locations.add(index)\n for i in range(index - radius, index + radius):\n locations.add(i)\n \n index += 1\n\n print(locations)\n records += extract_upstream(locations, genome, upstream, overlap)\n\n return records", "def getratios(results_condition,conditions):\n setlist = {}\n for r in range(conditions):\n setlist[r] = []\n \n for gene in results_condition.genelist:\n conditions = len(gene.logfold)\n\n count = 0\n for set_ in setlist:\n if gene.logfold[count] > 1.5 or gene.logfold[count] < -1.5 :\n setlist[count].append(gene.name)\n count +=1\n 
return setlist", "def extract_pi_region(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,min_nsites=0,min_variants=0,verbose=\"min\",called=True,output=\"pi\"):\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tpi_values=[]#list \n\tnsites_considered=0#iterator for sampling frequency\n\ttotal_nsites=0\n\tnvariants=0# iterator for sites that are varying\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1 \n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1\n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\tif verbose==True or verbose==\"min\":print \"nvariants:\",nvariants,\"nsites_considered:\",nsites_considered\n\tif output==\"pi\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn pi_value\n\t\telse:\n\t\t\treturn \"NA\"\n\telif output==\"extended\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn [nvariants,nsites_considered,pi_value]\n\t\telse:\n\t\t\treturn [nvariants,nsites_considered,\"NA\"]\n\telse:\n\t\traise Exception(\"incorrect output argumnent, should be pi or extended\")", "def getIndexes(z_nummers):\n sortCriteria = 
util.get_prop(\"ixsm\")\n if not sortCriteria:\n sortCriteria = \"z_index\"\n sortCriteria = \"ORDER BY z_nummer, %s\" % sortCriteria\n return getRecordsByAttributeIn(\"zeichnung\", \"z_nummer\", z_nummers, addtl=sortCriteria)", "def find_own_objects(cs):\n own_objects = {}\n for con in cs:\n own_objects[con] = []\n for obj in con.extent:\n own_objects[con].append(obj)\n for sub_con in cs:\n if sub_con.extent < con.extent and\\\n obj in sub_con.extent:\n own_objects[con].pop()\n break\n return own_objects", "def find_progenitors_at_z(self, SH, mtree, z1, z2):\n \n for ss in range(z1, z2):\n # nodes at redshift ss\n ss_indx = np.where(mtree.data.snapshotNumber.values == ss)\n nodeID = mtree.data.index.values[ss_indx]\n nodeID_desc = mtree.data.descendantIndex.values[ss_indx]\n \n # find number of progenitors for nodes at redshift ss\n if ss != z1:\n _progcounts = np.zeros(len(nodeID))\n for ii in range(len(nodeID_past_desc)):\n if nodeID_past_desc[ii] in nodeID:\n indx = np.where(nodeID == nodeID_past_desc[ii])\n _progcounts[indx] = count[ii]\n\n nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True)\n nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:]\n \n nodeID_past = nodeID\n nodeID_past_desc = nodeID_desc_unique\n if ss != z1:\n _progcounts_past = _progcounts\n print('_progcounts', _progcounts)", "def find_nearby_membranes(all_membranes, all_membrane_map, vert_normals):\r\n membrane_tree = scipy.spatial.cKDTree(all_membranes)\r\n nearby_membranes = np.array(list(membrane_tree.query_pairs(adhesion_max_dist, p=2)))\r\n nearby_membrane_map = defaultdict(list)\r\n if nearby_membranes.shape[0] > 0:\r\n # Exclude same-cell membrane interactions and same-direction-facing segments\r\n all_vert_normals = np.concatenate(vert_normals, axis=0)\r\n subset = np.where(\r\n (all_membrane_map[nearby_membranes[:, 0], 0] !=\r\n all_membrane_map[nearby_membranes[:, 1], 0])\r\n & (np.einsum('ij,ik->i', all_vert_normals[nearby_membranes[:, 0]], all_vert_normals[nearby_membranes[:, 1]]) < 0.0)\r\n )\r\n nearby_membranes = nearby_membranes[subset]\r\n # {cell idx: (vert idx, other cell idx, other vert idx, 'all_membranes' vert idx)}\r\n for nm in nearby_membranes:\r\n m0 = all_membrane_map[nm[0]]\r\n m1 = all_membrane_map[nm[1]]\r\n nearby_membrane_map[m0[0]].append((m0[1], m1[0], m1[1], nm[1]))\r\n nearby_membrane_map[m1[0]].append((m1[1], m0[0], m0[1], nm[0]))\r\n nearby_membrane_map = {k: np.array(v)\r\n for k, v in nearby_membrane_map.items()}\r\n# print(nearby_membrane_map)\r\n return nearby_membranes, nearby_membrane_map" ]
[ "0.5860528", "0.5294917", "0.5274569", "0.5238881", "0.51128125", "0.50799537", "0.50743914", "0.4973342", "0.49630117", "0.49471557", "0.49072868", "0.48891437", "0.48831913", "0.48573068", "0.48536083", "0.4842539", "0.4838259", "0.48302126", "0.48269477", "0.48252738", "0.48252738", "0.47874793", "0.47615215", "0.47506213", "0.4731234", "0.4727928", "0.47208488", "0.47161487", "0.47065267", "0.47056028" ]
0.59874827
0
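
One of the snippets quoted above bins pairwise delta-m/z values with np.digitize and accumulates peak intensities per bin. Below is a minimal, runnable sketch of just that binning step; the function name and the standalone feat_row are illustrative, and only the digitize-and-accumulate idea is taken from the snippet.

    import numpy as np

    def bin_delta_mz(dists_mz, dists_mz_intens, feats):
        # feats holds the bin edges; np.digitize assigns every delta m/z to a bin,
        # and the returned index is shifted by one to address feat_row.
        feat_row = np.zeros(len(feats))
        index_bins = np.digitize(dists_mz, feats)
        for index, intens in zip(index_bins, dists_mz_intens):
            feat_row[index - 1] += intens
        return feat_row

    # Three delta m/z values summed into bins with edges 0, 1, 2, 3
    print(bin_delta_mz([0.5, 1.2, 1.4], [10.0, 5.0, 2.0], [0.0, 1.0, 2.0, 3.0]))
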
Initializes with the root Python module. The application will look for all `Resource` classes defined in the given root module.
def __init__(self, root): self._root = root if not self.get_resources(): raise Exception('Your application has no Resource.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def __init__(self):\n self.modules = {}", "def init(self):\n\n self.loaded = False\n self.exports = NotImplemented\n self.exception = None\n self.namespace = self.create_namespace()\n self.namespace.__file__ = str(self.filename)\n self.namespace.module = self\n self.namespace.require = self.require", "def __init__(self, rootPath=None):\n self.rootPath = rootPath or '.'", "def init_root(path_to_ecore: str) -> None:\n logger.debug(\"Initializing mm_root from %s\", path_to_ecore)\n\n global mm_root\n mm_root = ResourceSet().get_resource(URI(path_to_ecore)).contents[0]", "def initialize_api(app, api):\n api.init_app(app=app) # Initialize api first\n _resources = getattr(app, \"api_registry\", None)\n if _resources and isinstance(_resources, (list, tuple,)):\n for cls, args, kwargs in _resources:\n api.add_resource(cls, *args, **kwargs)", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' + n))\n _init_uris(app, simulation_db)", "def init_rest(app_):\n\n rest_api = Api(app_)\n rest_api.add_resource(views.rest_resources.AppListResource,\n ActiveConfig.REST_URL_APPS_LIST,\n ActiveConfig.REST_URL_APPS_LIST + '/')\n rest_api.add_resource(views.rest_resources.AppResource,\n ActiveConfig.REST_URL_APPS_ITEM,\n ActiveConfig.REST_URL_APPS,\n ActiveConfig.REST_URL_APPS + '/')", "def register_root(cls):\n if RegisteredType._reg['root_class'] is None:\n\n del RegisteredType._reg\n RegisteredType._reg = {\n 'classes' : { 'classid_key' : 'type'},\n 'autoid' : 0,\n 'classids' : { 'type' : 'classid_key' },\n }\n RegisteredType._reg['root_class'] = cls \n cls.register_class()", "def __init__(self, root):\n FileHelper.ALL_PATHS = [os.path.join(dp, f) for dp, dn, filenames in os.walk(root) for f in filenames if os.path.splitext(f)[1] in Enums.App.VALID_FILE_TYPES]", "def static_init(cls):\n for path in sys.path:\n if os.path.isdir(path + \"/support_diagnostics\"):\n ImportModules.base_directory = path + \"/support_diagnostics\"", "def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): Maps target to a list of linknames.\n self._link_map = {}", "def root(self):\n return Resource()", "def initialize(self) -> typing.NoReturn:\n\t\tfor root, dirs, files in os.walk(INPUT_DIRECTORY, topdown=False):\n\t\t\tfor fileName in files:\n\t\t\t\tif fileName.endswith('.py'):\n\t\t\t\t\tself.moduleNameSet.add(os.path.join(root, fileName))", "def __setup_modules(self, config, db, rcontext):\n DEPTH_ROOT = 0\n DEPTH_TYPE = 1\n DEPTH_SUBTYPE = 2\n\n for root, sub_folders, files in os.walk(\"modules\"):\n nicepath = os.path.relpath(root, \"modules\")\n fullpath = root\n\n if nicepath == '.':\n depth = DEPTH_ROOT\n else:\n depth = nicepath.count(os.path.sep) + 1\n\n if depth > DEPTH_SUBTYPE:\n warnings.warn(\"sub-subdirectory in module (%s) \\\n ignored.\" % nicepath)\n\n modulenamebase = nicepath.replace(os.path.sep, '.')\n mimetype = nicepath.replace(os.path.sep, '/')\n\n if depth != DEPTH_ROOT:\n # Each folder should except root have an __init__.py,\n # otherwise the directory name 
be assigned as a module.\n if not \"__init__.py\" in files:\n warnings.warn(\"__init__.py not found in \\\n module folder '%s'.\" % nicepath)\n continue\n\n modulepath = fullpath + os.path.sep + \"__init__.py\"\n module = Module(modulepath, modulenamebase, mimetype)\n self.modules.append(module)\n\n # Now load each handler .py file\n for file in files:\n modulenameend, extension = os.path.splitext(file)\n if extension.lower() == \".py\":\n is_init = file == \"__init__.py\"\n modulepath = fullpath + os.path.sep + file\n modulename = None\n if is_init:\n modulename = modulenamebase\n elif depth == DEPTH_ROOT:\n modulename = modulenameend\n else:\n modulename = modulenamebase + '.' + modulenameend\n\n module = Module(modulepath, modulename, mimetype,\n is_global=(depth == DEPTH_ROOT),\n as_mime_handler=not is_init)\n if module.is_mime_handler and not rcontext.is_recursive:\n db.setup_module_table(module.md5_tablename,\n module.columndefinition)\n\n self.modules.append(module)", "def __init__(self,\n root: Path = None,\n resources_dir: Path = None,\n slave_configuration_path : Path = None,\n binaries_dir : Path = None,\n wrapper_win64 : Path = None,\n wrapper_linux64: Path = None,\n main_script_path : Path = None,\n model_description: Path = None,\n model_description_path : Path = None,\n main_script: Path = None,\n main_class : Path = None,\n pyfmu_dir : Path = None\n ):\n self.model_description = model_description\n\n self.main_script = main_script\n self.main_class = main_class\n self.slave_configuration = None\n\n # paths\n self.root = root\n self.resources_dir = resources_dir\n self.slave_configuration_path = slave_configuration_path\n self.main_script_path = main_script_path\n self.model_description_path = model_description_path\n self.binaries_dir = binaries_dir\n self.wrapper_win64 = wrapper_win64\n self.wrapper_linux64 = wrapper_linux64\n self.pyfmu_dir = pyfmu_dir", "def _load_modules(self):\n moduledocs = self._docset.get_compounds(xml.Group,\n lambda x: x.get_name().startswith('module_'))\n for moduledoc in moduledocs:\n moduleobj = self._modules.get(moduledoc.get_name())\n if not moduleobj:\n self._reporter.input_error(\n \"no matching directory for module: {0}\".format(moduledoc))\n continue\n moduleobj.set_doc_xml(moduledoc, self)\n self._docmap[moduledoc] = moduleobj", "def _setup_modules(self):\r\n module_registry = AppModule.module_registry()\r\n for bundle in topological_sort(AppModule.module_dependencies()):\r\n for module_label in bundle:\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))\r\n try:\r\n module.setup_function()\r\n except AppModule.Unimplemented:\r\n pass\r\n self._init_modules.append(module.label())", "def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )", "def initialize(self, module_name):\n # Load.\n self._initialize(module_name)", "def __init__(__self__, *,\n root: str):\n pulumi.set(__self__, 
\"root\", root)", "def load_resource_map():\n # to avoid a circular dependency\n from coinbase_commerce.api_resources.base import APIResource\n global RESOURCE_MAP\n RESOURCE_MAP = {k.RESOURCE_NAME: k for k in APIResource.get_subclasses()\n if getattr(k, \"RESOURCE_NAME\", None)}", "def __init__(self):\n super(Modules, self).__init__()\n \n global superclasses\n superclasses['universe'] = []\n superclasses['actions'] = ['universe']\n superclasses['booleans'] = ['universe']\n\n global instances\n instances['universe'] = set()\n instances['actions'] = set()\n instances['booleans'] = set()", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def test_init(self):\n\n class TestResource(BaseResource):\n\n name = 'test_resource'\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n route = '/a_route'\n TestResource.init(api, route)\n\n # validate the attribute values of the class\n self.assertEqual(api, TestResource.api)\n self.assertEqual(route, TestResource.route)\n self.assertEqual(api.mongodb, TestResource.mongodb)\n self.assertEqual(api.conf, TestResource.conf)\n self.assertEqual('http://an_endpoint/a_route', TestResource.endpoint)\n self.assertEqual('test_resource', TestResource.logger.name)", "def __init__(self, root):\n self.root = root", "def __init__(self, root):\n self.root = root", "def __init__(self, root=None):\n self.set_root(root)", "def _sub_init(self):\n self._find_mod(\"init\", match_only=True)", "def __init__(self, root):\n self.root = root\n self.app = Home(root, self)" ]
[ "0.6172543", "0.6130026", "0.58406955", "0.5820272", "0.5808664", "0.56877095", "0.5673514", "0.5664736", "0.5635875", "0.561636", "0.5615964", "0.559234", "0.5569267", "0.5522192", "0.5514438", "0.5474632", "0.54634255", "0.54632753", "0.5462733", "0.54501873", "0.5443123", "0.5411119", "0.5410135", "0.54009956", "0.536563", "0.5362878", "0.5362878", "0.53381944", "0.533463", "0.5330706" ]
0.7221802
0
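
The record above only checks that get_resources() finds something under the root module; the scan itself is not shown. A hedged sketch of how such a Resource discovery could be implemented follows — the Resource base class, the walk_packages traversal, and the function name are assumptions, not the original code.

    import importlib
    import inspect
    import pkgutil

    class Resource:
        """Illustrative base class; the real project defines its own."""

    def find_resources(root_module):
        # Collect every subclass of Resource defined in the root module
        # and, when it is a package, in all of its submodules.
        modules = [root_module]
        if hasattr(root_module, "__path__"):
            for info in pkgutil.walk_packages(root_module.__path__,
                                              root_module.__name__ + "."):
                modules.append(importlib.import_module(info.name))
        found = []
        for module in modules:
            for _, obj in inspect.getmembers(module, inspect.isclass):
                if issubclass(obj, Resource) and obj is not Resource:
                    found.append(obj)
        return found
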
Unnormalize a given image.
def unnormalize(self, image, transpose=False): return unnormalize(image, self.mean, self.std, transpose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise(image):", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def reverse_normalize(image):\n\n reverse = transforms.Normalize(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255])\n return reverse(image)", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def normalize(image):\r\n return image / 127.5 - 1.", "def normalize(image):\n return image / 127.5 - 1.", "def deprocess_image(img):\n # normalize tensor: center on 0., ensure std is 0.1\n img -= img.mean()\n img /= (img.std() + 1e-5)\n img *= 0.1\n\n # clip to [0, 1]\n img += 0.5\n img = np.clip(img, 0, 1)\n\n # convert to RGB array\n img *= 255\n\n # TF image format if channels = (1 or 3) towards the last rank.\n if img.shape[-1] != 3 and img.shape[-1] != 1:\n img = img.transpose((1, 2, 0))\n\n img = np.clip(img, 0, 255).astype('uint8')\n return img", "def normalize_image(image):\n return image / 255.", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image", "def undo_normalise(img):\n\treturn img + CONFIG.MEAN_PIXEL", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize_img(img: np.ndarray, bit_depth: int) -> np.ndarray:\n return img / ((1 << bit_depth) - 1)", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u", "def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def normalize_image(im):\n pixels = im.flatten()\n\n # scale pixels to range 0 to 1\n normalized_im = (pixels - np.min(pixels)) / (np.max(pixels) - np.min(pixels))\n\n # scale the pixels by 
255\n normalized_im = (normalized_im.reshape(im.shape) * 255).astype(np.uint8)\n\n return normalized_im", "def normalize_image(img):\n\n # Load image and convert to grayscale\n img = rgb2gray(img)\n\n # Normalize values, range 0 to 255\n img = (img - img.min()) / (img.max() - img.min())\n img *= 255\n\n # Make int values\n img = img.astype(int)\n\n # Return new image\n return img", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk" ]
[ "0.78914344", "0.76616216", "0.7482308", "0.7246689", "0.7187682", "0.71326315", "0.71250784", "0.69919527", "0.6929422", "0.6927052", "0.6911609", "0.6908462", "0.6908327", "0.69034034", "0.6890873", "0.6881274", "0.6876974", "0.68578804", "0.6824972", "0.68119633", "0.67838544", "0.6769825", "0.6764126", "0.6737152", "0.6724794", "0.67195857", "0.66720265", "0.6648073", "0.6644086", "0.6641788" ]
0.7967136
0
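
The record above undoes a torchvision-style normalization by delegating to an unnormalize helper with mean, std, and an optional transpose. Below is a self-contained version of such a helper, assuming CHW tensors and per-channel statistics; the exact signature of the original helper is not shown, so treat this as a sketch.

    import torch

    def unnormalize(image, mean, std, transpose=False):
        # image is a CHW tensor that was normalized as (x - mean) / std;
        # multiply by std and add the mean back, channel by channel.
        mean = torch.as_tensor(mean, dtype=image.dtype).view(-1, 1, 1)
        std = torch.as_tensor(std, dtype=image.dtype).view(-1, 1, 1)
        image = image * std + mean
        if transpose:
            image = image.permute(1, 2, 0)  # CHW -> HWC for plotting
        return image

    # Example with ImageNet statistics
    img = torch.rand(3, 8, 8)
    restored = unnormalize(img, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
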
Handle an imbalanced dataset through a sampler.
def create_class_imbalance_sampler(self): count = [0] * len(self.classes) for item in self.train_data.imgs: count[item[1]] += 1 weight_per_class = [0.] * len(self.classes) for i in range(len(self.classes)): weight_per_class[i] = float(sum(count)) / float(count[i]) weights = [0] * len(self.train_data.imgs) for idx, val in enumerate(self.train_data.imgs): weights[idx] = weight_per_class[val[1]] weights = torch.DoubleTensor(weights) self.sampler = torch.utils.data.sampler.WeightedRandomSampler( weights, len(weights) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def train_dataloader(self) -> data.DataLoader:\n # Random weighted sampler to approach the imbalanced dataset\n self.weights = [1.0 / i for i in self.weights]\n\n _sample_weights = [0] * len(self.datasets['train'])\n\n for idx, (_, label) in enumerate(self.datasets['train']):\n _weight = self.weights[label]\n _sample_weights[idx] = _weight\n\n random_sampler = data.WeightedRandomSampler(_sample_weights,\n len(self.datasets['train']), replacement=False)\n\n return data.DataLoader(dataset=self.datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, pin_memory=False,\n sampler=random_sampler)", "def sampler_weights(dataset):\n class_counts = [0, 0]\n for index in range(len(dataset)):\n _, label = dataset[index]\n class_counts[label] += 1\n\n divisor = 2 * class_counts[0] * class_counts[1]\n sample_weights = (class_counts[1] / divisor, class_counts[0] / divisor)\n weights = []\n for index in range(len(dataset)):\n _, label = dataset[index]\n weights.append(sample_weights[label])\n\n num_samples = 2 * min(class_counts[0], class_counts[1])\n return weights, num_samples", "def handle_imbalance(dataset, minority_class):\n for i, l in enumerate(dataset):\n if l == minority_class:\n dataset[i] = 2\n return dataset", "def balance_data(df, y, do_undersample):\n if do_undersample:\n print('Under sampling the \\'0\\' class of our outcome data...')\n # Under sample -50K so we can better learn.\n ones = df[df['binary_income']==1]\n zeros = df[df['binary_income']==0]\n \n subsampled_df = pd.concat([ones, zeros.sample(ones.shape[0])])\n subsampled_y = subsampled_df['binary_income']\n subsampled_df = subsampled_df.drop('binary_income',axis=1)\n \n return subsampled_df, subsampled_y\n \n else:\n return df, y", "def overSampling( self, feature, Class, random_state = 0 ):\n oversampler = SMOTE(random_state=0)\n feature_resample, Class_resample = oversampler.fit_sample(feature, \n Class)\n print(\"Warning: You are increasing the dataset to balance the data\\n\")\n return feature_resample, Class_resample", "def process_sample_train(self):\n raise NotImplementedError", "def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = 
dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)", "def get_dataset_sampler(self):\n return None", "def should_sample(self, span_context):\n raise NotImplementedError", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def importance_sampler(raw_data, analysis_settings):\n pass", "def __call__(self, y, pred, sample_weight=None):", "def downsample_data(dataset):\n loss = dataset.loc[dataset[TARGET] == 'loss']\n good_gain = dataset.loc[dataset[TARGET] == 'good_gain']\n \n sample_size = 
min([loss.shape[0], good_gain.shape[0]])\n loss = loss.sample(n=sample_size, random_state=42)\n good_gain = good_gain.sample(n=sample_size, random_state=42)\n \n frames = [loss, good_gain]\n return shuffle(pd.concat(frames), random_state=0)", "def load_binary_imbalanced(classes=(1,7), ratio=0.1):\r\n train_set, train_set_target = load_data()\r\n \r\n # binarize\r\n mask_train_set_imb = np.logical_or(train_set_target == classes[0],train_set_target == classes[1])\r\n (data_set_imb,data_set_imb_target)= (train_set[mask_train_set_imb], train_set_target[mask_train_set_imb])\r\n\r\n # imbalance\r\n data_minority = data_set_imb[data_set_imb_target == classes[1]]\r\n data_minority_target = data_set_imb_target[data_set_imb_target == classes[1]]\r\n data_majority = data_set_imb[data_set_imb_target == classes[0]]\r\n data_majority_target = data_set_imb_target[data_set_imb_target == classes[0]]\r\n original_size = data_minority_target.shape[0]\r\n majority_size = data_majority_target.shape[0]\r\n target_size = int(np.floor(majority_size * ratio))\r\n indices = np.random.choice(original_size, size=target_size)\r\n data_minority = data_minority[indices]\r\n data_minority_target = data_minority_target[indices]\r\n\r\n # merge\r\n train_set = np.concatenate([data_minority, data_majority])\r\n train_set_target = np.concatenate([data_minority_target, data_majority_target])\r\n\r\n #shuffle\r\n train_set, train_set_target = np.hsplit(\r\n np.random.permutation(\r\n np.hstack((train_set, train_set_target.reshape((train_set_target.shape[0], 1))))\r\n ), [-1]\r\n )\r\n train_set_target = np.asarray(train_set_target, dtype='int').reshape((train_set_target.shape[0],))\r\n return (train_set[:],train_set_target[:])", "def initialise_sampler(self):\n raise NotImplementedError", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def sample(self):\r\n raise NotImplementedError", "def __call__(self, samples_number):\n self.sampler.sample(samples_number)", "def sample_count(self):", "def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n 
print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]", "def generate_sampler(dataset, sampler_option='random', step=1):\n\n df = dataset.df\n min_age = np.min(df.age)\n max_age = np.max(df.age)\n\n if (max_age - min_age) % step == 0:\n max_age += step\n\n bins = np.arange(min_age, max_age, step)\n count = np.zeros(len(bins))\n for idx in df.index:\n age = df.loc[idx, \"age\"]\n key = np.argmax(np.logical_and(age - step < bins, age >= bins)).astype(int)\n count[key] += 1\n\n # weight_per_class = (1 / np.array(count)) if count.any() != 0 else 0.\n weight_per_class = np.zeros_like(count).astype(float)\n np.divide(1., count, out=weight_per_class, where=count != 0)\n weights = [0] * len(df)\n\n for idx, age in enumerate(df.age.values):\n key = np.argmax(np.logical_and(age - 5 <= bins, age > bins)).astype(int)\n weights[idx] = weight_per_class[key]\n\n weights = torch.FloatTensor(weights)\n\n if sampler_option == 'random':\n s = sampler.RandomSampler(dataset, replacement=False)\n elif sampler_option == 'weighted':\n s = sampler.WeightedRandomSampler(weights, len(weights))\n else:\n raise NotImplementedError(\"The option %s for sampler is not implemented\" % sampler_option)\n\n return s", "def CrossCheck(dataloader):", "def IBP_sampler(mat):\n mat.val, mat.siblings[0].val = numba_mu.sample_2d_IBP(\n mat(),\n mat.siblings[0](),\n mat.layer.child().transpose(transpose_order),\n mat.layer.lbda(),\n mat.siblings[0].bernoulli_prior,\n mat.layer.alpha)", "def sample(self, seg_logit, seg_label):", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def create_weighted_sampler(local_df, test_label = 'protease_stability'):\n all_label_ids = torch.tensor([x for x in local_df[test_label]], dtype=torch.long)\n labels_unique, counts = np.unique(local_df[test_label], return_counts=True)\n print(labels_unique)\n\n class_weights = [sum(counts)/c for c in counts]\n print(class_weights)\n\n weights = [class_weights[e] for e in local_df[test_label]]\n\n print(len(local_df[test_label]))\n sampler = data_utils.WeightedRandomSampler(weights, len(local_df[test_label]))\n return sampler", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch" ]
[ "0.62204057", "0.61685914", "0.5826256", "0.5715278", "0.5707387", "0.5623797", "0.561231", "0.55672514", "0.5549182", "0.54543215", "0.5443967", "0.5442228", "0.5435967", "0.54301053", "0.5364784", "0.53321433", "0.53314674", "0.5326067", "0.5314146", "0.5309174", "0.53041863", "0.52870476", "0.5278337", "0.52631384", "0.5234448", "0.5223024", "0.5210587", "0.5210587", "0.5202301", "0.51957947" ]
0.6245745
0
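
The document above weights each training image by the inverse frequency of its class and feeds those weights to WeightedRandomSampler. The same idea as a compact, standalone function — the labels argument stands in for the labels taken from train_data.imgs.

    import torch
    from torch.utils.data import WeightedRandomSampler

    def make_balanced_sampler(labels, num_classes):
        counts = [0] * num_classes
        for label in labels:
            counts[label] += 1
        # Rarer classes get proportionally larger weights.
        class_weights = [sum(counts) / c for c in counts]
        sample_weights = torch.DoubleTensor([class_weights[label] for label in labels])
        return WeightedRandomSampler(sample_weights, len(sample_weights))

    # Example: heavily skewed two-class label list
    sampler = make_balanced_sampler([0] * 90 + [1] * 10, num_classes=2)
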
Get the selected locale from user settings.
def get_locale(): setting = Setting.query.filter(Setting.name == 'default_language').first() if setting is not None: return setting.value # Return default language when none found return 'en'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def get_current_locale(self, req):\n if req.view_args and 'locale' in req.view_args:\n for locale in self.locales:\n if locale.code == req.view_args['locale']:\n return locale\n\n # Return the default locale\n return self.default_locale", "def get_locale_for_user(self):\n return 'en_US' # TODO(psimakov): choose proper locale from profile", "def get_locale(self):\n return self.locale", "async def get_user_locale(self, action: str, args: Tuple[Any]) -> str:\n\n tg_user = types.User.get_current()\n user = await get_user(tg_user.id)\n super_locale = await super().get_user_locale(action, args)\n\n if user.locale is not None: # if user set his locale\n return user.locale\n else:\n if super_locale in LANGUAGES:\n return super_locale\n if tg_user.locale in LANGUAGES:\n return tg_user.locale\n else: # else, return default\n return DEFAULT_USER_LOCALE", "def get_locale():\n return babel.Locale.parse(_get_locale())", "def get_current_locale(self) -> str:\n return self.locale", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def locale(self):\n return self.__locale", "def get_locale():\n return \"he\"", "def _get_locale() -> str:\n languages = flask.current_app.config['LANGUAGES'].keys()\n locale = flask.request.accept_languages.best_match(languages)\n\n # If no locale could be determined, fall back to the default.\n if locale is None:\n locale = flask.current_app.config['BABEL_DEFAULT_LOCALE']\n\n return locale", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")", "def get_locale(self):\n raise Unimplemented()", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def get_language(self):\r\n return self.language", "def get_language(self):\n return self.lang", "def get_locale(lang):\n lang = babel_format_locale_map.get(lang) or lang\n try:\n return Locale.parse(lang, sep='-')\n except (UnknownLocaleError, ValueError):\n return Locale(*settings.LANGUAGE_CODE.split('-'))", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def get_lang(self):\n return self.langs.lang", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def requestLanguage(request):\n # Return the user language preferences for registered users\n if request.user.valid and request.user.language:\n return request.user.language\n\n # Or try to 
return one of the user browser accepted languages, if it\n # is available on this wiki...\n available = wikiLanguages()\n if not request.cfg.language_ignore_browser:\n for lang in browserLanguages(request):\n if lang in available:\n return lang\n \n # Or return the wiki default language...\n if request.cfg.language_default in available:\n lang = request.cfg.language_default\n # If everything else fails, read the manual... or return 'en'\n else:\n lang = 'en'\n return lang", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def get_locale(self):\n\n return to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\")", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def GetMUILanguage(self):\n mui_resource = self.GetMUIResource()\n if not mui_resource:\n return None\n\n return mui_resource.language", "def get_user_lang(user: str = None) -> str:\n\tuser = user or frappe.session.user\n\tlang = frappe.cache.hget(\"lang\", user)\n\n\tif not lang:\n\t\t# User.language => Session Defaults => frappe.local.lang => 'en'\n\t\tlang = (\n\t\t\tfrappe.db.get_value(\"User\", user, \"language\")\n\t\t\tor frappe.db.get_default(\"lang\")\n\t\t\tor frappe.local.lang\n\t\t\tor \"en\"\n\t\t)\n\n\t\tfrappe.cache.hset(\"lang\", user, lang)\n\n\treturn lang", "def get_user_lang(user=None):\n\tif not user:\n\t\tuser = frappe.session.user\n\n\t# via cache\n\tlang = frappe.cache().hget(\"lang\", user)\n\n\tif not lang:\n\n\t\t# if defined in user profile\n\t\tlang = frappe.db.get_value(\"User\", user, \"language\")\n\t\tif not lang:\n\t\t\tlang = frappe.db.get_default(\"lang\")\n\n\t\tif not lang:\n\t\t\tlang = frappe.local.lang or 'en'\n\n\t\tfrappe.cache().hset(\"lang\", user, lang)\n\n\treturn lang" ]
[ "0.7412174", "0.7108176", "0.69538045", "0.69421023", "0.68359756", "0.67491263", "0.67384523", "0.6731309", "0.650032", "0.63808835", "0.6361166", "0.63441944", "0.63441944", "0.6340159", "0.62725174", "0.61259097", "0.6097418", "0.6096102", "0.60663605", "0.60374147", "0.6036436", "0.6031369", "0.60306174", "0.60046834", "0.5967092", "0.5957959", "0.5913882", "0.59019977", "0.587678", "0.58416796" ]
0.73380387
1
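
The record above reads the interface language from a stored 'default_language' setting and falls back to English when nothing is found. A dependency-free sketch of that lookup, with a plain dict standing in for the Setting table query:

    def get_locale(settings, default="en"):
        # settings maps setting names to values, standing in for
        # Setting.query.filter(Setting.name == 'default_language').first().
        value = settings.get("default_language")
        return value if value is not None else default

    print(get_locale({"default_language": "fr"}))  # -> fr
    print(get_locale({}))                          # -> en (fallback)
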
Decodes a Base58Check-encoded key.
def from_b58check(key): return HDKey.from_bytes(base58.b58decode_check(key))[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base58_decode(v: bytes) -> bytes:\n try:\n prefix_len = next(\n len(encoding[2])\n for encoding in base58_encodings\n if len(v) == encoding[1] and v.startswith(encoding[0])\n )\n except StopIteration:\n raise ValueError('Invalid encoding, prefix or length mismatch.')\n\n return base58.b58decode_check(v)[prefix_len:]", "def decode(self, crypto):", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def decode(s):\n try:\n if not s:\n return b''\n\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in b58_digits:\n raise InvalidBase58Error('Character %r is not a valid base58 character' % c)\n digit = b58_digits.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = unhexlify(h.encode('utf8'))\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == b58_digits[0]: pad += 1\n else: break\n return hexlify(b'\\x00' * pad + res).decode('utf8')", "def decode_key(as_bytes: typing.List[int]) -> str:\n raise NotImplementedError()", "def decode_base58(v):\n prefix = b''\n while v.startswith(B58[0]):\n prefix += b'\\0' \n v = v[1:]\n if v:\n return prefix + bytes(changebase(map(B58.index,v),58,256))\n else:\n return prefix", "def base58_decode(s):\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n # res = \"\"\n res = bytearray.fromhex(h)\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]: pad += 1\n else: break\n return b'\\x00' * pad + res", "def decodeBase58(s):\n if not s:\n return b''\n\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in B58_DIGITS:\n raise InvalidBase58Error('Character %r is not a valid base58 character' % c)\n digit = B58_DIGITS.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = unhexlify(h.encode('utf8'))\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == B58_DIGITS[0]:\n pad += 1\n else:\n break\n return b'\\x00' * pad + res", "def deserialize_key(key: bytes) -> str:\n return key.decode()", "def rc4_decode(data, key, decode=base64.b64decode, salt_length=16):\n if decode:\n data = decode(data)\n salt = data[:salt_length]\n return crypt(data[salt_length:], sha1(key + salt).digest())", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def decode(key: str, enc: str) -> str:\n\n dec = []\n enc = base64.urlsafe_b64decode(enc).decode()\n for i in range(len(enc)):\n key_c = key[i % len(key)]\n dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)\n dec.append(dec_c)\n return \"\".join(dec)", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def parse_signature(data: bytes):\n return 
base58_encode(data, b'sig').decode()", "def decode_base58(smartAddress, length):\n n = 0\n for char in smartAddress:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of SmartCashs's base58: '%s'\"\n raise ValueError(msg % (char,))\n\n return n.to_bytes(length, 'big')", "def b58decode(v, length):\n long_value = 0L\n for (i, c) in enumerate(v[::-1]):\n long_value += __b58chars.find(c) * (__b58base**i)\n \n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n \n nPad = 0\n for c in v:\n if c == __b58chars[0]: nPad += 1\n else: break\n \n result = chr(0)*nPad + result\n if length is not None and len(result) != length:\n return None\n \n return result", "def b58decode(v, length):\n\tlong_value = 0L\n\tfor (i, c) in enumerate(v[::-1]):\n\t\tlong_value += __b58chars.find(c) * (__b58base**i)\n\tresult = ''\n\twhile long_value >= 256:\n\t\tdiv, mod = divmod(long_value, 256)\n\t\tresult = chr(mod) + result\n\t\tlong_value = div\n\tresult = chr(long_value) + result\n\tnPad = 0\n\tfor c in v:\n\t\tif c == __b58chars[0]: nPad += 1\n\t\telse: break\n\tresult = chr(0)*nPad + result\n\tif length is not None and len(result) != length:\n\t\treturn None\n\treturn result", "def b58decode(v, length):\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n long_value += b58_chars.find(c) * (b58_base ** i)\n\n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n\n nPad = 0\n for c in v:\n if c == b58_chars[0]:\n nPad += 1\n else:\n break\n\n result = chr(0) * nPad + result\n if length is not None and len(result) != length:\n return None\n\n return result", "def b2a_base58check(data):\n\n return encoding.b2a_hashed_base58(data)", "def from_base58(cls, seed: str) -> 'PrivateKey':\n return cls(base58.b58decode(seed))", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def test_decode(self):\n self.assertEqual(\n hex_to_b64(self.hex_string),\n self.expect_result\n )", "def b58decode(v, length):\r\n long_value = 0L\r\n for (i, c) in enumerate(v[::-1]):\r\n long_value += __b58chars.find(c) * (__b58base**i)\r\n\r\n result = ''\r\n while long_value >= 256:\r\n div, mod = divmod(long_value, 256)\r\n result = chr(mod) + result\r\n long_value = div\r\n result = chr(long_value) + result\r\n\r\n nPad = 0\r\n for c in v:\r\n if c == __b58chars[0]: nPad += 1\r\n else: break\r\n\r\n result = chr(0)*nPad + result\r\n if length is not None and len(result) != length:\r\n return None\r\n\r\n return result", "def forge_base58(value: str) -> bytes:\n return base58_decode(value.encode())", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def decode_key(key: str) -> Tuple[int, int]:\n try:\n mod, exp = key.split(\".\")\n except ValueError:\n raise ValueError(f\"`{key}` is not a valid key\")\n\n return (\n int.from_bytes(base64.urlsafe_b64decode(mod), config.BYTEORDER),\n int.from_bytes(base64.urlsafe_b64decode(exp), config.BYTEORDER, signed=True),\n )", "def decode(self, encoded):", "def decode_base58(bitcoin_address, length):\n n 
= 0\n for char in bitcoin_address:\n try:\n n = n * 58 + digits58.index(char)\n except:\n msg = u\"Character not part of Bitcoin's base58: '%s'\"\n raise IllegalCharacterError(msg % char)\n try:\n return n.to_bytes(length, 'big')\n except AttributeError:\n # Python version < 3.2\n return _long_to_bytes(n, length, 'big')", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())" ]
[ "0.65961087", "0.6403686", "0.6391344", "0.63413113", "0.6321229", "0.6299899", "0.61630493", "0.6157103", "0.61495435", "0.60469747", "0.600818", "0.59588367", "0.5914103", "0.5791583", "0.57892734", "0.5774782", "0.5729094", "0.5713923", "0.5695626", "0.5672472", "0.5671971", "0.56648386", "0.56602347", "0.5640567", "0.56206065", "0.5570051", "0.55683273", "0.5536966", "0.55233926", "0.5521278" ]
0.74329513
0
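
The record above delegates to base58.b58decode_check. For reference, here is a dependency-free sketch of what Base58Check decoding involves — convert the base-58 digits to bytes, restore leading zero bytes, then verify the 4-byte double-SHA-256 checksum. It is written from the general scheme, not from the base58 package's internals.

    import hashlib

    B58_ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

    def b58decode_check(s):
        # Base-58 string -> big integer -> bytes.
        n = 0
        for ch in s:
            n = n * 58 + B58_ALPHABET.index(ch)
        raw = n.to_bytes((n.bit_length() + 7) // 8, "big")
        # Each leading '1' character encodes one leading zero byte.
        pad = len(s) - len(s.lstrip(B58_ALPHABET[0]))
        raw = b"\x00" * pad + raw
        if len(raw) < 5:
            raise ValueError("string too short for Base58Check")
        payload, checksum = raw[:-4], raw[-4:]
        if hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] != checksum:
            raise ValueError("bad Base58Check checksum")
        return payload
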
Generates either an HDPrivateKey or HDPublicKey from the underlying bytes.
def from_bytes(b): if len(b) < 78: raise ValueError("b must be at least 78 bytes long.") version = int.from_bytes(b[:4], 'big') depth = b[4] parent_fingerprint = b[5:9] index = int.from_bytes(b[9:13], 'big') chain_code = b[13:45] key_bytes = b[45:78] rv = None if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION: if key_bytes[0] != 0: raise ValueError("First byte of private key must be 0x00!") private_key = int.from_bytes(key_bytes[1:], 'big') rv = HDPrivateKey(key=private_key, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION: if key_bytes[0] != 0x02 and key_bytes[0] != 0x03: raise ValueError("First byte of public key must be 0x02 or 0x03!") public_key = PublicKey.from_bytes(key_bytes) rv = HDPublicKey(x=public_key.point.x, y=public_key.point.y, chain_code=chain_code, index=index, depth=depth, parent_fingerprint=parent_fingerprint) else: raise ValueError("incorrect encoding.") return (rv, b[78:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_ecdh_key_pair() -> tuple[X25519PrivateKey, bytes]:\n private_key = X25519PrivateKey.generate()\n public_key_raw = private_key.public_key().public_bytes(\n serialization.Encoding.Raw, serialization.PublicFormat.Raw\n )\n return private_key, public_key_raw", "def mk_keyobj_from_private_key_der(self, derdat):\n self.private_key_obj = serialization.load_der_private_key(derdat, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def from_public_parts(self, x: bytes, y: bytes):\n return asymmetric.ec.EllipticCurvePublicNumbers(\n int.from_bytes(x, 'big'),\n int.from_bytes(y, 'big'),\n asymmetric.ec.SECP256R1()\n ).public_key()", "def gen_private_key():\n return DH.b2i(Random.new().read(DH_SIZE))", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. 
In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def generate_key(seed):\n private_key = sha256(seed)\n public_key = privtopub(private_key)\n return {\"private\": private_key, \"public\": public_key}", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def mk_keyobj_from_private_key_pem(self, pemdat_string):\n if isinstance(pemdat_string, str):\n pemdat_string = pemdat_string.encode()\n self.private_key_obj = serialization.load_pem_private_key(pemdat_string, password=None, backend=default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def createKeyPair(type, bits):\n pkey = crypto.PKey()\n pkey.generate_key(type, bits)\n return pkey", "def generate_keys() -> tuple:\n private_key = ecdsa.SigningKey.generate(curve=curve)\n public_key = private_key.get_verifying_key()\n\n private_key = encode_private_key(private_key)\n public_key = encode_public_key(public_key)\n\n return public_key, private_key", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = 
BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()", "def get_key_object(self):\n key_type, data = self.key_data()\n data = base64.b64decode(data)\n\n if key_type == \"ssh-rsa\":\n key = rsakey.RSAKey(data=data)\n elif key_type == \"ssh-dss\":\n key = dsskey.DSSKey(data=data)\n else:\n raise Exception(\"Invalid key type\")\n\n return key", "def get_key_pair() -> typing.Tuple[bytes, bytes]: \n return _get_key_pair_from_sk(ecdsa.SigningKey.generate(curve=CURVE))", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def solve(key_data: bytes) -> PublicKey:\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())", "def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None", "def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 
'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def derive_keypair(seed: str, validator: bool = False) -> Tuple[str, str]:\n decoded_seed, algorithm = addresscodec.decode_seed(seed)\n module = _ALGORITHM_TO_MODULE_MAP[algorithm]\n public_key, private_key = module.derive_keypair(decoded_seed, validator)\n signature = module.sign(_VERIFICATION_MESSAGE, private_key)\n if not module.is_valid_message(_VERIFICATION_MESSAGE, signature, public_key):\n raise XRPLKeypairsException(\n \"Derived keypair did not generate verifiable signature\",\n )\n return public_key, private_key", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()" ]
[ "0.6803569", "0.6298475", "0.6296541", "0.61843973", "0.6125002", "0.6038086", "0.6034926", "0.6032014", "0.6002514", "0.5997152", "0.598115", "0.59766436", "0.59530914", "0.59374905", "0.59370124", "0.5930606", "0.588195", "0.5862614", "0.5829133", "0.5813407", "0.58064556", "0.57275146", "0.5725388", "0.57161444", "0.56934845", "0.5683587", "0.56451404", "0.5638943", "0.55802584", "0.5561091" ]
0.7741583
0
Whether or not this is a hardened node. Hardened nodes are those with indices >= 0x80000000.
def hardened(self): # A hardened key is a key with index >= 2 ** 31, so # we check that the MSB of a uint32 is set. return self.index & 0x80000000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_leaf(self):\r\n return self.zero_son is None and self.one_son is None", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def node_inode(self):\n return False", "def node_inode(self):\n return False", "def is_leaf(self):\n return self.pixel_count > 0", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def has_node(self, n):\n return n in self.node_dict", "def isWellFormedNode(self, *args):\n return _libsbml.ASTBasePlugin_isWellFormedNode(self, *args)", "def _is_left_edge(self, ndx):\n if len(self._dims)== 1:\n return ndx == 0\n return ndx < self._dims[1]", "def __nonzero__(self):\n return self.root.__nonzero__()", "def is_ghost(self):\n\t\treturn False", "def essential_node_count(self) -> int:\n return sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def is_shorter(node, n_node):\r\n return True if node.distance + n_node.value < n_node.distance else False", "def has_node(self, n):\n return n in self.dict", "def has_ghosts(self):\n return not np.all(self.mesh.discretization.ghosts == 0)", "def is_leaf(self):\n return len(self.blocks) == 0", "def has_nei(self, key: int) -> bool:\r\n return self.neighbors.__contains__(key)", "def is_ghost(self):\n return self._is_ghost", "def is_leaf(self):\r\n return self.num_children() == 0", "def is_root(self):\n return self.unpack_word(0x2) & 0x0004 > 0", "def need_neighbor(self):\n return self._need_neighbor", "def is_internal(self):\n # TODO: Check if either left child or right child has a value\n return ... or ...", "def is_leaf(self):\n return not self.children.exists()", "def isNodeLeaf ( self ):\n return self.nodes is None or len ( self.nodes ) == 0\n # End isNodeLeaf", "def _is_trivial(self):\n return self._.d <= 1", "def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy < 0:\n count += 1\n return count", "def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP", "def _node_only_used_for_sym_size(node: Node, partition_nodes: List[Node]):\n if _is_sym_size_node(node):\n return True\n\n return all(\n ((user not in partition_nodes) or _is_sym_size_node(user))\n for user in node.users\n )" ]
[ "0.62713253", "0.60492986", "0.5942298", "0.5942298", "0.5901078", "0.57642645", "0.5762194", "0.5736317", "0.57134473", "0.5682844", "0.5674507", "0.5665041", "0.56444204", "0.5642746", "0.56300306", "0.55741334", "0.55603236", "0.55570173", "0.55541235", "0.5552279", "0.55295277", "0.55292237", "0.5527793", "0.55263567", "0.55004317", "0.5482618", "0.5472563", "0.5466461", "0.5457714", "0.54350126" ]
0.68602026
0
Returns the key's fingerprint, which is the first 4 bytes of its identifier.
def fingerprint(self): return self.identifier[:4]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]", "def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)", "def fingerprint(self):\n\n if self._fingerprint is None:\n self._fingerprint = _fingerprint(self.asn1, None)\n return self._fingerprint", "def host_key_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fingerprint\")", "def get_fingerprint(self):\n return self.fp", "def getFingerprint(self):\r\n if self.getNumCerts() == 0:\r\n raise AssertionError()\r\n return self.x509List[0].getFingerprint()", "def host_key_fingerprint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_fingerprint\")", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def fingerprint(self):\n return self.pod.hash_file(self.pod_path)", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def label_fingerprint(self) -> str:\n return pulumi.get(self, \"label_fingerprint\")", "def get_fingerprint(filepath):\n ssh_file = open(filepath, 'r')\n ssh_file_contents = ssh_file.readlines()\n ssh_fingerprint = ''.join(ssh_file_contents).strip()\n\n return ssh_fingerprint", "def key(self):\n return self._key.decode('utf-8')", "def _certificate_fingerprint(identity):\n fingerprint, stderr = _check_output([\n \"openssl\",\n \"x509\",\n \"-inform\",\n \"DER\",\n \"-noout\",\n \"-fingerprint\",\n ],\n inputstr=identity)\n fingerprint = fingerprint.strip()\n fingerprint = fingerprint.replace(\"SHA1 Fingerprint=\", \"\")\n fingerprint = fingerprint.replace(\":\", \"\")\n return fingerprint", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()", "def fingerprint_from_file(filename):\n cmd = flatten([gnupg_bin(), gnupg_home(), filename])\n outp = stderr_output(cmd).split('\\n')\n if not outp[0].startswith('pub'):\n raise CryptoritoError('probably an invalid gpg key')\n\n return outp[1].strip()", "def _gpg_fingerprints(self) -> List[str]:\n return self._gpg_keys.fingerprints", "def get_fingerprint(md5=False):\n sb = []\n sb.append(p.node())\n sb.append(p.architecture()[0])\n sb.append(p.architecture()[1])\n sb.append(p.machine())\n sb.append(p.processor())\n sb.append(p.system())\n sb.append(str(uuid.getnode())) # MAC address\n text = '#'.join(sb)\n if md5:\n return string_to_md5(text)\n else:\n return text", "def get_fingerprint(self, md='md5'):\n der = self.as_der()\n md = EVP.MessageDigest(md)\n md.update(der)\n digest = md.final()\n return 
hex(util.octx_to_num(digest))[2:-1].upper()", "def fingerprint(self) -> Text:\n return self.name", "def ssl_fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ssl_fingerprint\")", "def _fingerprint(key_object, load_private_key):\n\n if isinstance(key_object, PrivateKeyInfo):\n key = key_object['private_key'].parsed\n\n if key_object.algorithm == 'rsa':\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n params = key_object['private_key_algorithm']['parameters']\n public_key = Integer(pow(\n params['g'].native,\n key_object['private_key'].parsed.native,\n params['p'].native\n ))\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n public_key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key['public_key'].native\n if public_key is None:\n # This is gross, but since the EC public key is optional,\n # and we need to load the private key and use the crypto lib\n # to get the public key, we have to import the platform-specific\n # asymmetric implementation. This is the reason a bunch of the\n # imports are module imports, so we don't get an import cycle.\n public_key_object = load_private_key(key_object).public_key\n public_key = public_key_object.asn1['public_key'].parsed.native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n if isinstance(key_object, PublicKeyInfo):\n if key_object.algorithm == 'rsa':\n key = key_object['public_key'].parsed\n\n to_hash = '%d:%d' % (\n key['modulus'].native,\n key['public_exponent'].native,\n )\n\n elif key_object.algorithm == 'dsa':\n key = key_object['public_key'].parsed\n params = key_object['algorithm']['parameters']\n\n to_hash = '%d:%d:%d:%d' % (\n params['p'].native,\n params['q'].native,\n params['g'].native,\n key.native,\n )\n\n elif key_object.algorithm == 'ec':\n public_key = key_object['public_key'].native\n\n to_hash = '%s:' % key_object.curve[1]\n to_hash = to_hash.encode('utf-8')\n to_hash += public_key\n\n if isinstance(to_hash, str_cls):\n to_hash = to_hash.encode('utf-8')\n\n return hashlib.sha256(to_hash).digest()\n\n raise ValueError(pretty_message(\n '''\n key_object must be an instance of the\n asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n classes, not %s\n ''',\n type_name(key_object)\n ))", "def key_pair_finger_print(self) -> str:\n return pulumi.get(self, \"key_pair_finger_print\")", "def _load_fingerprint(self):\n path = os.path.join(self._cache_path, '%s.fingerprint' % self._name)\n\n if not os.path.exists(path):\n return None\n\n with open(path) as f:\n fingerprint = f.read()\n\n return fingerprint" ]
[ "0.77391666", "0.76859707", "0.7634195", "0.7514036", "0.7471615", "0.7366445", "0.7218066", "0.71334374", "0.70777875", "0.7041138", "0.68841106", "0.68285894", "0.6695113", "0.6680278", "0.66001993", "0.66001993", "0.66001993", "0.65500057", "0.6528393", "0.6461605", "0.6425575", "0.6385517", "0.63824254", "0.63767177", "0.6375026", "0.6371168", "0.63486654", "0.6319337", "0.63149935", "0.6297245" ]
0.7981021
0
Get inventory list from config files builds a NetworkRunner inventory object and a mac_map dictionary according to ansible inventory file yaml definition
def __init__(self): self.inventory = {} self.mac_map = {} for conffile in CONF.config_file: # parse each config file sections = {} parser = cfg.ConfigParser(conffile, sections) try: parser.parse() except IOError as e: LOG.error(str(e)) # filter out sections that begin with the driver's tag hosts = {k: v for k, v in sections.items() if k.startswith(c.DRIVER_TAG)} # munge the oslo_config data removing the device tag and # turning lists with single item strings into strings for host in hosts: dev_id = host.partition(c.DRIVER_TAG)[2] dev_cfg = {k: v[0] for k, v in hosts[host].items()} for b in c.BOOLEANS: if b in dev_cfg: dev_cfg[b] = types.Boolean()(dev_cfg[b]) self.inventory[dev_id] = dev_cfg # If mac is defined add it to the mac_map if 'mac' in dev_cfg: self.mac_map[dev_cfg['mac'].upper()] = dev_id LOG.info('Ansible Host List: %s', ', '.join(self.inventory))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = \"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))", "def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info", "def readConfig(file=\"config.ini\"):\n ip_pool = []\n cmd_pool = []\n Config=ConfigParser.ConfigParser()\n Config.read(file)\n machines = Config.items(\"MACHINES\")\n commands = Config.items(\"COMMANDS\")\n for ip in machines:\n ip_pool.append(ip[1])\n for cmd in commands:\n cmd_pool.append(cmd[1])\n print cmd[1]\n return ip_pool,cmd_pool", "def load_config(self, config_src, report_metadata):\n for card_type in config_src.keys(): #card_type is project|assignment|epic\n for board_t in config_src[card_type].keys():\n board_id = config_src[card_type][board_t][':board_id']\n if not board_id in report_metadata: # initialize if the board wasn't present during the iterations over other 
card_type's\n if not board_id in report_metadata[':boards']:\n report_metadata[':boards'][board_id] = {};\n report_metadata[':boards'][board_id][':board_id'] = config_src[card_type][board_t][':board_id'] #copy board id\n report_metadata[':boards'][board_id][':board_name'] = board_t\n if not ':lists' in report_metadata[':boards'][board_id]:\n report_metadata[':boards'][board_id][':lists'] = []\n\n #iterate through all the lists and populate them\n for list_t in config_src[card_type][board_t][':lists'].keys():\n self.logger.debug(\"Adding board %s, list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':lists'][list_t]))\n list_id = config_src[card_type][board_t][':lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = False;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)\n if ':done_lists' in config_src[card_type][board_t]:\n for list_t in config_src[card_type][board_t][':done_lists'].keys():\n self.logger.debug(\"Adding board %s, Done list %s to the report\" % (config_src[card_type][board_t][':board_id'], config_src[card_type][board_t][':done_lists'][list_t]))\n list_id = config_src[card_type][board_t][':done_lists'][list_t]\n report_metadata[':lists'][list_id] = {};\n report_metadata[':lists'][list_id][':list_id'] = list_id\n report_metadata[':lists'][list_id][':completed'] = True;\n report_metadata[':lists'][list_id][':card_type'] = card_type;\n report_metadata[':lists'][list_id][':board_id'] = board_id\n report_metadata[':boards'][board_id][':lists'].append(list_id)", "def load(identifier, network):\n file = f\"{network}.{DEPLOYMENTS_FILENAME}\"\n\n if not os.path.exists(file):\n return\n\n with open(file) as fp:\n for line in fp:\n [address, abi, *alias] = line.split(\":\")\n identifiers = [x.strip() for x in [address] + alias]\n if identifier in identifiers:\n yield address, abi", "def generate_config(self):\n self.log.debug(\"generate-config\")\n self.qemu.args = [\n \"-nodefaults\",\n \"-only-migratable\",\n \"-cpu {cpu_model},enforce\",\n # Watch out: kvm.name is used for sanity checking critical actions.\n \"-name {name},process=kvm.{name}\",\n \"-chroot {{chroot}}\",\n \"-runas nobody\",\n \"-serial file:/var/log/vm/{name}.log\",\n \"-display vnc={{vnc}}\",\n \"-pidfile {{pidfile}}\",\n \"-vga std\",\n # We use this '-m' flag to find what a running VM is actually\n # using at the moment. If this flag is changed then that code must\n # be adapted as well. 
This is used in incoming.py and qemu.py.\n \"-m {memory}\",\n \"-readconfig {{configfile}}\",\n ]\n self.qemu.args = [a.format(**self.cfg) for a in self.qemu.args]\n\n vhost = ' vhost = \"on\"' if self.vhost else \"\"\n\n netconfig = []\n for net, net_config in sorted(self.cfg[\"interfaces\"].items()):\n ifname = \"t{}{}\".format(net, self.cfg[\"id\"])\n netconfig.append(\n \"\"\"\n[device]\n driver = \"virtio-net-pci\"\n netdev = \"{ifname}\"\n mac = \"{mac}\"\n\n[netdev \"{ifname}\"]\n type = \"tap\"\n ifname = \"{ifname}\"\n script = \"/etc/kvm/kvm-ifup\"\n downscript = \"/etc/kvm/kvm-ifdown\"\n{vhost}\n\"\"\".format(\n ifname=ifname, mac=net_config[\"mac\"], vhost=vhost\n )\n )\n\n with open(self.vm_config_template) as f:\n tpl = f.read()\n accelerator = (\n ' accel = \"{}\"'.format(self.accelerator)\n if self.accelerator\n else \"\"\n )\n machine_type = detect_current_machine_type(self.machine_type)\n self.qemu.config = tpl.format(\n accelerator=accelerator,\n machine_type=machine_type,\n disk_cache_mode=self.qemu.disk_cache_mode,\n network=\"\".join(netconfig),\n **self.cfg,\n )", "def parse_inventory(filename):\n data = {}\n group = None\n state = None\n\n try:\n inventory = open(filename)\n except Exception as e:\n msg('E', 'Cannot open inventory file %s. %s' % (filename, str(e)))\n\n # Walk through the file and build the data structure\n for line in inventory:\n line = line.strip()\n\n # Skip comments and blank lines\n if line.startswith('#') or line.startswith(';') or len(line) == 0:\n continue\n\n if line.startswith('['):\n # Get group name\n section = line[1:-1]\n\n # Parse subsection\n if ':' in line:\n group, state = line[1:-1].split(':')\n else:\n group = section\n state = 'hosts'\n\n if group not in data:\n data[group] = {}\n\n if state not in data[group]:\n if 'children' not in state:\n data[group][state] = {}\n else:\n data[group][state] = []\n else:\n # Parse hosts or group members/vars\n try:\n tokens = shlex.split(line, comments=True)\n except ValueError as e:\n msg('E', \"Error parsing host definition '%s': %s\" % (line, e))\n\n # Create 'all' group if no group was defined yet\n if group is None:\n group = 'all'\n state = 'hosts'\n data['all'] = {\n 'hosts': []\n }\n\n # Get parsed hostname\n hostname = tokens[0]\n\n # Parse variables\n variables = []\n if state == 'hosts':\n variables = tokens[1:]\n elif state == 'vars':\n variables = tokens\n\n if 'hosts' in state:\n data[group][state].update({hostname: {}})\n\n if 'children' in state:\n data[group][state].append(hostname)\n\n for var in variables:\n if '=' not in var:\n msg(\n 'E',\n \"Expected key=value host variable assignment, \"\n \"got: %s\" % var)\n\n (key, val) = var.split('=', 1)\n\n if 'hosts' in state:\n data[group][state][hostname].update({key: val})\n if 'vars' in state:\n data[group][state].update({key: val})\n # Close file\n try:\n inventory.close()\n except IOError as e:\n msg('E', 'Cannot close inventory file %s. 
%s' % (filename, str(e)))\n\n return data", "def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory", "def ibns_intf(task):\n # init lists of interfaces\n access_interfaces = []\n uplink_interfaces = []\n # iterate over all interfaces\n for intf in task.host[\"intfs\"]:\n\n # uplink interfaces\n if intf[\"interface\"] in task.host[\"uplinks\"]:\n uplink_interfaces.append(intf)\n\n # other non-excluded access ports\n elif intf[\"interface\"] not in task.host[\"excluded_intf\"]:\n if intf[\"access_vlan\"] in task.host[\"vlans\"]:\n access_interfaces.append(intf)\n\n # assign uplink interface list to task.host\n task.host[\"uplink_interfaces\"] = uplink_interfaces\n # render uplink interface configs\n uplink_intf_cfg = task.run(\n task=text.template_file,\n template=\"IBNS_uplink_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n # assign access interface list to task.host\n task.host[\"access_interfaces\"] = access_interfaces\n # render access interface configs\n access_intf_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS{task.host['ibns_ver']}_access_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n\n # init list of L3 vlan interfaces\n l3_vlan_int = [\"Vlan777\"]\n # list of vlan interfaces that will not relay\n no_relay_ints = [\"1\", \"666\", \"667\"]\n # iterate over active L3 interfaces\n for intf in task.host[\"ip_int_br\"]:\n # accept only those that are active vlan interfaces\n if intf[\"intf\"].startswith(\"Vlan\") == True and intf[\"status\"] == \"up\":\n # strip vlan id from interface name\n vlan_id = intf[\"intf\"].strip(\"Vlan\")\n # compare with list of no relay ints\n if vlan_id not in no_relay_ints:\n # add to list of interfaces for ISE DHPC relay\n l3_vlan_int.append(intf[\"intf\"])\n\n # save L3 vlan interfaces to task.host\n task.host[\"l3_vlan_int\"] = l3_vlan_int\n\n if \"emea\" in task.host['region']:\n L3VLAN_template = \"IBNS_EMEA_L3VLAN_intf.j2\"\n else:\n L3VLAN_template = \"IBNS_L3VLAN_intf.j2\"\n\n # render L3 vlan interface configs\n l3_vlan_int_cfg = task.run(\n task=text.template_file,\n template=L3VLAN_template,\n path=\"templates/\",\n **task.host,\n )\n\n # return configuration\n return uplink_intf_cfg.result + access_intf_cfg.result + l3_vlan_int_cfg.result", "def main():\n\n PASS = raw_input('password> ')\n\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n look_for_keys=False, allow_agent=False) as m:\n\n # print all NETCONF capabilities\n with open('output/netconf_101_capability.txt', 'w') as file:\n for capability in m.server_capabilities:\n file.write(str(capability))\n file.write('\\n')\n\n 
result_xmllist = []\n # run commands on the remote device\n for key in xmlns_dic.keys():\n data = m.get(('subtree', xmlns_dic[key]))\n result_xmllist.append(data)\n\n with open('output/netconf_101_rpc.xml', 'w') as file:\n file.write(str(result_xmllist))\n\n result_jsonlist = []\n for data in result_xmllist:\n # print all in xml\n print(data)\n\n # print all in json\n result_xml_str = repr(data)\n result_json_parsed_str = json.dumps(xmltodict.parse(result_xml_str))\n result_json_parsed_dict = json.loads(result_json_parsed_str)\n\n print(json.dumps(result_json_parsed_dict, indent=4, sort_keys=True))\n result_jsonlist.append(result_json_parsed_dict)\n\n with open('output/netconf_101_rpc.json', 'w') as file:\n json.dump(result_jsonlist, file, indent=4, sort_keys=True)\n\n\n # xml_doc = xml.dom.minidom.parseString(result.xml)\n # mac_address = xml_doc.getElementsByTagName(\"mod:mac_address\")\n # print(mac_address)", "def get_configured_interfaces():\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n allow_agent=False, look_for_keys=False) as m:\n\n with open(FILE) as f:\n return(m.get_config('running', f.read()))", "def get_hosts(self):\n self.logger.debug(colorama.Fore.BLUE +\n \"jsnapy.cfg file location used : %s\" %\n get_config_location(), extra=self.log_detail)\n self.logger.debug(colorama.Fore.BLUE +\n \"Configuration file location used : %s\" %\n get_path('DEFAULT', 'config_file_path'), extra=self.log_detail)\n \n if self.args.pre_snapfile is not None:\n output_file = self.args.pre_snapfile\n elif self.args.snapcheck is True and self.args.pre_snapfile is None:\n output_file = \"snap_temp\"\n self.snap_del = True\n else:\n output_file = \"\"\n conf_file = self.args.file\n check = self.args.check\n snap = self.args.snap\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, 'r')\n self.main_file = yaml.load(config_file)\n elif os.path.isfile(os.path.join(get_path('DEFAULT', 'config_file_path'), conf_file)):\n fpath = get_path('DEFAULT', 'config_file_path')\n config_file = open(os.path.join(fpath, conf_file), 'r')\n self.main_file = yaml.load(config_file)\n else:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
Config file '%s' is not present \" %\n conf_file, extra=self.log_detail)\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {'hosts':[{'device':'', 'username':'', 'passwd':''}], 'tests':[]}\n temp_dict['hosts'][0]['device'] = self.args.hostname\n temp_dict['hosts'][0]['username'] = self.args.login\n temp_dict['hosts'][0]['passwd'] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict['tests'].append(tfile)\n self.main_file = temp_dict\n\n\n #### if --check option is given for sqlite, then snap file name is not compulsory ####\n #### else exit the function saying arguments not correct ####\n if self.main_file.__contains__(\n 'sqlite') and self.main_file['sqlite'] and self.main_file['sqlite'][0]:\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n check,\n snap)\n else:\n if (self.args.check is True and (\n self.args.file is None or self.args.pre_snapfile is None or self.args.post_snapfile is None)):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n self.login(output_file)", "def get_hostsdata_from_hostsfile(hosts_file) -> dict:\n\n if not os.path.isfile(hosts_file):\n logger.error(f\"Suzieq inventory {hosts_file} must be a file\")\n print(f\"ERROR: Suzieq inventory {hosts_file} must be a file\")\n sys.exit(1)\n\n if not os.access(hosts_file, os.R_OK):\n logger.error(\"Suzieq inventory file is not readable: {}\", hosts_file)\n print(\"ERROR: hosts Suzieq inventory file is not readable: {}\",\n hosts_file)\n sys.exit(1)\n\n with open(hosts_file, \"r\") as f:\n try:\n data = f.read()\n hostsconf = yaml.safe_load(data)\n except Exception as e:\n logger.error(\"Invalid Suzieq inventory file:{}\", e)\n print(\"Invalid Suzieq inventory file:{}\", e)\n sys.exit(1)\n\n if not hostsconf or isinstance(hostsconf, str):\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n if not isinstance(hostsconf, list):\n if '_meta' in hostsconf.keys():\n logger.error(\"Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n print(\"ERROR: Invalid Suzieq inventory format, Ansible format??\"\n \" Use -a instead of -D with inventory\")\n else:\n logger.error(f\"Invalid Suzieq inventory file:{hosts_file}\")\n print(f\"ERROR: Invalid hosts Suzieq inventory file:{hosts_file}\")\n sys.exit(1)\n\n for conf in hostsconf:\n if any(x not in conf.keys() for x in ['namespace', 'hosts']):\n logger.error(\"Invalid inventory:{}, no namespace/hosts sections\")\n print(\"ERROR: Invalid inventory:{}, no namespace/hosts sections\")\n sys.exit(1)\n\n return hostsconf", "def main():\n # Take path argument and list all text files\n\n ip = '10.1.10.100'\n a_user = 'cisco'\n auth_key = 'cisco123'\n encr_key = 'cisco123'\n snmp_user = (a_user, auth_key, encr_key)\n sw1 = (ip, 161)\n\n sysDescr = '1.3.6.1.2.1.1.1.0'\n sysObjectID = '1.3.6.1.2.1.1.2.0'\n sysUpTime = '1.3.6.1.2.1.1.3.0'\n sysContact = '1.3.6.1.2.1.1.4.0'\n sysNmae = '1.3.6.1.2.1.1.5.0'\n ifNumber = '1.3.6.1.2.1.2.1.0'\n\n\n # Uptime when running config last changed\n RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'\n\n # Uptime when running config last saved (note any 'write' constitutes a save)\n RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'\n\n # Uptime when startup config last saved\n StartLastChanged = 
'1.3.6.1.4.1.9.9.43.1.1.3.0'\n\n ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1'\n ifName = '1.3.6.1.2.1.31.1.1.1.1.1'\n\n snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des')\n #print(snmp_data)\n\n # snmp_get_oid_v3(snmp_device, snmp_user, oid='.1.3.6.1.2.1.1.1.0', auth_proto='sha',\n # encrypt_proto='aes128', display_errors=True):\n\n #snmp_extract(snmp_data):\n\n output = snmp_helper.snmp_extract(snmp_data)\n print output", "def list_inventory(self):\n inventory = {}\n host_vars = {}\n\n for droplet in self.do.droplets:\n for rule in self.group_rules:\n rule.apply(droplet, inventory)\n\n host_vars[droplet[\"ip_address\"]] = {\n \"do_{}\".format(k): v for k, v in droplet.iteritems()\n }\n\n inventory[\"_meta\"] = {\n \"hostvars\": host_vars\n }\n\n return inventory", "def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. 
Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}", "def _get_config_map():\n path = os.path.join(os.path.dirname(__file__), \"nadamw_configs.json\")\n configs = json.loads(open(path).read())\n return configs", "def readConfig(file=\"dispatcher.conf\"):\n\n parser = configparser.ConfigParser()\n parser.read(file)\n machines = parser.items(\"MACHINES\")\n commands = parser.items(\"COMMANDS\")\n\n return machines, commands", "def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': {}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"", "def get_network_config2():\n interfaces = get_interfaces()\n ips = [get_ip_address2(ip) for ip in interfaces]\n return dict(zip(interfaces,ips))", "def network_config(args): # pylint: disable-msg=W0613\n if not NETLOCK.acquire_read(NET_LOCK_TIMEOUT):\n raise HttpReqError(503, \"unable to take NETLOCK for reading after %s seconds\" % NET_LOCK_TIMEOUT)\n try:\n netconf = xivo_config.load_current_configuration()\n return yaml_json.stringify_keys(netconf)\n finally:\n NETLOCK.release()", "def load_networks(self, start=False):\n logging.debug(\"%s load_networks entered\" % self)\n # networks = self.infra['networks']\n all_containers = cf.list_containers()\n if self.container_name in all_containers:\n logging.info(\"found existing container, checking for network configuration\")\n mycontainer = cf.get_container(self.container_name)\n try:\n index = mycontainer.get_object(\"index.json\")\n mconf = json.loads(index.fetch())\n for network in mconf['networks'].keys():\n logging.info(\"loading %s from file\" % network)\n new_network = Network(self, network)\n if mconf['networks'][network].has_key(\"uuid\"):\n uuid = mconf['networks'][network][\"uuid\"]\n # print \"YYY: \", uuid\n new_network.load(uuid, start=start)\n self.networks[network] = new_network\n except Exception, e:\n # print \"ALJKALDFDKSJFLSKJDf\"\n logging.warn(e.message)\n import traceback\n logging.debug(traceback.print_exc())\n \n # check if they exist...\n # for net in networks.keys():\n # # create the network object\n # new_net = Network(self, net) \n # ", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in 
(conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def get_inventory(cls, gen, folder, inv_type='both', return_type='segment', cons_only=True):\n\n inventory = list()\n filename = 'temp_output{}.txt'.format(gen)\n if not os.path.exists(os.path.join(folder,filename)):\n return None\n with open(os.path.join(folder, filename), encoding='utf_8_sig') as f:\n f.readline()\n f.readline()#dunno why i have to do this twice...\n feature_names = f.readline()\n feature_names = feature_names.strip()\n feature_names = feature_names.split('\\t')\n\n for line in f:\n line = line.strip()\n if (not line) or line.startswith('VAR'):\n continue\n elif line.startswith('Phonemes'):\n phonemes = line.split(':')[-1]\n phonemes = phonemes.split(',')\n elif line.startswith('Allophones'):\n allophones = line.split(':')[-1]\n allophones = allophones.split(',')\n allophones = [a.split('~')[-1] for a in allophones]\n phonemes = [p for p in phonemes if not p in allophones]\n break\n else:\n inventory.append(line) #this creates a list of segments with phonological features values\n\n\n if return_type == 'segment':\n new_inventory = dict()\n for line in inventory:\n line = line.split('\\t')\n symbol = line[0]\n features = [sign+name for sign,name in zip(line[1:],feature_names)]\n if inv_type in ['underlying', 'core', 'ur', 'UR'] and symbol in phonemes:\n new_inventory[symbol] = features\n elif inv_type in ['surface', 'sr', 'SR', 'phonetic'] and symbol not in phonemes:\n new_inventory[symbol] = features\n elif inv_type == 'both':\n new_inventory[symbol] = features\n\n elif return_type == 'pyilm':\n new_inventory = list()\n for line in inventory:\n line = line.split('\\t')\n symbol = line[0]\n features = [sign+name for sign,name in zip(line[1:], feature_names)]\n new_inventory.append(phonology.Segment(symbol, features))\n\n elif return_type == 'string':\n new_inventory = [line.split('\\t')[0] for line in inventory]\n if inv_type in ['underlying', 'core', 'ur', 'UR']:\n new_inventory = [seg for seg in new_inventory if seg in phonemes]\n elif inv_type in ['surface', 'sr', 'SR', 'phonetic']:\n new_inventory = [seg for seg in new_inventory if not seg in phonemes]\n #else inv_type=='both', just return the new_inventory variable\n\n return new_inventory", "def ibns_snmp(task):\n snmp_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS_snmp.j2\",\n path=\"templates/\",\n **task.host,\n )\n # return configuration\n return snmp_cfg.result", "def process_inventory(inv_name, output_file):\n try:\n gen_dict = _load_yml(inv_name)\n env_vars_dict = gen_dict.get('deployment-environment')\n out = open(output_file, 'w')\n if env_vars_dict is None or env_vars_dict == {}:\n out.write('---\\n')\n out.write('deployment_environment_variables: {}\\n')\n else:\n out.write('---\\n')\n out.write('deployment_environment_variables:\\n')\n for k in env_vars_dict:\n out.write(' ' + k + ': ' + env_vars_dict[k] + '\\n')\n out.close()\n except Exception:\n sys.stderr.write(\"Unable to write the file: \" + output_file + \"\\n\")\n sys.exit(1)", "def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % 
(self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))", "def InitFromCfg(self, cfgfile):\n\n self.cfg = ConfigParser.RawConfigParser()\n self.cfg.read(cfgfile)\n\n # how many universes? read any config items starting with \"universe\"\n\n universes = [item[1] for item in self.cfg.items('DMX')\n if item[0].startswith('universe')]\n\n if len(universes) < 1:\n print 'no universes detected in config file! Bye.'\n exit()\n\n self.universes = universes\n print repr(universes)\n\n board_count = 0\n\n # get a list of pods\n\n podnames = self.cfg.get('pods', 'pods')\n podnames = podnames.split(',')\n\n self.pods = []\n\n for p in podnames:\n\n pname = 'pod' + p\n uni = self.cfg.getint(pname, 'universe')\n new_pod = Pod(pname, uni)\n\n # first, get start addresses of all boards\n nboards = len([item[1] for item in self.cfg.items(pname)\n if item[0].startswith('board')])\n starts = [0] * nboards\n bnames = [(n, 'board' + str(n)) for n in range(nboards)]\n for (n, b) in bnames:\n starts[n] = self.cfg.getint(pname, b)\n\n #print 'pod ' + new_pod.name\n\n # get ordered list of limbs\n lnames = ['branch-1', 'branch-2', 'branch-3', 'branch-4',\n 'branch-5']\n\n for lname in lnames: # for each limb\n\n # get list of branch names for this limb (ending with A, eg)\n lbrnames = [item[0] for item in self.cfg.items(pname)\n if item[0].startswith(lname)]\n\n nbranches = len(lbrnames)\n if nbranches > 0:\n\n # now we have list of branch names for this limb.\n # make a new limb with this many branches\n limb = Limb(p + lname, nbranches)\n\n # now for every branch in this limb, add it to the Limb\n for brname in lbrnames:\n\n data = self.cfg.get(pname, brname)\n data = [int(k) for k in data.split(',')]\n\n # data is a list of [board, rchan,bchan,gchan]\n board = data[0]\n start = starts[board] # start address for this branch\n\n new_branch = Branch(p + brname, start, uni,\n board, (data[1], data[2], data[3]))\n\n data = brname.split('-')\n index = int(data[2])\n\n # print \"adding branch %d\" % index + new_branch.name\n limb.addBranch(index, new_branch)\n\n sys.stdout.flush()\n new_pod.limbs.append(limb)\n self.pods.append(new_pod)\n\n # all boards read in. Now create list of limbs and branches[]\n brcount = 0\n self.branches = []\n self.limbs = []\n self.limblist = []\n for pod in self.pods:\n self.limbs.append(pod.limbs)\n self.limblist.extend(pod.limbs)\n for lb in pod.limbs:\n for br in lb.branches:\n br.brindex = brcount\n self.branches.append(br)\n brcount += 1\n\n self.make_branch_matrix()", "def main():\n dump(inventory(), fp=stdout, indent=4)", "def readConfig():\n hosts = []\n domains = []\n with open(\"./host.conf\", \"r\") as fd:\n for line in fd.readlines():\n line = line.strip().split()\n if line != []:\n # Parse config for zone files and hosts\n if line[0] == \"ZONE_FILE:\":\n zoneFile = line[1]\n if line[0] == \"REVERSE_ZONE_FILE:\":\n reverseZoneFile = line[1]\n if line[0] == \"HOST:\":\n hosts.append((line[1], line[2], line[3]))\n if line[0] == \"DOMAIN:\":\n domains.append((line[1], line[2], line[3]))\n\n return zoneFile, reverseZoneFile, hosts, domains" ]
[ "0.57955146", "0.5357593", "0.5309615", "0.52972466", "0.52823687", "0.52815616", "0.5225071", "0.5193531", "0.51922613", "0.5180207", "0.5171256", "0.5163886", "0.51440394", "0.5126848", "0.5065334", "0.5064806", "0.5061558", "0.50566524", "0.5048825", "0.5046078", "0.50313157", "0.50112766", "0.4965577", "0.49616477", "0.496006", "0.48806787", "0.48702684", "0.48635146", "0.48424098", "0.48233685" ]
0.6786082
0
Test case for get_liveness Get job service liveness
def test_get_liveness(self): response = self.client.open('/api/v1//liveness', method='GET', content_type='application/json') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def liveness():\n return '', 200", "def test_get_refresh_job_status(self):\n pass", "def liveness_probe():\n return \"I am still alive!\"", "def test_estimate_liveness_batch(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=True)\n estimations = self.livenessEstimator.estimateBatch([self.detection, detection])\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n for estimation in estimations:\n self.assertLivenessEstimation(estimation)", "def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()", "def test_health_get(self):\n pass", "def main(args = sys.argv):\n\n parser = parser_setup()\n poptions = parser.parse_args()\n\n if poptions.quiet:\n logging.basicConfig(level=logging.WARNING, format=log_format)\n elif poptions.debug:\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n else:\n # Set up the default logging levels\n logging.basicConfig(level=logging.INFO, format=log_format)\n # Make this a little less noisy by default\n requests_log = logging.getLogger(\"requests.packages.urllib3.connectionpool\")\n requests_log.setLevel(logging.WARN)\n\n if not poptions.base_api_url and \"LIMS_API_URL\" in os.environ:\n api_url = os.environ[\"LIMS_API_URL\"]\n log.debug(\"Using LIMS API endpoint: %s from environment\" % api_url)\n elif poptions.base_api_url:\n api_url = poptions.base_api_url\n log.debug(\"Using LIMS API endpoint: %s from options\" % api_url)\n else:\n sys.stderr.write(\"Could not find LIMS API URL.\\n\")\n sys.exit(1)\n\n\n if not poptions.token and \"LIMS_API_TOKEN\" in os.environ:\n token = os.environ[\"LIMS_API_TOKEN\"]\n elif poptions.token:\n token = poptions.token\n else:\n sys.stderr.write(\"Could not find LIMS API TOKEN.\\n\")\n sys.exit(1)\n\n monitor = ClusterMonitor(api_url, token, cluster_type=poptions.cluster)\n\n monitor.run()", "def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_async_estimate_liveness(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF))\n task = self.livenessEstimator.estimate(detection, asyncEstimate=True)\n self.assertAsyncEstimation(task, LivenessV1)\n task = self.livenessEstimator.estimateBatch([detection] * 2, asyncEstimate=True)\n self.assertAsyncBatchEstimation(task, LivenessV1)", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "def test_livenessv1_as_dict(self):\n livenessDict = self.livenessEstimator.estimate(self.detection).asDict()\n assert (\n jsonValidator(schema=LIVENESSV1_SCHEMA).validate(livenessDict) is None\n ), f\"{livenessDict} does not match with schema {LIVENESSV1_SCHEMA}\"", "def test_query_train_jobs_with_wrong_offse(self, client):\n params = dict(offse=0, limit=10)\n url = get_url(BASE_URL, params)\n response = 
client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def test_lbheartbeat(self):\n pass", "def test_estimate_liveness_batch_without_landmarks68(self):\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=False)\n estimations = self.livenessEstimator.estimateBatch([self.detection, detection])\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n for estimation in estimations:\n self.assertLivenessEstimation(estimation)", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_estimate_liveness_batch_with_threshold(self):\n qualityThreshold = 0.9\n detection = self.detector.detectOne(VLImage.load(filename=SPOOF))\n estimations = self.livenessEstimator.estimateBatch(\n [self.detection, detection],\n qualityThreshold=qualityThreshold,\n )\n assert isinstance(estimations, list)\n assert len(estimations) == 2\n self.assertLivenessEstimation(estimations[0], LivenessPrediction.Real)\n self.assertLivenessEstimation(estimations[1], LivenessPrediction.Spoof)", "def query_job_progress():\n pass", "def test_query_train_jobs(self, client):\n params = dict(offset=0, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(10, SUMMARY_DIR_NUM)", "def test_lis_test(desc, inputs, exp_results, condition):\n pywbemlistener_test(desc, inputs, exp_results, condition)", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})", "def test_linestatus_for_multiple_input(self, client):\n response = client.get(url_for(\"status\") + \"?line=lsp\")\n\n assert response.status_code == 404", "def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt 
{retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}", "def test_get_hyperflex_health_list(self):\n pass", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def __await_helms_installation(self, job_id, expected_services_count):\n end_waiting = datetime.now().timestamp() + self.TIMEOUT_MIN * 60 * 1000\n curr_status = self.helm_results.get(job_id)\n while datetime.now().timestamp() <= end_waiting:\n curr_status = self.helm_results.get(job_id, {\"services\": []})\n if expected_services_count != len(curr_status[\"services\"]):\n time.sleep(1.)\n else:\n self.helm_results.pop(job_id)\n return curr_status\n self.helm_results.pop(job_id)\n return curr_status", "def test_lti20_get_with_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT = u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\",\r\n \"resultScore\": SCORE,\r\n \"comment\": COMMENT})", "def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')" ]
[ "0.6060957", "0.60050637", "0.5874817", "0.5739824", "0.56933695", "0.5687158", "0.5626335", "0.56105167", "0.560023", "0.55861163", "0.55428433", "0.5536615", "0.55124384", "0.54971635", "0.5488803", "0.5478777", "0.5478777", "0.54599184", "0.54395", "0.54195917", "0.5348092", "0.5341301", "0.53278685", "0.53211635", "0.5308946", "0.5256032", "0.5236095", "0.5225105", "0.5223274", "0.52098006" ]
0.73525107
0
Test case for get_readiness Get job service readiness
def test_get_readiness(self): response = self.client.open('/api/v1//readiness', method='GET', content_type='application/json') self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_refresh_job_status(self):\n pass", "def test_readiness_endpoint(self):\n url = f'{BASE_URL}/ready'\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 503\n assert response_json['status'] == 503", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_active(self):\n\n url = '/%s/job-types/status/?is_active=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def test_cachedjob_get_status(cached_job):\n \n # Setup\n c_job = cached_job\n \n # Execute\n expected_status = StatusEnum(JOB_DETAILS_HTML['status'])\n cached_status = c_job.status\n\n # Verify\n assert expected_status == cached_status", "def test_running_job(self):\n running_job = json.loads(BASE_JSON % ('null', 'null', 0, 'null'))[0]\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def test_health_get(self):\n pass", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_is_active(self):\n\n url = '/%s/job-types/?is_active=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def test_get_node_status_batterystatus(self):\n pass", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_is_active(self):\n\n url = '/%s/job-type-names/?is_active=false' % 
self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n\n self.assertEqual(len(result['results']), 2)", "def query_job_progress():\n pass", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']", "def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False", "async def test_healthy(coresys: CoreSys):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n\n @Job(conditions=[JobCondition.HEALTHY])\n async def execute(self):\n \"\"\"Execute the class method.\"\"\"\n return True\n\n test = TestClass(coresys)\n assert await test.execute()\n\n coresys.resolution.unhealthy = UnhealthyReason.DOCKER\n assert not await test.execute()", "def test_lbheartbeat(self):\n pass", "def test_get_refresh_status(api: API, account: Account):\n account._latest_refresh_job_id = \"123_job_id\"\n api.candlepin.get_job.return_value = {\"state\": \"FINISHED\"}\n assert account.get_refresh_status() == \"FINISHED\"\n api.candlepin.get_job.assert_called_once_with(\"123_job_id\")", "def test_job_fail(client):\n response = client.get('/status/random')\n assert response.status_code == 400", "async def get_status():", "def test_pending_job(self):\n pending_job = json.loads(BASE_JSON % ('null', 'null', 0, 1433166609))[0]\n pending_job.pop(\"status\")\n self.assertEquals(self.query_api.get_job_status(pending_job), PENDING)", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
[ "0.72830915", "0.6839322", "0.6632437", "0.65635777", "0.63915956", "0.63915956", "0.6313089", "0.6205987", "0.61734414", "0.61597115", "0.614555", "0.6124894", "0.6102056", "0.6053104", "0.604147", "0.60116535", "0.59794265", "0.59655803", "0.5928916", "0.5915668", "0.5885323", "0.5857235", "0.58556134", "0.58373505", "0.5816909", "0.57968056", "0.5785838", "0.5765063", "0.57615423", "0.5757878" ]
0.7425097
0
=============================================================== save_obj(obj, saved_name ) =============================================================== this function is used to save any python object to your hard desk
def save_obj(obj, saved_name ): with open( saved_name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_obj(obj, name):\r\n with open('../pickle/' + name + '.pkl', 'wb') as fout:\r\n pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)\r\n # end with\r", "def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveObj(obj, name):\n\n os.system(\"touch \" + name + \".pkl\")\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save_object(self, name: str, obj: object):\r\n with open_(self._path_for_pickle(name), \"wb\") as f:\r\n dill.dump(obj, f)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def save_object(obj, filename: str):\n with open(filename, 'wb') as save_file:\n pickle.dump(obj, save_file)", "def save_object(o, fn):\n return dump_object(o, fn)", "def save_to_disk(name, object):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'save_to_disk(%s)' % shortname\n pkl_file = open(shortname , 'wb')\n pickle.dump(object, pkl_file, -1) # Pickle the list using the highest protocol available.\n pkl_file.close()", "def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)", "def save_object(self, name: str, object):\n file_path = self.__get_file_path(name)\n self.__serialize_object(file_path, object)", "def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save(self, obj):", "def save(self, obj, filename):\n if not self.enabled:\n return\n\n # get unique filepath and filename\n index = 0\n while True:\n filepath = join(self.path, filename+\"_\"+str(index))\n if os.path.isfile(filepath):\n index = index + 1\n continue\n break\n\n # save object\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n try:\n pickle.dump(obj, f)\n except Exception as e:\n log.exception(e)\n log.warning(f\"save failed for {filename} {type(obj)}\")", "def save(obj, obj_name):\n try:\n _save(obj, 
os.path.join(KALE_DATA_DIRECTORY, obj_name))\n except KaleMarshalException as e:\n log.error(e)\n log.debug(\"Original Traceback\", exc_info=e.__traceback__)\n utils.graceful_exit(1)", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save_obj_joblib(obj, obj_path,obj_name,supersede=False):\n\n obj_path=os.path.join(obj_path,obj_name)\n\n if os.path.isfile(obj_path):\n if supersede:\n try:\n os.remove(obj_path)\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is replaced and saved!\")\n except OSError:\n print(\"save_obj_joblib: Object couldn't be saved\")\n else:\n raise OSError(\"save_obj_joblib: There exists a object with the same name already.\")\n else:\n if os.path.isdir(os.path.dirname(obj_path)):\n pass\n else:\n os.mkdir(os.path.dirname(obj_path))\n joblib.dump(obj, obj_path)\n print(\"save_obj_joblib: \"+os.path.basename(obj_path)+\" is saved!\")", "def save(self, obj):\n raise NotImplementedError", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)" ]
[ "0.83799785", "0.83523476", "0.834026", "0.834026", "0.8322952", "0.8320533", "0.81125903", "0.805116", "0.80221593", "0.79732233", "0.7954794", "0.79475254", "0.790895", "0.78999376", "0.7857288", "0.7842152", "0.7814447", "0.7774819", "0.76592255", "0.7642796", "0.76062477", "0.75891984", "0.7582979", "0.75254", "0.7468239", "0.7434214", "0.73950934", "0.72940403", "0.72515553", "0.71275175" ]
0.88148
0
=============================================================== load_obj(saved_name) =============================================================== this function is used to save any python object to your hard desk
def load_obj(saved_name): with open( saved_name + '.pkl', 'rb') as f: return pickle.load(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\r\n with open('../pickle/' + name + '.pkl', 'wb') as fout:\r\n pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)\r\n # end with\r", "def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveObj(obj, name):\n\n os.system(\"touch \" + name + \".pkl\")\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def save_object(self, name: str, obj: object):\r\n with open_(self._path_for_pickle(name), \"wb\") as f:\r\n dill.dump(obj, f)", "def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save_to_disk(name, object):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'save_to_disk(%s)' % shortname\n pkl_file = open(shortname , 'wb')\n pickle.dump(object, pkl_file, -1) # Pickle the list using the highest protocol available.\n pkl_file.close()", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, path: str):\n with open(path, 'wb') as h:\n pickle.dump(obj, h)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def pickle_object(obj, ofname: \"Path|str\"):\n ofname = Path(ofname)\n maybe_make_output_dir(ofname)\n with ofname.open(\"wb\") as f:\n pickle.dump(obj, f)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save_object(obj, filename: str):\n with open(filename, 'wb') as save_file:\n pickle.dump(obj, save_file)", "def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)", "def save_object(self, name: str, object):\n file_path = 
self.__get_file_path(name)\n self.__serialize_object(file_path, object)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save_object(o, fn):\n return dump_object(o, fn)" ]
[ "0.81122315", "0.7685448", "0.7685448", "0.7588679", "0.75443125", "0.74489355", "0.74238276", "0.7387553", "0.730978", "0.730978", "0.7296159", "0.7197405", "0.71187323", "0.7099137", "0.7085125", "0.7082581", "0.70419323", "0.70103645", "0.70006555", "0.6940859", "0.6908531", "0.6907691", "0.6880611", "0.6829744", "0.6826534", "0.6794035", "0.6790917", "0.6768082", "0.674998", "0.6625734" ]
0.82277983
0
=========================================================== DateFormatedSQL(x) =========================================================== this function converts the the date read from a list to a datetime format
def DateFormatedSQL(x): x=[i[0] for i in x] x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def sql_date(date):\n return \"to_date('{}', 'dd.mm.yyyy')\".format(date)", "def DateFormated(x):\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def datetime_to_sql(connection, obj):\n return connection.string_literal(datetime_to_str(obj))", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def get_date():\n temp = pd.read_sql_query(_query['date'], connect())\n return temp.values", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def convert_column_dates2str(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for ii,el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.date2str(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in 
items])\n elif output == 'list':\n return [self.date2str(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]", "def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def transform_datetimes(data: Any) -> Any:\n\n if isinstance(data, (datetime, date)):\n return data.isoformat()\n\n if isinstance(data, (list, tuple)):\n tmp_data = [transform_datetimes(elem) for elem in data]\n\n return tuple(tmp_data) if isinstance(data, tuple) else tmp_data\n\n if isinstance(data, dict):\n for key, val in data.items():\n data[key] = transform_datetimes(val)\n\n return data", "def get_dt_string(type_list):\n output = ''\n for entry in type_list:\n output = output+entry+'64,'\n return output[0:-1]", "def serialize(date):\n # From database to client\n\n # Convert date-object to datetime\n # See: https://stackoverflow.com/questions/1937622/convert-date-to-datetime-in-python\n dt = datetime.combine(date, datetime.min.time())\n date_format = \"%Y-%m-%d\"\n return datetime.strftime(dt, date_format)", "def convert_column_str2dates(self, info_in, output='list'):\n if hasattr(info_in, 'keys'):\n items = [(el, el) for el in self._columns.keys()]\n elif hasattr(info_in, '__getitem__'):\n items = [(ii, el) for el in enumerate(self._columns.keys())]\n else:\n raise Exception('Only accepts dict, dict or list')\n \n if output == 'dict':\n return dict([(el1, self.str2date(info_in[el0])) if self.column_is_date[el1] else (el1, info_in[el0]) for el0, el1 in items])\n elif output == 'list':\n return [self.str2date(info_in[el0]) if self.column_is_date[el1] else info_in[el0] for el0, el1 in items]\n else:\n raise Exception('output type %s unkown'%output)", "def join_date_strings(dates, separator=\"','\", df=\"%d-%m-%Y\"):\n return separator.join([x.strftime(df) for x in dates])", "def change_format_to_database_index(self, date):\n year = date[0:4] + ','\n month = date[4:6]\n day = date[6:8]\n if month[0] == '0':\n month = month[1]\n\n if day[0] == '0':\n day = day[1]\n\n day = ' ' + day + ','\n month = ' ' + month\n\n return year + day + month", "def dateToString(self, date_objs: list) -> list:\n date_strings = []\n try:\n if isinstance(date_objs, list) == False:\n return date_strings\n\n for date_obj in date_objs:\n if isinstance(date_obj, datetime) == False:\n continue\n date_strings.append(datetime.strftime(date_obj, '%d %b %Y'))\n\n return date_strings\n except Exception as e:\n logging.error(e)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _date_to_string(v):\n\n if not isinstance(v,(list,tuple)):\n raise InstrumentParameterException('Value %s is not a list, tuple.' % str(v))\n \n if not len(v)==3:\n raise InstrumentParameterException('Value %s is not length 3.' % str(v))\n \n months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep',\n 'Oct','Nov','Dec']\n day = v[0]\n month = v[1]\n year = v[2]\n \n if len(str(year)) > 2:\n year = int(str(year)[-2:])\n \n if not isinstance(day,int) or day < 1 or day > 31:\n raise InstrumentParameterException('Value %s is not a day of month.' % str(day))\n \n if not isinstance(month,int) or month < 1 or month > 12:\n raise InstrumentParameterException('Value %s is not a month.' % str(month))\n\n if not isinstance(year,int) or year < 0 or year > 99:\n raise InstrumentParameterException('Value %s is not a 0-99 year.' % str(year))\n \n return '%02i-%s-%02i' % (day,months[month-1],year)" ]
[ "0.66734904", "0.65000284", "0.6259414", "0.59757656", "0.5600508", "0.5579302", "0.5578522", "0.5551475", "0.5513122", "0.54512274", "0.5435365", "0.52705914", "0.52298", "0.5214014", "0.5199284", "0.51939476", "0.5177129", "0.5144611", "0.51139647", "0.5111645", "0.5084982", "0.50844026", "0.50539654", "0.50317484", "0.5007062", "0.50055915", "0.5004682", "0.5003799", "0.49934232", "0.494639" ]
0.7940835
0
=========================================================== dateformated(x) =========================================================== this function converts the the date read from a list to a datetime format
def DateFormated(x): x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")])\n\n return data_output", "def DateFormatedSQL(x):\n x=[i[0] for i in x]\n \n x1=[]\n for i in x:\n if len(i)==19:\n x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) ))\n# elif len(i)==13:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) ))\n# else:\n# x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) ))\n# del i,x\n return x1", "def convert_datetime_objs(list_of_dates):\n datetime_list = []\n for date in list_of_dates:\n date_obj = datetime.datetime.strptime(date, '%d.%m.%Y')\n datetime_list.append(date_obj)\n return datetime_list", "def datetimefstr(date_list, datetimeformat, longdatetimeformat):\n try:\n # including year\n parts = longdatetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, longdatetimeformat)\n for _ in range(parts):\n date_list.pop(0)\n except ValueError:\n # without year\n parts = datetimeformat.count(' ') + 1\n dtstring = ' '.join(date_list[0:parts])\n dtstart = datetime.strptime(dtstring, datetimeformat)\n if dtstart.timetuple()[0] == 1900:\n dtstart = datetime(date.today().timetuple()[0],\n *dtstart.timetuple()[1:5])\n # if start date lies in the past use next year\n #if dtstart < datetime.today():\n #dtstart = datetime(dtstart.timetuple()[0] + 1,\n #*dtstart.timetuple()[1:6])\n for _ in range(parts):\n date_list.pop(0)\n return dtstart", "def to_date(d, date_format = \"%Y-%m-%d %H:%M:%S.%f\"):\n if type(d) == pd.core.series.Series:\n d = list(d)\n if type(d) == list:\n return [datetime.strptime(date,date_format) if type(date) == str else date for date in d]\n elif type(d) == str:\n return datetime.strptime(d,date_format)\n else:\n raise ValueError(\"Either String or list of Strings is accepted.\")", "def timefstr(date_list, timeformat):\n time_start = time.strptime(date_list[0], timeformat)\n time_start = dtime(*time_start[3:5])\n day_start = date.today()\n dtstart = datetime.combine(day_start, time_start)\n date_list.pop(0)\n return dtstart", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def _reformat_date(exp_dates):\n time_fmt = \"%Y-%m-%dT%H:%M:%S\"\n wrong_time_fmt = \"%Y-%m-%d %H:%M:%S\"\n if exp_dates == 'NN':\n return exp_dates\n if exp_dates != 'NN' and not isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates, time_fmt)\n except ValueError:\n try:\n exp_dates = datetime.strptime(exp_dates,\n wrong_time_fmt).strftime(time_fmt)\n except ValueError:\n exp_dates = datetime.strptime(exp_dates,\n \"%m/%d/20 %H:%M\").strftime(time_fmt)\n\n if exp_dates != 'NN' and isinstance(exp_dates, list):\n try:\n datetime.strptime(exp_dates[0], time_fmt)\n except ValueError:\n exp_dates = [datetime.strptime(c, wrong_time_fmt).strftime(time_fmt)\n for c in exp_dates]\n\n return exp_dates", "def date_parser(dates):\n\n #splitting the dates(containing datetime data) list and returning only the datetime\n return([item.split()[0] for item in dates])\n pass", "def date_trans_z(x):\n \"\"\"2017.01.09->2017/01/09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'/'+date_list[1]+'/'+date_list[2]", "def reformatdate(self, date):\n# print('DATE', 
self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day", "def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)", "def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format", "def datemake(datestring):\n return dtt.datetime.strptime(datestring,'%m/%d/%Y')", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def translate_dates(dates):\r\n formatted_dates = list()\r\n year = dt.today().year\r\n for dat in dates:\r\n if dat == '':\r\n continue\r\n day = dat[:2]\r\n mont = dat[6:]\r\n if int(day) < 10:\r\n day = '0' + day[1]\r\n if mont != '':\r\n # Month from Comuniazo\r\n month = \\\r\n {'enero': '01', 'febrero': '02', 'marzo': '03', 'abril': '04',\r\n 'mayo': '05', 'junio': '06', 'julio': '07', 'agosto': '08',\r\n 'septiembre': '09', 'octubre': '10', 'noviembre': '11', 'diciembre': '12'}[mont]\r\n else:\r\n # Month from Comunio\r\n month = dat[3:5]\r\n\r\n if month + day == '0101' or (formatted_dates and int(month) > formatted_dates[-1].month):\r\n # One year less\r\n year -= 1\r\n\r\n p_date = datetime.strptime('%s-%s-%s' % (year, month, day), \"%Y-%m-%d\").date()\r\n formatted_dates.append(p_date)\r\n return formatted_dates", "def date_trans_x(x):\n \"\"\"2017.01.09->2017.1.09 \"\"\"\n date_list=x.split('.')\n return date_list[0]+'.'+str(int(date_list[1]))+'.'+date_list[2]", "def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate", "def convert_date_of_attendance(attendance):\n if isinstance(attendance,list):\n for a in attendance:\n a.date_of_att = datetime.datetime.strptime(a.DATE_OF_ATTENDANCE,'%d/%m/%Y').date()\n elif isinstance(attendance,models.AttendanceModel):\n attendance.date_of_att = datetime.datetime.strptime\\\n (attendance.DATE_OF_ATTENDANCE, '%d/%m/%Y').date()", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def trost2date(trost_date):\n year, month, day = (int(val) for val in trost_date.split('-'))\n return datetime.date(year, month, day)", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _reformat_date_jan_1999():\n reader = csv.reader(open(\"temperatures_1999.csv\"), delimiter=\";\")\n for (day, month, temp) in reader:\n date = datetime.datetime.strptime(\"-\".join([\"1999\", month, day]), \n \"%Y-%m-%d\")\n print \"%s; %s\" % (date.strftime(\"%Y-%m-%d\"), temp)", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def convertDate(indate):\n a = datetime.datetime.fromtimestamp(indate / 1000.0)\n a_str = a.strftime('%m/%d/%y')\n return datetime.datetime.strptime(a_str, '%m/%d/%y').date()", "def fixDate(weatherRDDRecord):\n fieldList = weatherRDDRecord.split(\",\")\n fieldList = [i.replace('\\\"', '') for i in fieldList] #remove quotation marks\n fieldList[0] = fieldList[0].replace('-', '/')\n \n swapDateOrder = fieldList[0].split('/')\n fieldList[0] = swapDateOrder[2] + '/' + swapDateOrder[1] + '/' + swapDateOrder[0]\n \n return (fieldList[0],(fieldList[1:]))", "def format_datetimes(self, datetimes, format=\"%B %d %Y %I:%M %p\"):\n date, times, space_character = datetimes.split(\", \")\n start_time, end_time = times.split(\" - \")\n year = datetime.now().strftime(\"%Y\")\n return (\n datetime.strptime(\n date + \" \" + year + \" \" + start_time.replace(\".\", \"\"), format\n ),\n datetime.strptime(\n date + \" \" + year + \" \" + end_time.replace(\".\", \"\"), format\n ),\n )", "def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data" ]
[ "0.73220545", "0.6644235", "0.64673054", "0.63785565", "0.6323779", "0.63159305", "0.6256937", "0.6174311", "0.5986844", "0.58866596", "0.5878423", "0.58775616", "0.58339506", "0.579193", "0.57800907", "0.57769805", "0.5757611", "0.57572365", "0.57409817", "0.5732213", "0.5722158", "0.5721827", "0.56974846", "0.5678983", "0.5675491", "0.5659683", "0.5645013", "0.56444377", "0.56371844", "0.5635159" ]
0.75249213
0
Check if a record exists matching the service pattern with the current host's ip
def record_exists(route53_zone, service_name, ip): # Match records belonging to the service for particular service and # environment. match_regex = "{}\d+\.{}\.?".format(service_name, route53_zone.name) for record in route53_zone.get_records(): match = re.match(match_regex, record.name) if match and ip in record.resource_records: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cusip_exists(record):\n cusips = helper.query_db('instruments', 'cusip')\n assert record['cusip'] in cusips", "def name_matches_ip(name, ip, state):\n for client in state['clients']:\n if client['name'] == name:\n if client['ip'] == ip:\n return True\n else:\n return False\n return False", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def member_exists(self, service, bigip):\n pool = self.service_adapter.get_pool(service)\n member = self.service_adapter.get_member(service)\n part = pool[\"partition\"]\n try:\n p = self.pool_helper.load(bigip,\n name=pool[\"name\"],\n partition=part)\n\n m = p.members_s.members\n if m.exists(name=urllib.quote(member[\"name\"]), partition=part):\n return True\n except Exception as e:\n # log error but continue on\n LOG.error(\"Error checking member exists: %s\", e.message)\n return False", "def _host_exists(self, host_name):\n hosts = self.host_obj.search_by_name(host_name)\n\n if len(hosts) > 0:\n for host in hosts:\n hostname = host['match']\n if host_name == hostname:\n return hostname\n return hostname\n LOG.debug(\"no host found for:\" + host_name)\n return None", "def match_api_keys(key, ip):", "def exist(self, key):\n record = self._storage.get(key, None)\n if record:\n return record.ttl >= time.time()\n return False", "def canDo_url(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostEnd in self.highwireHosts:\n if hostname.endswith(hostEnd):\n logging.log(5, 'url hostname %s ends with %s -> highwire' % (hostname, hostEnd))\n return True\n\n if hostname in self.hostCache:\n ipAddr = self.hostCache[hostname]\n else:\n logging.debug('Looking up IP for %s' % hostname)\n try:\n ipAddr = socket.gethostbyname(hostname)\n self.hostCache[hostname] = ipAddr\n except socket.gaierror:\n raise pubGetError('Illegal hostname %s in link' % hostname, 'invalidHostname', hostname)\n\n ipParts = ipAddr.split('.')\n ipParts = [ int(x) for x in ipParts ]\n result = ipParts[0] == 171 and ipParts[1] in range(64, 68)\n if result == True:\n logging.log(5, 'hostname %s is highwire host' % hostname)\n return result", "def has_host(self, host):\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n assert 'scan' in self._scan_result, 'Do a scan before trying to get result !'\n\n if host in list(self._scan_result['scan'].keys()):\n return True\n\n return False", "def match(self, _ip):\n try:\n return bool(ip_address(_ip) in self.network)\n except ValueError:\n return False", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def check_table(self, ip:str='127.0.0.1', date:str='2000-01-01'):\n stmt=\"SELECT COUNT(*) FROM aws_ip_list WHERE ip='%s' AND create_date='%s'\" % (ip, date)\n self.cur.execute(stmt)\n return self.cur.fetchall()[0][0]", "def record_exists(self, record):\n record_exists = False\n\n logging.debug('Check if record exists in table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n self._cursor.execute(\"\"\"SELECT count(*) FROM {} WHERE \\\n date == ? and time = ? 
and location = ? and nodeID = ?\"\"\".format(self._name), (date, time, location, node_id))\n\n if self._cursor.fetchone()[FIRST_ROW] == SINGLE_RECORD:\n record_exists = True\n\n logging.debug('Record exists? : {}'.format(record_exists))\n return record_exists", "def query_host(self, name):\n z = dns.zone.from_xfr(dns.query.xfr(self.server_address, self.domain))\n try:\n z.find_node(name)\n return True\n except KeyError:\n return False", "def exists(self, conn, key):\n return conn.exists(key)", "def contains_addr(self, addr):\n return self.find_loadable_containing(addr) is not None", "def find_by_status(self, host, state):", "def raw_exist(self, table: str, data: dict) -> bool:\n table_check_limits = {'temperature': 2, 'pressure': 1}\n request = '''SELECT * \n FROM %s \n WHERE datetime=:datetime \n AND service=:service \n ORDER BY timestamp DESC \n LIMIT %s''' % (table, table_check_limits[table])\n self.c.execute(request, {\n 'table': table,\n 'datetime': data['datetime'],\n 'service': data['service'],\n })\n db_data = self.c.fetchall()\n\n if len(db_data) == 1:\n if data['value'] == db_data[0][3]:\n return True\n else:\n return False\n\n if len(db_data) == 2:\n result = False\n # comparing timestamps.\n if db_data[0][1][0:16] == db_data[1][1][0:16]:\n for raw in db_data:\n if data['value'] == raw[3]:\n result = True\n else:\n if data['value'] == db_data[0][3]:\n result = True\n return result", "def containsip(url):\r\n try:\r\n if ip.ip_address(url):\r\n return 1\r\n except:\r\n return 0", "def check_input(data):\n if data.has_key('fqdn') and data.has_key('ip'):\n\n try:\n socket.inet_aton(data['ip'])\n return True\n except socket.error:\n return False", "def exists(self):\n query = db.session.query(Farmer.btc_addr)\n return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def isOverlappingWithAnyDynamicEntry(ipAddress):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n ip = int(ipaddress.IPv4Address(ipAddress))\n nat_pool_dict = config_db.get_table('NAT_POOL')\n\n if not nat_pool_dict:\n return False\n\n for values in nat_pool_dict.values():\n global_ip = values[\"nat_ip\"]\n ipAddr = global_ip.split('-')\n if (len(ipAddr) == 1):\n startIp = int(ipaddress.IPv4Address(ipAddr[0]))\n endIp = int(ipaddress.IPv4Address(ipAddr[0]))\n else:\n startIp = int(ipaddress.IPv4Address(ipAddr[0]))\n endIp = int(ipaddress.IPv4Address(ipAddr[1]))\n\n if ((ip >= startIp) and (ip <= endIp)):\n return True\n\n return False", "def matchIP(self, ip):\n return self._ip == ip", "def record_exists(self, date):\n for record in self.records:\n if self.date_str == record[\"date\"]:\n return True\n return False", "def test_getdnsrecord_notfound(kasserver):\n assert not kasserver.get_dns_record(\"www.example.com\", \"MX\")", "def has(self, hostname: str) -> bool:\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False", "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def test_ip_addresses_exists():\n load_ips()\n validate_names()" ]
[ "0.6178328", "0.6014736", "0.59269434", "0.58188164", "0.56742144", "0.56725705", "0.56573004", "0.5653265", "0.56383586", "0.56271714", "0.5579105", "0.5578969", "0.55783236", "0.5542878", "0.55369085", "0.55349123", "0.5531589", "0.5526352", "0.5510327", "0.5509592", "0.54834443", "0.5450964", "0.5449097", "0.54182833", "0.5385543", "0.53623646", "0.5361515", "0.5353488", "0.53499764", "0.533174" ]
0.7733625
0
Creates record with record_name and ip; updates record if it already exists with different ip does nothing if record already exists with same ip
def upsert_record(route53_zone, record_name, ip): # Only upsert the dns record if it doesn't resolve to us. try: record_ip = socket.gethostbyname(record_name) except socket.error: # Ignore if we can't connect to the host pass else: if ip == record_ip: return print str(dt.now()), "Registering host as", record_name record = route53_zone.get_a(record_name) if record and ip not in record.resource_records: route53_zone.update_a(record_name, ip) elif not record: route53_zone.add_a(record_name, ip)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_record(self, record_name, ip):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n \n list_record = self._api.domain.zone.record.list(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n for record in list_record:\n if record['name'] == record_name:\n myrecord = record\n # Create new record\n self._api.domain.zone.record.update(self._api_key, self._current_zone['id'], \n self._new_zone_version_number, {'id': myrecord['id']}, \n {\n 'name': myrecord['name'],\n 'type': myrecord['type'],\n 'value': ip,\n 'ttl': myrecord['ttl']\n })\n logging.info('Update record %s with ip %s successfully.' % (record_name, ip))", "def saw_ip(self, ip):\n from sqlalchemy.exc import IntegrityError\n c = self.ipSurvey.columns\n v = {\n c[\"ipAddress\"]: ip,\n c[\"lastSeen\"]: \"now()\",\n }\n # Update if already in table, otherwise insert new row\n if self.session.execute(self.ipSurvey.update(c[\"ipAddress\"] == ip, values=v)).rowcount == 0:\n self.session.execute(self.ipSurvey.insert(values=v))", "def insert_or_update(self, table, record):\n try:\n request = s.query(table=table, query={'sys_id': record['sys_id']})\n #request.get_single()\n response = request.update(record)\n print >> sys.stderr, 'update'\n except NoResults:\n # Record does not exist so create it\n response = self.snow.insert(table=table, payload=record)\n print >> sys.stderr, 'create'\n return response", "def create(self, key, record, overwrite=False):\n if key in self.db and not overwrite:\n raise ValueError(\"A record for key \\\"%s\\\" already exists.\" % key)\n self.db[key] = copy(record)", "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def upload_record(self,\n record: Optional[Record] = None,\n style: Optional[str] = None,\n name: Optional[str] = None,\n model: Union[str, io.IOBase, DM, None] = None,\n workspace: Union[str, pd.Series, None] = None,\n overwrite: bool = False,\n verbose: bool = False):\n if record is None:\n record = load_record(style, model, name=name)\n \n try:\n self.remote_database.add_record(record=record, workspace=workspace,\n verbose=verbose) \n except ValueError as e:\n if overwrite:\n self.remote_database.update_record(record=record, workspace=workspace,\n verbose=verbose)\n else:\n raise ValueError('Matching record already exists: use overwrite=True to change it') from e", "def cli_add_record(record_data):\n new_record = None\n try:\n new_record = api.insert_record( record_data)\n except DuplicateRecord as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n except MissingRequiredInformaton as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. 
%(error)s\" % locals()\n return None\n\n return new_record", "def update_record(self):\n new_record = self.create_record()\n for record in self.records:\n if self.date_str == record[\"date\"] and not record == new_record:\n record.update(new_record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def add_record():\n if 'json' not in request.files:\n # use an HTML record that seems appropriate\n return \"no json file in the request!\", 400\n try:\n # can't assume that JSON file is valid\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with identifier, post failed!\", 400\n with RECORD_LOCK:\n # just check if the name already exists in the global RECORD list\n if len([r for r in RECORDS if r.get('name') == _record['name']]):\n return \"already in the records!\", 409\n RECORDS.append(_record)\n return \"OK\"", "def create_record(self, context, record):\n record = self.dns_manager.create_record(context, record)\n return record", "def test_2_resource_records_actions(self):\n record_type = 'AAAA'\n name = 'test.example.com'\n ttl = 60\n rdata = {\n 'ip': '2001::1'\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n 'ip': '2002::2'\n }\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def insert_update(record,name):\n try:\n conn = db_conection()\n cur = conn.cursor()\n except Exception as db:\n __log(2, \"DataBase Connection Error\" + db.args[1])\n\n try:\n query_insert = \"INSERT into \"+name+\" VALUES ('\" + str(record['Id']) + \"', '\" + record[\n 'First name'] + \"', '\" + record['Last Name'] + \"', '\" + record['deparment'] + \"', '\" + str(\n int(record['salary'])) + \"')\"\n cur.execute(query_insert)\n conn.commit()\n # print (\"Insert Success\",record['Id'])\n __log(1, 'Insert Success. 
ID: ' + str(record['Id']))\n\n except pymysql.Error:\n # print ('Duplicate Error Found ID: ',record['Id'])\n __log(2, 'Duplicate Error Found ID: ' + str(record['Id']))\n\n query_update = \"UPDATE \"+name+\" SET `First name` = %s , `Last Name`= %s,`deparment`= %s,`salary` = \" \\\n \"'%s' WHERE `Id` = '%s' \"\n val = (record[1], record[2], record[3], record[4], record[0])\n cur.execute(query_update, val)\n conn.commit()\n # print (\"Update Success, ID\",record['Id'])\n __log(1, 'Duplicate Error Updated with New Values, ID: ' + str(record['Id']))\n\n except Exception as e:\n # print (e)\n __log(2, 'Unknown Error, Skipping Record having id' + str(record['Id']))", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def create(self, ip): # pylint: disable=invalid-name\n return self.request(\"POST\", data={\"ip\": ip})", "def update_record():\n if 'json' not in request.files:\n return \"no json file in the request!\", 400\n try:\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with a name, post failed!\", 400\n with RECORD_LOCK:\n for _index, _rec in enumerate(RECORDS):\n if _rec['name'] == _record['name']:\n RECORDS[_index] = _record\n return \"OK\"\n return \"Failed to update record!\", 500", "def save_record(self,\n record: Optional[Record] = None,\n style: Optional[str] = None,\n name: Optional[str] = None,\n model: Union[str, io.IOBase, DM, None] = None,\n overwrite: bool = False,\n verbose: bool = False):\n if record is None:\n record = load_record(style, model, name=name)\n \n try:\n self.local_database.add_record(record=record, verbose=verbose)\n except ValueError as e:\n if overwrite:\n self.local_database.update_record(record=record,\n verbose=verbose)\n else:\n raise ValueError('Matching record already exists: use overwrite=True to change it') from e", "def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )", "def save(self, ip='', result='', dt=datetime.datetime.now()):\n self.ping_table.insert({\"host\": ip, \"result\": result, \"datetime\": str(dt)})\n return", "def add_remote_duplicate_entry(self, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out', tenant)\n other_epg = OutsideEPG('other', l3out)\n subnet = OutsideNetwork(ip, other_epg)\n subnet.ip = ip + '/32'\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def 
add_remote_duplicate_entry(self, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out1', tenant)\n other_epg = OutsideEPG('other', l3out)\n subnet = OutsideNetwork(ip, other_epg)\n subnet.ip = ip + '/32'\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def update_A_record(self, heroku_host_ip, dns_a_record):\n r = self.api.post_update_record(\n record_id = dns_a_record.get('id'),\n prio = dns_a_record.get('prio'),\n content = heroku_host_ip,\n ttl = dns_a_record.get('ttl'))\n dns_a_record = self.extract_A_records(r[\"record\"])\n return dns_a_record", "def create_record(self, zone_id, record, record_type, data, ttl=60):\r\n self.record.createObject({\r\n 'domainId': zone_id,\r\n 'ttl': ttl,\r\n 'host': record,\r\n 'type': record_type,\r\n 'data': data})", "def add_route53_record(emr_internal_ips, cr):\n\n conn = connect_route53(aws_access_key_id = cr.get_config(\"aws_access_key\"), aws_secret_access_key = cr.get_config(\"aws_secret_key\"))\n\n zone = conn.get_zone(\"alpinenow.local\")\n\n print \"Adding DNS Records for: {0}\".format(emr_internal_ips)\n for ip in emr_internal_ips:\n internal_dns = \"ip-\" + ip.replace(\".\", \"-\") + \".alpinenow.local\"\n response = zone.add_a(internal_dns, ip) # TODO: Do something with response", "def test_zone_cant_have_duplicate_records(self):\n zone = Zone('test.example.com')\n recordA = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n recordB = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n zone.add_record(recordA)\n with self.assertRaises(DuplicateException):\n zone.add_record(recordB)", "def test_6_resource_records_actions(self):\n\n record_type = 'A'\n name = 'example76'\n content = '2.2.2.2'\n ttl = 60\n rdata = {\n 'ip': content\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n record_type = 'A'\n name = 'example43'\n content = '2.2.2.3'\n ttl = 60\n rdata = {\n 'ip': content\n }\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type, ttl=ttl, name=name, rdata=rdata)\n assert resp is not None\n assert resp.status_code == 200\n\n record_type = 'PTR'\n name = '2.2.2.2'\n ttl = 60\n rdata = {\n \"ptrdname\": \"example76.test.example36.com\"\n }\n # create resource record\n resp = self.record.create_resource_record(\n 
instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type,\n ttl=ttl, rdata=rdata, name=name)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n ttl = 120\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id, ttl=ttl)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204", "def post(self, record_type, record_id, record, metadata):\n \n if not self.cache.get(record_type, None):\n self.cache[record_type] = {}\n\n if not self.cache[record_type].get(record_id, None):\n self.cache[record_type][record_id] = {}\n\n \n self.cache[record_type][record_id]['record'] = record\n self.cache[record_type][record_id]['metadata'] = metadata\n\n \n d = Date()\n self.cache[record_type][record_id]['last_updated'] = d.now()\n\n # Check space, remove old items if not enough space", "def test_5_resource_records_actions(self):\n record_type = 'SRV'\n name = 'test.example76.com'\n ttl = 60\n rdata = {\n \"priority\": 100,\n \"weight\": 100,\n \"port\": 8000,\n \"target\": \"test.example76.com\"\n }\n service_name = \"_sip\"\n protocol = \"udp\"\n # create resource record\n resp = self.record.create_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, type=record_type,\n ttl=ttl, name=name, rdata=rdata, service=service_name, protocol=protocol)\n assert resp is not None\n assert resp.status_code == 200\n record_id = resp.get_result().get(\"id\")\n\n # update resource record\n name = 'test.example43.com'\n ttl = 120\n rdata = {\n \"priority\": 200,\n \"weight\": 200,\n \"port\": 8001,\n \"target\": \"test.example43.com\"\n }\n service_name = \"_sip\"\n protocol = \"tcp\"\n resp = self.record.update_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id,\n type=record_type, ttl=ttl, name=name, rdata=rdata, service=service_name, protocol=protocol)\n assert resp is not None\n assert resp.status_code == 200\n\n # get resource record\n resp = self.record.get_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 200\n\n # delete resource record\n resp = self.record.delete_resource_record(\n instance_id=self.instance_id, dnszone_id=self.zone_id, record_id=record_id)\n assert resp is not None\n assert resp.status_code == 204" ]
[ "0.72148836", "0.66041684", "0.62076926", "0.6207673", "0.5900262", "0.58394516", "0.58283144", "0.57992715", "0.57729447", "0.5762532", "0.57310665", "0.5682131", "0.5672659", "0.5666608", "0.5655603", "0.5637485", "0.56220573", "0.56179416", "0.560817", "0.56004244", "0.5599704", "0.5577504", "0.5555616", "0.5548527", "0.5533867", "0.55329686", "0.546559", "0.54505575", "0.5434616", "0.5421104" ]
0.7590886
0
Create a new EC2 instance with specific parameters SecurityGroup (sg) and KeyPair (key) have to be previously created (see cassandgo initSG and cassandgo initKP)
def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None): reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data) instance = reservation.instances[0] return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_instance(ami, sg_name):\n instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n 
\"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create():\n\n # remember what is created or not\n vpc = False\n igw = False\n sg = False\n sub = False\n vm = False\n\n vpc = _create_resource('vpc', CidrBlock=args.cidr, InstanceTenancy='default')\n igw = _create_resource('igw')\n\n if vpc and igw:\n _attach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n else:\n print('Cannot attach an igw to a vpc as at least one of them could not be created.')\n\n if vpc:\n sg = _create_resource(\n 'sg',\n GroupName=args.role,\n Description='SG for ' + args.role,\n VpcId=getattr(_existing.vpc, 'id', None)\n )\n else:\n print('Cannot create a sg as the vpc to attach it to could not be created.')\n\n if sg:\n _add_ingress_rules()\n else:\n print('Cannot create ingress rule as the sg could not be created.')\n\n if vpc:\n sub = _create_resource(\n 'sub',\n VpcId=getattr(_existing.vpc, 'id', None),\n CidrBlock=args.cidr\n )\n else:\n print('Cannot create a subnet as the vpc to attach it to could not be created.')\n\n if vpc and sub:\n _link_route_table()\n else:\n print('Cannot link subnet and VPC in the route table as vpc or sub not created.')\n\n if sub and sg:\n vm = _create_resource(\n 'vm',\n ImageId=args.ami,\n MinCount=1,\n MaxCount=1,\n KeyName=args.keypair,\n InstanceType=args.instance,\n # Note that there will be no internal name.\n # To get one, create first a DHCP options set and associate it with the VPC.\n NetworkInterfaces=[{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0, # needs to be 0 to get a public IP\n 'SubnetId': getattr(_existing.sub, 'id', None),\n 'Groups': [getattr(_existing.sg, 'id', None)],\n }],\n )\n else:\n print('Cannot create an instance as the sub or sg to use could not be created.')\n\n if vm:\n if not dry:\n print('Waiting for the instance to be up and running, usually done in less than 45 seconds...')\n _existing.vm.wait_until_running()\n _tag_volume()\n print('you can reach your VM at ' + _existing.vm.public_ip_address)\n\n else:\n print('VM not created for some reason.')", "def create_instance(self, image='ami-660c3023', key_name='linuxonEC2', instance_type='t1.micro', security_groups=['default']):\n return self.conn.run_instances(image,\n key_name=key_name,\n instance_type=instance_type,\n security_groups=security_groups).instances[0]", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + 
keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def amazonEc2_create(amazonEc2):\n\treturn amazonEc2", "def create_ec2_with_eip(ec2, ec2_client, subnet_pub_ec2):\n ## create EC2 instance\n print(\"\\n===Creating an EC2 instance\")\n instances = ec2.create_instances(\n ImageId=AMI_ID,\n MinCount=1,\n MaxCount=1,\n InstanceType=EC2_TYPE,\n KeyName=KEY_PAIR_NAME,\n NetworkInterfaces=[{\n \"DeviceIndex\":0,\n \"SubnetId\": subnet_pub_ec2.id}],\n TagSpecifications=[{\n \"ResourceType\":\"instance\",\n \"Tags\":[{\"Key\": \"Name\", \"Value\": EC2_NAME}]\n }]\n )\n \n ## get instance ids\n instances_ids = [i.instance_id for i in instances]\n\n ## wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"An EC2 instance is ready.\")\n\n ## create new EIP and attach it to existing EC2 instance\n instance_id = instances[0].instance_id\n try:\n allocation = ec2_client.allocate_address(Domain=\"vpc\")\n response = ec2_client.associate_address(AllocationId=allocation[\"AllocationId\"],\n InstanceId=instance_id)\n print(response)\n except ClientError as e:\n print(e)\n print(f\"===EIP {allocation['PublicIp']} has been assigned to the EC2 instance!\")\n return instances, allocation[\"PublicIp\"]", "def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', 
[{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()", "def create_asg(AvailabilityZone):\n lc_name= lib.get_lc_name(stackname, ELBTargetGroupName, AvailabilityZone)\n\n logger.info('Creating launch-config for a new ASG: ' + lc_name)\n userdata='vmseries-bootstrap-aws-s3bucket=' + s3master\n \n try:\n response=asg.create_launch_configuration(LaunchConfigurationName=lc_name, \n ImageId=imageID, KeyName=keyname, SecurityGroups=[sg_untrust], InstanceType=instanceType,\n AssociatePublicIpAddress=False, EbsOptimized=True,\n IamInstanceProfile=iamprofilebs,\n BlockDeviceMappings=[\n {'DeviceName': \"/dev/xvda\", \n 'Ebs': \n {'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n }\n }\n ],\n UserData=userdata)\n except Exception as e:\n logger.error(\"[ASG LC error]: {}\".format(e))\n return False\n #Get ELB ARN\n tgtGrp = elbv2.describe_target_groups(Names=[ELBTargetGroupName])\n if tgtGrp == None:\n tgtGrp_arn = None\n logger.info('ELB target group is not found!')\n else:\n tgtGrp_d = tgtGrp['TargetGroups']\n tgtGrp_arn = tgtGrp_d[0].get('TargetGroupArn')\n print(\"targetgroup arn: \" + tgtGrp_arn)\n print( \"ELBTargetGroupName: \" +ELBTargetGroupName)\n \n asg_name = lib.get_asg_name(stackname, ELBTargetGroupName, AvailabilityZone)\n logger.info('Creating Auto-Scaling Group with name: ' + asg_name)\n tags={'ResourceId': asg_name, 'ResourceType': 'auto-scaling-group', 'Key': 'Name', 'Value': asg_name, 'PropagateAtLaunch':True}\n \n subnet=lib.choose_subnet(subnetuntrust, AvailabilityZone)\n try:\n response=asg.create_auto_scaling_group(AutoScalingGroupName=asg_name, LaunchConfigurationName=lc_name,\n MinSize=MinInstancesASG, MaxSize=MaximumInstancesASG, DesiredCapacity=MinInstancesASG,\n DefaultCooldown=ScalingPeriod, TargetGroupARNs=[tgtGrp_arn],\n VPCZoneIdentifier=subnet,\n Tags=[tags],\n HealthCheckGracePeriod=900)\n except Exception as e:\n logger.error(\"[ASG create error]: {}\".format(e))\n return False\n \n if create_asg_life_cycle(asg_name, AvailabilityZone) == False:\n return False\n \n scalein=asg_name + '-scalein'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scalein, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=-1, Cooldown=600)\n arn_scalein=response['PolicyARN']\n except Exception as e:\n logger.error(\"[ASG ScaleIn12 Policy]: {}\".format(e))\n return False\n \n scaleout=asg_name + '-scaleout'\n try:\n response = asg.put_scaling_policy(AutoScalingGroupName=asg_name, PolicyName=scaleout, AdjustmentType='ChangeInCapacity',\n ScalingAdjustment=1, Cooldown=600)\n arn_scaleout=response['PolicyARN']\n except Exception as e:\n logger.info(\"[ASG ScaleOut123]: {}\".format(e))\n return False\n \n logger.info('ARN of Scale In and Scale Out: ' + arn_scalein + ' ' + arn_scaleout)\n logger.info('Adding Cloud Watch Alarm : ' + ScalingParameter + ' for ASG: ' + asg_name)\n if cw_func_add_alarms[ScalingParameter](asg_name, arn_scalein, arn_scaleout) == False:\n return False\n \n return True", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + 
suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def prepareInstance(username, sshId):\n print os.environ['EC2_KEYPAIR_PATH']\n with settings(user='ubuntu',\n key_filename=os.environ['EC2_KEYPAIR_PATH']):\n password = getpass('Enter a new password for user %s:' % username)\n password2 = getpass('Enter the password a again:')\n if password != password2:\n raise RuntimeError(\"Passwords don't match\")\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n cryptedPassword = _hashPassword(password)\n sudo('usermod --password %s %s' % (cryptedPassword, username))\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)\n sudo('apt-get update')\n sudo('DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y')\n if exists('/var/run/reboot-required'):\n reboot()", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass", "def create_keypair(econfig_file=None, region=None, keyname=\"bcbio\"):\n import boto\n import boto.ec2\n if econfig_file:\n keypair_dir = os.path.dirname(econfig_file).replace(\"elasticluster\", \"aws_keypairs\")\n else:\n keypair_dir = os.path.join(os.getcwd(), \"aws_keypairs\")\n if not os.path.exists(keypair_dir):\n os.makedirs(keypair_dir)\n private_key = os.path.join(os.path.join(keypair_dir, keyname))\n new_key = not os.path.exists(private_key)\n if new_key:\n cmd = [\"ssh-keygen\", \"-t\", \"rsa\", \"-N\", \"\", \"-f\", private_key, \"-C\", \"bcbio_aws_keypair\"]\n subprocess.check_call(cmd)\n public_key = private_key + \".pub\"\n if region:\n ec2 = boto.ec2.connect_to_region(region)\n else:\n ec2 = boto.connect_ec2()\n key = ec2.get_key_pair(keyname)\n if key and new_key:\n print(\"Non matching key %s found in AWS, removing.\" % keyname)\n ec2.delete_key_pair(keyname)\n key = None\n if not key:\n print(\"Key %s not found in AWS, importing created key\" % keyname)\n with open(public_key) as in_handle:\n body = in_handle.read()\n try:\n ec2.import_key_pair(keyname, body)\n except TypeError as e:\n body = body.encode('utf-8')\n ec2.import_key_pair(keyname, body)\n return {\"user_key_name\": keyname, \"user_key_private\": private_key,\n \"user_key_public\": public_key}", "def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app 
is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred 
(InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n 
BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)", "def create_instance(\r\n image_id, instance_type, key_name, security_group_names=None):\r\n try:\r\n instance_params = {\r\n 'ImageId': image_id, 'InstanceType': instance_type, 'KeyName': key_name\r\n }\r\n if security_group_names is not None:\r\n instance_params['SecurityGroups'] = security_group_names\r\n instance = ec2.create_instances(**instance_params, MinCount=1, MaxCount=1)[0]\r\n logger.info(\"Created instance %s.\", instance.id)\r\n except ClientError:\r\n logging.exception(\r\n \"Couldn't create instance with image %s, instance type %s, and key %s.\",\r\n image_id, instance_type, key_name)\r\n raise\r\n else:\r\n return instance", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def new_instance(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n image_id = args[\"Image-ID\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n ec2.create_instances(ImageId=image_id, MinCount=1, MaxCount=5)\n\n message.message_text = \"New Instance Created\"\n return message.to_json()", "def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' 
% self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair", "def generate(cls, params = None, quiet = False):\n\n if params is None:\n if not quiet:\n logger.debug(\"Generating new ECDSA key parameters\")\n params = KeyParams.generateEC()\n\n assert isinstance(params, KeyParams)\n\n if not quiet:\n logger.debug(\"Generating new ECDSA key\")\n\n return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))", "def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n 
ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for security_group in instance.security_groups:\n yield instance, security_group", "def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True 
) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance", "def __deploy_instance_helper__(self, instance_name,\n network_name=None, network_cidr=None, is_network_new=True,\n keypair_name=None, is_keypair_new=True,\n sec_group_name=None, metadata=None):\n\n flavor_id = self.nova_operations.get_any_flavor_id()\n self.assertIsNotNone(flavor_id, \"Problems retrieving a flavor\")\n\n image_id = self.nova_operations.find_image_id_by_name(image_name=BASE_IMAGE_NAME)\n self.assertIsNotNone(image_id, \"Problems retrieving the image '{}'\".format(BASE_IMAGE_NAME))\n\n # instance prerequisites\n try:\n network_id_list = None\n if network_name:\n if is_network_new:\n # Create the given network\n cidr = network_cidr or TEST_CIDR_DEFAULT\n network = self.neutron_operations.create_network_and_subnet(network_name, cidr=cidr)\n self.test_world['networks'].append(network['id'])\n network_id_list = [{'net-id': network['id']}]\n else:\n # Look for the network id\n net_list = self.neutron_operations.find_networks(name=network_name)\n self.assertTrue(len(net_list) != 0, \"Required network '%s' could not be found\" % network_name)\n network_id_list = [{'net-id': net_list[0]['id']}]\n\n except NeutronClientException as e:\n self.logger.debug(\"Required network could not be created: %s\", e)\n self.fail(e)\n\n try:\n if keypair_name:\n if is_keypair_new:\n self.nova_operations.create_keypair(keypair_name)\n self.test_world['keypair_names'].append(keypair_name)\n else:\n keypair_found = self.nova_operations.find_keypair(name=keypair_name)\n self.assertIsNotNone(keypair_found, \"Required Keypair '%s' could not be found\" % keypair_name)\n except NovaClientException as e:\n self.logger.debug(\"Required keypair could not be created: %s\", e)\n self.fail(e)\n\n try:\n security_group_name_list = None\n if sec_group_name:\n sec_group_id = self.nova_operations.create_security_group_and_rules(sec_group_name)\n self.test_world['sec_groups'].append(sec_group_id)\n security_group_name_list = [sec_group_name]\n except NovaClientException as e:\n self.logger.debug(\"Required security group could not be created: %s\", e)\n self.fail(e)\n\n # create new instance\n try:\n server_data = self.nova_operations.launch_instance(instance_name=instance_name,\n flavor_id=flavor_id,\n image_id=image_id,\n metadata=metadata,\n keypair_name=keypair_name,\n security_group_name_list=security_group_name_list,\n network_id_list=network_id_list)\n except Forbidden as e:\n self.logger.debug(\"Quota exceeded when launching a new instance\")\n self.fail(e)\n except OverLimit as e:\n self.logger.debug(\"Not enough resources to launch new instance: %s\", e)\n self.fail(e)\n else:\n self.test_world['servers'].append(server_data['id'])\n\n # Wait for status=ACTIVE\n status, detail = self.nova_operations.wait_for_task_status(server_data['id'], 'ACTIVE')\n self.assertEqual(status, 'ACTIVE', \"{detail}. Current status is {status}\".format(detail=detail, status=status))\n\n return server_data['id']" ]
[ "0.7219868", "0.68883014", "0.67771643", "0.6717516", "0.67171246", "0.66807824", "0.6674608", "0.66468847", "0.65976626", "0.6490849", "0.6442486", "0.63030493", "0.62883216", "0.6278427", "0.6269443", "0.61487126", "0.61450726", "0.61255866", "0.6090022", "0.6052359", "0.6048319", "0.60351664", "0.6017799", "0.6004755", "0.5988949", "0.595655", "0.5952687", "0.5948316", "0.59315515", "0.59129" ]
0.7226294
0
List all instances for a specific region and zone
def listInstancesRegionZone(region,zone): print "-"*80 print "# Region :",region," Zone", zone print "-"*80 instances = getInstancesRegionZone(region,zone) if instances: for instance in instances: print "[",instance.ami_launch_index,"]",instance.ip_address," (",instance.private_ip_address,") ",instance.instance_type," key=",instance.key_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as 
exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def yield_instances_in_zones(self, zones, instance_filter=None):\n for zone in zones:\n for instance in self.yield_instances_in_zone(zone, instance_filter):\n yield instance", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = 
conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def show_instances():\n return get_instances()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, 
base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos" ]
[ "0.73566705", "0.7152035", "0.69493294", "0.6796809", "0.67603457", "0.6752783", "0.6647198", "0.6643667", "0.64551127", "0.64104474", "0.6406182", "0.6393634", "0.63902986", "0.6389321", "0.63319427", "0.629142", "0.628562", "0.6270777", "0.6261531", "0.6253145", "0.621456", "0.6178527", "0.6153025", "0.61499745", "0.6134551", "0.61043745", "0.61033905", "0.6071402", "0.60678333", "0.606688" ]
0.82746065
0
Create all Cassandra security groups in all regions
def createAllSG():
	for info in conf_HVM:
		ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
		createSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": 
[\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def add_security_groups(port, context):\n dbcontext = context._plugin_context\n groups = [context._plugin.get_security_group(dbcontext, sg)\n for sg in port['security_groups']]\n port['security_groups'] = groups", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = 
openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def test_aws_service_api_security_groups_get(self):\n pass", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 
'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def list_secgroups(self, name=None):", "def 
init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for security_group in instance.security_groups:\n yield instance, security_group", "def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()", "def sg_lookup_all(session, vpc_id):\n if session is None:\n return NoneDict()\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]}])\n\n if len(response['SecurityGroups']) == 0:\n return NoneDict()\n else:\n sgs = NoneDict()\n for sg in response['SecurityGroups']:\n key = _find(sg.get('Tags', []), lambda x: x[\"Key\"] == \"Name\")\n if key:\n key = key['Value']\n sgs[key] = sg['GroupId']\n\n return sgs", "def _generate_rds_instances_and_sg(resource, session):\n for db_instance in resource.describe_db_instances()[\"DBInstances\"]:\n for security_group in db_instance[\"VpcSecurityGroups\"]:\n yield db_instance, security_group, _get_sg_name(security_group[\"VpcSecurityGroupId\"], session)", "def add_users_to_groups(output=True):\n\n for group in DEFAULT_GROUPS:\n user = User.objects.get(username=group)\n role_title = Group.objects.get(name=group)\n user.groups.add(role_title)", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = 
base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def test_create_resource_group(self):\n pass", "def create(self, name, desc, tenant_id):\n data = {\"security_group\": {\"name\": name, \n \"description\": desc, \n \"tenant_id\":tenant_id}}\n\n path = '%s/security-groups' % self.ver\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create openstack security group: %s' % truncate(res))\n return res[0]['security_group']", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def add_all_regions():\n gene_id = request.json['gene_id']\n panel_id = request.json['panel_id']\n tx_id = request.json['tx_id']\n gene_name = request.json['gene_name']\n project_id = get_project_id_by_panel_id(s, panel_id)\n\n add_preftxs_to_panel(s, project_id, [{\"gene\": gene_name, \"tx_id\": tx_id}, ])\n add_genes_to_panel_with_ext(s, panel_id, gene_id)\n return jsonify({\"genes\": [gene_id, ]})", "def create_endpoints_new_network():\n\n for i in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(i)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n # 0/ - detect region of explorers\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, gracefully exit\n # verify_nodes_same_region(reg, array_instance_ip)\n\n print(\"\\n######################################### Creating complete pipeline for shard\", str(i),\n \"in AWS region: \", reg, \"#########################################\\n\")\n # 1/ - request certificates\n print(\"\\n==== step 1: request SSL certificates, CertificateArn will be stored into dict_region_sslcerts \\n\")\n domain_name = ''.join(['api.s', str(i), \".\", BASE_DOMAIN_NAME])\n dict_existing_certs = get_existing_certs(reg, domain_name)\n dict_region_sslcerts.clear()\n if dict_existing_certs[domain_name]:\n print(\"[INFO] SSL certificate of\", domain_name, \"exists, skipping..\")\n dict_region_sslcerts[reg].append(dict_existing_certs[domain_name][0])\n else:\n print(\"[INFO] SSL certificate of\", domain_name, \"does NOT exist, 
requesting..\")\n request_ssl_certificates(reg, domain_name)\n\n print(\"[RESULT] OF STEP 1\")\n pp.pprint(dict_region_sslcerts)\n\n # 2/ - create target group\n dict_region_tgarn.clear()\n array_tgs = create_name_target_group(i, ID_DOMAIN_NAME)\n pp.pprint(array_tgs)\n create_target_group(reg, array_tgs)\n print(\"[RESULT] OF STEP 2\")\n pp.pprint(dict_region_tgarn)\n\n # 3/ - create elb\n dict_region_elb2arn.clear()\n elb2_name = ''.join('s' + str(i) + '-' + ID_DOMAIN_NAME + '-' + reg)\n array_dns_hostedzone = create_elb2(reg, elb2_name)\n print(\"[RESULT] OF STEP 3\")\n pp.pprint(dict_region_elb2arn)\n\n # 4/ - create listener\n dict_region_ListenerArn.clear()\n create_listener(reg, dict_region_elb2arn, dict_region_sslcerts, dict_region_tgarn)\n print(\"[RESULT] OF STEP 4\")\n pp.pprint(dict_region_ListenerArn)\n\n # 5/ - create one more rule for the current listener\n host_header_value = ''.join('ws.s' + str(i) + '.' + BASE_DOMAIN_NAME)\n create_rule(reg, dict_region_ListenerArn, dict_region_tgarn, dict_region_elb2arn, host_header_value)\n\n # 6/ - register explorer instances into the target group\n register_explorers(reg, array_instance_id, dict_region_tgarn)\n\n # 7/ - create entries on Route 53\n array_record_set = create_name_record_set(i, BASE_DOMAIN_NAME)\n create_dns_entries(HOSTED_ZONE_ID, array_record_set, array_dns_hostedzone)", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)" ]
[ "0.65404814", "0.641292", "0.6315235", "0.63037133", "0.62225246", "0.611194", "0.6016119", "0.589653", "0.58127975", "0.5788823", "0.57725734", "0.5730021", "0.571415", "0.56577706", "0.56387746", "0.55405027", "0.54687124", "0.5450472", "0.54221886", "0.5418091", "0.5405399", "0.5370526", "0.5361652", "0.5351294", "0.53435224", "0.5334952", "0.5327787", "0.5292629", "0.5288266", "0.52800274" ]
0.71990424
0
Create all key pairs in all regions
def createAllKP():
	if not os.path.exists(keysDir):
		os.makedirs(keysDir)
	for info in conf_HVM:
		keyName = 'Key-'+info['region']+'-'+info['zone']
		try:
			os.remove(keysDir+'/'+keyName+'.pem')
		except OSError:
			pass
		print "Key creation :",keyName
		ec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])
		# check if the key pair exists
		kps = [kp for kp in ec2.get_all_key_pairs() if kp.name == keyName]
		if kps:
			ec2.delete_key_pair(keyName)
		key = ec2.create_key_pair(keyName)
		key.save(keysDir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_keys():", "def createAllSG():\n\tfor info in conf_HVM:\n\t\tec2 = boto.ec2.connect_to_region(info['region']+'-'+info['zone'])\n\t\tcreateSG(ec2,'SG-Cassandra-'+info['region']+'-'+info['zone'],CASSANDRA_RULES)", "def setup_space_keys(cls):\n if cls.KEYS:\n return\n\n from pkg_resources import iter_entry_points\n\n for entry_point in iter_entry_points(group=cls.CATKIN_SPACES_GROUP):\n ep_dict = entry_point.load()\n cls.STORED_KEYS.append(entry_point.name + '_space')\n cls.SPACES[entry_point.name] = ep_dict\n cls._create_space_methods(entry_point.name)\n\n cls.KEYS = cls.STORED_KEYS + cls.EXTRA_KEYS", "def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )", "def keysAll():", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def create_key ():", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def create_all_taxonomic_keys(point_locations: dict, location_species: dict, location_range_species: dict,\n trait_data: dict, all_taxa_data: dict) -> dict:\n\n all_keys = {}\n\n # find all unique sets of species\n 
species_sets = set()\n for p in point_locations:\n loc = point_locations[p]\n all_species = set()\n all_species |= location_species[loc.name]\n if loc.n_direct_children() > 0:\n for c in loc.direct_children():\n all_species |= fetch_child_data(c, location_species)\n\n range_species = set(find_species_by_name(s) for s in location_range_species[loc])\n all_species |= range_species\n if len(all_species) > 0:\n species_sets.add(frozenset(all_species))\n\n # create keys for each unique set of species\n warnings = set()\n for sp_set in species_sets:\n taxa_data = {}\n for s in sp_set:\n try:\n taxa_data[\"Male \" + s.binomial()] = all_taxa_data[\"♂ Male {{\" + s.species + \"}}\"]\n taxa_data[\"Female \" + s.binomial()] = all_taxa_data[\"♀ Female {{\" + s.species + \"}}\"]\n except KeyError:\n report_error(\"Missing taxonomic key data: \" + s.species)\n\n all_keys[sp_set], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, taxa_data, verbose=False)\n warnings |= new_warning\n\n # global key for all species\n all_keys[\"all\"], new_warning = TMB_TaxKeyGen.generate_taxonomic_key(trait_data, all_taxa_data, verbose=False)\n warnings |= new_warning\n\n for w in sorted(warnings):\n report_error(w)\n\n return all_keys", "def test_aws_service_api_keypair_generate_post(self):\n pass", "def _create_keys(bucket_name, keys=[]):\n bucket = connection.create_bucket(bucket_name)\n\n for s in keys:\n key = bucket.new_key(s)\n key.set_contents_from_string(s)\n\n return bucket", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def Dictionary_create(nMarkers, markerSize):\n pass", "def secondary_keys_dicts(self):", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def initialize(self, keys: List[str]):", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def create_inbound(self, keys):", "def make_s3_keys(self):\n # Write the data twice:\n for fmt in (VERSION_FMT, LATEST_FMT):\n yield make_s3_keys(self, fmt)", "def write_regions(pathfolder, key_firms, regions, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['nif'] = key_firms\n db['regions'] = regions\n db['methodvalues'] = methodvalues\n db.close()", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n 
self.delete_inaccessible_buildings()", "def create_ssh_keys(self):\n self.random_ssh()\n\n return self.keys", "def create_and_fill_bucket(self):\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n key = EmrProcessing.bucket.new_key('input/test.csv')\n input_file_path = '../data/test.csv'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')\n\n key = EmrProcessing.bucket.new_key('mapper/mapper.py')\n input_file_path = '../src/mapper/mapper.py'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')", "def _create_sections(self):\n self._SECTIONS = {}", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def create_endpoints_new_network():\n\n for i in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(i)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n # 0/ - detect region of explorers\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, gracefully exit\n # verify_nodes_same_region(reg, array_instance_ip)\n\n print(\"\\n######################################### Creating complete pipeline for shard\", str(i),\n \"in AWS region: \", reg, \"#########################################\\n\")\n # 1/ - request certificates\n print(\"\\n==== step 1: request SSL certificates, CertificateArn will be stored into dict_region_sslcerts \\n\")\n domain_name = ''.join(['api.s', str(i), \".\", BASE_DOMAIN_NAME])\n dict_existing_certs = get_existing_certs(reg, domain_name)\n dict_region_sslcerts.clear()\n if dict_existing_certs[domain_name]:\n print(\"[INFO] SSL certificate of\", domain_name, \"exists, skipping..\")\n dict_region_sslcerts[reg].append(dict_existing_certs[domain_name][0])\n else:\n print(\"[INFO] SSL certificate of\", domain_name, \"does NOT exist, requesting..\")\n request_ssl_certificates(reg, domain_name)\n\n print(\"[RESULT] OF STEP 1\")\n pp.pprint(dict_region_sslcerts)\n\n # 2/ - create target group\n dict_region_tgarn.clear()\n array_tgs = create_name_target_group(i, ID_DOMAIN_NAME)\n pp.pprint(array_tgs)\n create_target_group(reg, array_tgs)\n print(\"[RESULT] OF STEP 2\")\n pp.pprint(dict_region_tgarn)\n\n # 3/ - create elb\n dict_region_elb2arn.clear()\n elb2_name = ''.join('s' + str(i) + '-' + ID_DOMAIN_NAME + '-' + reg)\n array_dns_hostedzone = create_elb2(reg, elb2_name)\n print(\"[RESULT] OF STEP 3\")\n pp.pprint(dict_region_elb2arn)\n\n # 4/ - create listener\n dict_region_ListenerArn.clear()\n create_listener(reg, dict_region_elb2arn, dict_region_sslcerts, dict_region_tgarn)\n print(\"[RESULT] OF STEP 4\")\n pp.pprint(dict_region_ListenerArn)\n\n # 5/ - create one more rule for the current listener\n host_header_value = ''.join('ws.s' + str(i) + '.' 
+ BASE_DOMAIN_NAME)\n create_rule(reg, dict_region_ListenerArn, dict_region_tgarn, dict_region_elb2arn, host_header_value)\n\n # 6/ - register explorer instances into the target group\n register_explorers(reg, array_instance_id, dict_region_tgarn)\n\n # 7/ - create entries on Route 53\n array_record_set = create_name_record_set(i, BASE_DOMAIN_NAME)\n create_dns_entries(HOSTED_ZONE_ID, array_record_set, array_dns_hostedzone)", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def keys(self) -> None:\r\n path = Path('./config/key')\r\n global key\r\n # If the file path does not exist, create one \r\n if not path.exists():\r\n os.makedirs(path)\r\n while True:\r\n # read key.key file\r\n try:\r\n file = open(path / 'key.key', 'rb')\r\n key = file.read()\r\n file.close\r\n # when key.key file does not exist. Create one\r\n except FileNotFoundError:\r\n key = Fernet.generate_key()\r\n file = open(path / 'key.key', 'wb')\r\n file.write(key)\r\n file.close()\r\n continue\r\n break" ]
[ "0.65445393", "0.6132496", "0.60415924", "0.5918604", "0.59134144", "0.587338", "0.5822073", "0.5657623", "0.5650972", "0.5635661", "0.5604821", "0.5563397", "0.5509014", "0.5504165", "0.5494566", "0.54587173", "0.5435119", "0.54258394", "0.54082614", "0.5393367", "0.5384728", "0.5372961", "0.532285", "0.532167", "0.5318754", "0.5316679", "0.5307809", "0.5272147", "0.5269188", "0.52466214" ]
0.7191877
0
Interpret for left != right
def _op_ne(self, left: Any, right: Any) -> BoolOrIter:
    out = self._op_eq(left, right)
    if isinstance(out, (numpy.ndarray, Series)):
        neout = ~out
        # neout[pandas.isna(out)] = numpy.nan
        return neout
    # out is always a numpy.ndarray
    return not out  # pragma: no cover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(left, right):\n return (not (left == right))", "def ne (x,y):\n\n return not (le(x,y) and le(y,x))", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def ne (self, other):\n return not (self == other) # opposite of eq", "def _isLeft(P0, P1, P2):\n return (P1.x - P0.x)*(P2.y - P0.y) - (P2.x - P0.x)*(P1.y - P0.y)", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def __ne__(self, other):\n return np.all(self.grid != other.grid) or np.all(self.pos != other.pos)", "def __neq__(self, other): \n return not self == other", "def __ne__(self, other):\n return self.x != other.x or self.y != other.y", "def assert_equal(left, right):\n msg = \"{} != {}\".format(left, right)\n assert left == right, msg", "def equal_mirror(t, s):\n if t is None and s is None:\n return True\n if t is None or s is None:\n return False\n if t.value != s.value:\n return False\n return equal_mirror(t.left, s.right) and equal_mirror(t.right, s.left)", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def __neq__(self, other):\n return self.index != other.index", "def __ne__(self, other):\r\n if isinstance(other, vec4):\r\n return self.x!=other.x or self.y!=other.y or self.z!=other.z\r\n else:\r\n return 1", "def __ne__(self, other):\n return not self == other", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def __ne__(self, other):\n\t\treturn not self == other", "def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __invert__(self) -> BooleanExpression:", "def __ne__(self, other):\n return tuple(self) != tuple(other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def __ne__(self, other):\n return not self == other", "def __ne__(self,other):\n return not self == other", "def __ne__(self, other):\r\n return not (self == other)", "def pairwise(a,b):\n return a != b", "def __ne__(self,other):\n return not (self == other)" ]
[ "0.8098229", "0.70341194", "0.6655749", "0.64999557", "0.64726496", "0.6470631", "0.64697796", "0.6455494", "0.6385901", "0.63324577", "0.6331694", "0.6274154", "0.6211507", "0.6198755", "0.61944705", "0.6193615", "0.6182596", "0.6180644", "0.6173583", "0.6173583", "0.6162615", "0.6160705", "0.6140654", "0.6140654", "0.6138237", "0.6127185", "0.61099315", "0.60963964", "0.6086259", "0.6075714" ]
0.72011685
1
Recycle left right operands to each other
def _recycle_left_right(left: Any, right: Any) -> Tuple:
    try:
        left = recycle_value(left, length_of(right))
    except DataUnrecyclable:
        right = recycle_value(right, length_of(left))
    return left, right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def RewriteOR(self, left, right):\n return None", "def __call__(self):\n return self._left() + self._right()", "def __radd__(self, left):\n return self.value() + left", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def _op_or_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n return Collection(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left | right", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op", "def __ror__(self, other):\n return whitespaces.CURRENT.normalize(other) | self", "def __rmod__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mod, other)", "def commutator(left_operator, right_operator):\n if not isinstance(left_operator, type(right_operator)):\n raise TypeError('operator_a and operator_b are not of the same type.')\n valueable_type = (QubitOperator, FermionOperator, QubitExcitationOperator)\n if not isinstance(left_operator, valueable_type):\n raise TypeError(\n \"Operator should be QubitOperator, FermionOperator or QubitExcitationOperator.\"\n )\n\n result = left_operator * right_operator\n result -= right_operator * left_operator\n return result", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = 
self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)", "def _rconcat(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(concat_op, other)", "def r_degenerate(self):\n self.tmp = self.left\n self.left = self.right", "def test_chained_right(self):\n n1, n2, n3 = Node('a'), Node('b'), Node('c')\n result = n1 | n2 * 'foo' | n3\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [Edge(n2, n3, 'foo')])\n self.assertEqual(n3.ein, [Edge(n2, n3, 'foo')])", "def commute_operands(self, node):\n def is_assumption(n):\n \"\"\"Return whether a node is an assumption.\"\"\"\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False\n\n def has_assumption(n):\n \"\"\"Return whether a BinOpCode contains an assumption.\"\"\"\n if not isinstance(n, types.BinOpCode):\n return False\n return any(is_assumption(i) for i in [n.left, n.right])\n\n def should_commute(n):\n return is_assumption(n) or has_assumption(n)\n\n # Commute operands of different operations.\n # e.g. 
2 + assumption + 3 --> 2 + 3 + assumption\n if self.is_commutative(node) and has_assumption(node.left) and node.left.name == node.name:\n # Move the assumption so we can be sure it's in the attribute 'right'.\n if is_assumption(node.left.left):\n node.left.left, node.left.right = node.left.right, node.left.left\n\n self.debug('Commuting operations for %s and %s' % (format_structural_op(node.left), format_structural_op(node.right)), node.lineno)\n right = node.right\n node.right = node.left.right\n node.left.right = right\n\n if should_commute(node.left) or not should_commute(node.right):\n return\n\n if self.is_commutative(node):\n self.debug('Commuting operands for %s' % format_structural_op(node), node.lineno)\n node.left, node.right = node.right, node.left\n elif self.has_logical_equivalent(node):\n logmsg = 'Replacing %s with logical equivalent ' % format_structural_op(node)\n node.name = logical_equivalents[node.name]\n node.left, node.right = node.right, node.left\n logmsg += format_structural_op(node)\n self.debug(logmsg, node.lineno)", "def reverse_operate(\n self, op: OperatorType, other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def l_un_degenerate(self):\n self.right = self.tmp", "def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)", "def rotate_right(self):\n\t\ttemp = self.left\n\t\tself.left = temp.right\n\t\ttemp.right = self\n\t\tself = temp", "def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)", "def reverse_distribute(node: NodeT) -> NodeT:\n\n def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n \"\"\"Apply left- or right-distributive property in reverse, if possible\n\n Args:\n node: ir.Node to process.\n left_distribute: Whether to apply *left*-distributive property.\n\n Returns:\n Processed node.\n \"\"\"\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node\n\n return node.visit(visitor, True).visit(visitor, False)", "def __rrshift__(self, other):\n if isinstance(other, Callable):\n return self @ other\n else:\n return self(other) # Function application", "def right_shift(lhs, rhs):\n return _make.right_shift(lhs, rhs)", "def r_un_degenerate(self):\n self.left = self.tmp", "def __rshift__(self, other):\n other.set_upstream(self)\n 
# return other so a >> b >> c works\n return other", "def __rmul__(self, left):\n return Factorization([(left, 1)]) * self", "def l_degenerate(self):\n self.tmp = self.right\n self.right = self.left" ]
[ "0.68653905", "0.6566106", "0.6530686", "0.6372209", "0.6346159", "0.62722206", "0.6268917", "0.6268676", "0.6260108", "0.6258728", "0.6246955", "0.6224608", "0.62071043", "0.61998665", "0.6179604", "0.6081393", "0.6077263", "0.6012032", "0.5989172", "0.5984579", "0.59795463", "0.592946", "0.5928578", "0.5902864", "0.59005296", "0.5877099", "0.5871691", "0.58697677", "0.5861441", "0.5858072" ]
0.6620655
1
returns the sparsity penalty on network activations combined as a sum
def get_sparsity_penalty(nnet, inputs, sparsity, mode="mean", deterministic=False):

    assert mode in ("mean", "l1")

    rho = sparsity
    penalty = 0
    eps = 0.0001  # for numerical stability

    for layer in nnet.all_layers:
        if layer.isactivation:
            activation = lasagne.layers.get_output(layer, inputs=inputs, deterministic=deterministic)

            if mode == "mean":
                if layer.isrelu:
                    avg_activation = T.mean(T.gt(activation, T.zeros_like(activation)), axis=0, dtype='floatX')
                if layer.issigmoid:
                    avg_activation = T.mean(activation, axis=0, dtype='floatX')

                KL_div = T.sum((rho+eps) * (T.log(rho+eps) - T.log(avg_activation+eps)) +
                               (1-rho+eps) * (T.log(1-rho+eps) - T.log(1-avg_activation+eps)),
                               dtype='floatX')
                penalty = penalty + KL_div

            if mode == "l1":
                penalty = penalty + T.sum(abs(activation), dtype='floatX')

    return T.cast(penalty, dtype='floatX')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def s_penalty(self, triples, nodes):\n\n s_index, p_index, o_index = split_spo(triples)\n\n s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]\n\n return s.pow(2).mean() + p.pow(2).mean() + o.pow(2).mean()", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def penalty(self):\n return 0", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def get_spectral_penalty(nnet, include_bias=False):\n\n penalty = 0\n\n for layer in nnet.trainable_layers:\n if not layer.issvm:\n eigenvalues, eigvec = T.nlinalg.eigh(T.dot(layer.W, layer.W.T))\n eig_max = T.max(eigenvalues)\n penalty = penalty + eig_max\n\n if include_bias:\n for layer in nnet.trainable_layers:\n if (not layer.issvm) and (layer.b is not None):\n penalty = penalty + T.sum(abs(layer.b) ** 2)\n\n return T.cast(penalty, dtype='floatX')", "def calc_sparsity (data): \n matrix_size = data.shape[0]*data.shape[1] # Number of possible interactions in the matrix\n num_purchases = len(data.nonzero()[0]) # Number of items interacted with\n sparsity = 100*(1 - (num_purchases/matrix_size))\n print('{:.2f} % of the user interaction matrix is sparse'.format(sparsity,2))", "def combined_costs(matrix_MSLL_IO):\r\n return", "def sparsity(self):\n nelem = self._nelem\n\n if nelem is None:\n self._logger.warning(\n \"this matrix will be considered as dense as it has not had its number of elements defined\")\n nelem = self._size\n\n return 1.0 - nelem / self._size", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. 
/ np.e)\n return s", "def perplexity(self, sents):\n return 2 ** self.cross_entropy(sents)", "def model_sparsity(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n sparsity, _, _ = model_params_stats(model, param_dims, param_types)\n return sparsity", "def get_prod_penalty(nnet):\n\n assert Cfg.ocsvm_loss is True\n\n penalty = 0\n layers = nnet.trainable_layers\n num_layers = len(layers) - 1 # do not regularize parameters of oc-svm layer\n assert num_layers > 0\n\n W_norm_prod = 1.0\n\n if layers[num_layers-1].b is not None:\n penalty += T.sum(layers[num_layers-1].b ** 2)\n\n for i in range(num_layers-1):\n W_norm_prod *= T.sum(layers[num_layers-1-i].W ** 2)\n if layers[num_layers-2-i].b is not None:\n penalty += W_norm_prod * T.sum(layers[num_layers-2-i].b ** 2)\n\n W_norm_prod *= T.sum(layers[0].W ** 2)\n\n penalty += W_norm_prod\n penalty *= T.sum(nnet.ocsvm_layer.W ** 2)\n\n return penalty", "def test_calc_layer_sparsity():\n test_ndarray = np.array([[0, 2, 0], [1, 0, 1]])\n assert lu.calc_layer_sparsity(test_ndarray) == 3 / 6, 'correct sparsity value'\n\n test_ndarray = np.array([[0, 0, 0], [1, 0, 1]])\n assert abs(lu.calc_layer_sparsity(test_ndarray) - 4 / 6) < 10**-8, 'correct sparsity value'\n assert lu.calc_layer_sparsity(np.zeros((20, 20))) == 1.0, 'zero array should have 1.0 sparsity'\n assert lu.calc_layer_sparsity(\n np.random.rand(20, 20)) == 0.0, 'random array should have 0.0 sparsity'\n assert type(lu.calc_layer_sparsity(np.zeros((10, 10)))) is float, 'return value should be of type float'", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights", "def get_strength(self):\n return 10 - self.get_agility()", "def cost(self) -> float:", "def nSensParams(self):\n sum = 0\n for r in self._reactors:\n sum += r.nSensParams()\n return sum", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == 
\"bn_filter\" ):\n ### test both column and row sparsity\n print (\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == \"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def get_sparsity(self, exclude=[]):\n nnz 
= 0 # number of nonzero elements\n nz = 0 # number of zero elements\n for key in self.variables:\n if key in exclude:\n continue\n nnz += amath.sum(self.variables[key] != 0)\n nz += amath.sum(self.variables[key] == 0)\n sparsity = float(nz) / (nnz + nz)\n return sparsity", "def sparsity(model: keras.Model):\n zero = tf.constant(0, dtype=tf.float32)\n model_weight_size = 0\n model_zeros = 0\n sparsity_dict = {}\n\n for layer in model.layers:\n layer_sparsity_dict = {}\n\n for i, weight in enumerate(layer.trainable_weights):\n mask = tf.cast(tf.equal(weight, zero), tf.uint8)\n\n weight_size = tf.size(weight)\n zeros = tf.cast(tf.math.count_nonzero(mask), tf.int32)\n layer_sparsity_dict[weight.name] = zeros / weight_size\n\n model_weight_size += weight_size\n model_zeros += zeros\n\n sparsity_dict[layer.name] = layer_sparsity_dict\n\n model_sparsity = model_zeros / model_weight_size\n\n return model_sparsity, sparsity_dict", "def update_weights_sum(self):\n vals = self.nn.get_param_values()\n # only use the last layer for summation (w, b)\n self.w_sum = np.sum(vals[-2]) + np.sum(vals[-1])", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def get_weights(self):", "def weights(self) -> List[float]:", "def strength(self) -> float:\n ...", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def calculateCost(self,sol,weights):\n\t\treturn sum([x.value*y if x != None else 0 \\\n\t\t\t\t\tfor x,y in zip(sol,weights)])", "def getWeights(self, gameState, action):\n\n return {\n 'successorScore': 1.0\n }" ]
[ "0.67767024", "0.62359196", "0.6125899", "0.60773647", "0.5991481", "0.596333", "0.5899081", "0.58852255", "0.5814007", "0.57617986", "0.5758206", "0.57509667", "0.57433456", "0.57304865", "0.56787086", "0.56762224", "0.5659587", "0.5657149", "0.56555104", "0.5648981", "0.5642741", "0.5632752", "0.56002814", "0.5582769", "0.5571518", "0.55609745", "0.5560528", "0.55443484", "0.5533882", "0.55334055" ]
0.6444512
1
returns the offset to balance the polynomial parameters made possible by the bias terms of the network.
def get_bias_offset(nnet): offset = 0 L = len(nnet.trainable_layers) for l in range(L-1): layer = nnet.trainable_layers[l] if layer.b is not None: W_prod = T.eye(int(layer.b.shape.eval()[0])) for k in range(1, L-1): W_prod = T.dot(nnet.trainable_layers[k].W.T, W_prod) offset = offset + T.dot(W_prod, layer.b) offset = T.dot(nnet.ocsvm_layer.W.T, offset) return T.sum(offset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_bias(self) -> JTensor:\n p = self.params\n b = self.local_theta().b\n if p.forget_gate_bias != 0.0:\n b = b + self.get_adjustment()\n\n return b", "def get_bias(self):", "def bias(self):\n return self.mbmod.bias", "def bias(self):\n return self._bias", "def get_bias(self):\n return self._bias", "def coordinate_delta_bias(sum_grad, sum_hess):\n return -sum_grad / sum_hess", "def bias_index(self):\n return _div(abs(self.FN - self.FP), self.grand_total)", "def gradient_bias(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the bias\n # The following line is just a placeholder\n return np.subtract(np.dot(np.transpose(predict(X, model)), np.ones(len(Y))), np.dot(np.transpose(len(Y)), np.ones(10))) #np.zeros(Y.shape[1])", "def bias_(self):\n return self.get_tensor_value('logistic_regression/bias:0')", "def bias(self) -> Optional[str]:\n return pulumi.get(self, \"bias\")", "async def get_focus_offset(self, **kwargs: Any) -> float:\n return 0", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def bias(self):\n if self._bias is None:\n with self:\n self._bias = nengo.Node([1], label='cortical bias')\n return self._bias", "def chain_offset(self):\n return self._chain_offset", "def bias_prior(self):", "def wm_offset(self):\n return self.get_par(\"offset\")", "def calc_bias(a,b):\n comb = a + b\n idx = np.array(range(len(a)))[~np.isnan(comb)]\n a1=a[idx]\n b1=b[idx]\n N = len(a1)\n bias = np.sum(a1-b1)/N\n return bias", "def bottom_offset(self):\n raise NotImplementedError", "def get_bprop_pow(self):\n pow_op = P.Pow()\n ln = P.Log()\n\n def bprop(x, power, out, dout):\n bc_dx = power * pow_op(x, power - 1.0) * dout\n bc_dpower = out * ln(x) * dout\n return binop_grad_common(x, power, bc_dx, bc_dpower)\n return bprop", "def __getxyB(x, y):\n\t\treturn x*3+y", "def backward(self, next_layer_weights, next_layer_delta):\n delta = np.dot(next_layer_weights.T, next_layer_delta)\n delta = delta * self.mask * self.activation_derivative(self.z)\n self.delta = delta\n return delta", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def get_bias_for_op(op):\n bias = None\n if op.type in _BIAS_TYPES:\n bias = op.inputs[constants.OP_WEIGHT_INDICES[op.type]]\n return bias", "def getBalanceFactor(self):\n \n return (self._leftSubtreeHeight - self._rightSubtreeHeight)", "def get_panels_pcb_offset():\n mount_hole_offset = arena_assembly['panels_to_hallway_gap']\n mount_hole_offset -= arena_assembly['panels_assembly_offset']\n mount_hole_offset += 0.5*hallway_bottom_plate['width']\n mount_hole_offset += 0.5*panels_pcb['width']\n return mount_hole_offset", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope", "def pbias(self) -> float:\n return float(100.0 * sum(self.predicted - self.true) / sum(self.true))" ]
[ "0.6315012", "0.6108197", "0.58673644", "0.5829149", "0.5699308", "0.5665857", "0.56237257", "0.56060565", "0.5575922", "0.55285054", "0.55088294", "0.54091525", "0.5405784", "0.5395721", "0.5392568", "0.53660536", "0.53445673", "0.5333317", "0.52875805", "0.52750427", "0.52747667", "0.52479863", "0.52479863", "0.52479863", "0.5238909", "0.5233136", "0.5215768", "0.52109665", "0.516689", "0.5132227" ]
0.70303106
0
create an OCSVM loss for the network given in the argument, with rho=1 fixed
def compile_update_ocsvm_rho_fixed(nnet, inputs, targets): floatX = Cfg.floatX C = Cfg.C nu = Cfg.nu if len(nnet.all_layers) > 1: feature_layer = nnet.all_layers[-2] else: feature_layer = nnet.input_layer final_layer = nnet.ocsvm_layer trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) # Regularization Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow) l2_penalty = get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow) l2_penalty += Wsvm_penalty l2_penalty *= (1/C) # Backpropagation prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) scores = T.ones_like(prediction) - prediction objective, train_acc = final_layer.objective(-scores, targets) # OC-SVM loss train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX') train_obj = T.cast(floatX(0.5) * l2_penalty + train_loss, dtype='floatX') updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [train_obj, train_acc], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) test_scores = T.ones_like(prediction) - test_prediction objective, test_acc = final_layer.objective(-test_scores, targets) # Get network feature representation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) test_ball_penalty = T.cast(0, dtype='floatX') test_l2_output = T.cast(0, dtype='floatX') # OC-SVM test loss test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX') test_acc = T.cast(test_acc * 1. / targets.shape[0], dtype='floatX') test_obj = T.cast(floatX(0.5) * l2_penalty + test_loss, dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, -test_scores, floatX(0.5) * l2_penalty, floatX(0.5) * test_l2_output, test_rep, test_rep_norm, test_loss, floatX(0.5) * test_ball_penalty])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_loss(self):\n\n with tf.name_scope(\"loss\"):\n \n # gini=(tf.nn.l2_loss( self.score))/100000\n gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)\n \n promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),\n axis=1)\n inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value\n inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost\n \n\n\n # determine loss function based on self.obj_rule\n if self.obj_rule == 'cpiv':\n self.objective = inc_cost / inc_value\n\n elif self.obj_rule == 'ivc':\n # maximize ivc\n self.objective = - inc_value / inc_cost\n\n elif self.obj_rule == 'lagrangian':\n assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'\n self.objective = inc_cost - self.shadow * inc_value\n\n elif self.obj_rule == 'value':\n # maximize delta values\n self.objective = - inc_value\n\n # use only cost as objective\n elif self.obj_rule == 'cost':\n # maximize delta cost\n self.objective = - inc_cost\n\n else:\n raise Exception('Invalid obj_rule!')\n\n # regularization\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n # weights = tf.trainable_variables() # all vars of your graph\n # reg_loss = tf.norm( weights,ord=1)\n\n # final loss\n self.loss = self.objective +reg_loss+.1*gini", "def compute_loss(self, obs, returns):", "def loss_creator(config):\n return torch.nn.BCELoss()", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def compile_update_svdd(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n B = Cfg.B\n C = Cfg.C\n nu = Cfg.nu\n\n # initialize R\n if nnet.R_init > 0:\n nnet.Rvar = shared(floatX(nnet.R_init), name=\"R\")\n else:\n nnet.Rvar = shared(floatX(1), name=\"R\") # initialization with R=1\n\n # Loss\n feature_layer = nnet.all_layers[-1]\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n\n # initialize c (0.5 in every feature representation dimension)\n rep_dim = feature_layer.num_units\n # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))),\n # name=\"c\")\n nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name=\"c\")\n\n dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n scores = dist - nnet.Rvar\n stack = T.stack([T.zeros_like(scores), scores], axis=1)\n loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu),\n dtype='floatX')\n\n y_pred = T.argmax(stack, axis=1)\n acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32')\n * 1. 
/ targets.shape[0]), 'floatX')\n\n # Network weight decay\n if Cfg.weight_decay:\n l2_penalty = (1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Network activation sparsity regularization\n if Cfg.sparsity_penalty:\n sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=False)\n else:\n sparsity_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation (hard-margin: only minimizing everything to a ball\n # centered at c)\n trainable_params = lasagne.layers.get_all_params(feature_layer,\n trainable=True)\n if Cfg.gaussian_blob:\n avg_dist = T.mean(1-T.exp(-dist), dtype=\"floatX\")\n else:\n avg_dist = T.mean(dist, dtype=\"floatX\")\n obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty,\n dtype='floatX')\n updates_ball = get_updates(nnet, obj_ball, trainable_params,\n solver=nnet.solver)\n nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc],\n updates=updates_ball)\n\n # Backpropagation (without training R)\n obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty,\n dtype='floatX')\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n # Backpropagation (with training R)\n trainable_params.append(nnet.Rvar) # add radius R to trainable parameters\n updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets], [obj, acc],\n updates=updates)\n\n\n # Forwardpropagation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n\n test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2),\n axis=1, dtype='floatX')\n\n test_scores = test_dist - nnet.Rvar\n test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1)\n test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu),\n dtype='floatX')\n\n test_y_pred = T.argmax(test_stack, axis=1)\n test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets),\n dtype='int32')\n * 1. 
/ targets.shape[0]), dtype='floatX')\n\n # Network activation sparsity regularization (with determinisitc=True)\n if Cfg.sparsity_penalty:\n test_sparsity_penalty = ((1 / B) *\n get_sparsity_penalty(nnet, inputs,\n Cfg.sparsity,\n mode=Cfg.sparsity_mode,\n deterministic=True))\n else:\n test_sparsity_penalty = T.cast(0, dtype='floatX')\n\n test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss\n + test_sparsity_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_scores,\n floatX(0.5) * l2_penalty,\n test_sparsity_penalty, test_rep,\n test_rep_norm, test_loss, nnet.Rvar])", "def loss_fun(para):\n\n return -data_processing(my_cir(para))", "def compute_loss(self):", "def loss_fn(self, targets, outputs, model):", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.5, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.tensor4()\n y_batch = T.tensor4()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n print(num_layers)\n\n code_layer = layers[num_layers/2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n # validation cost\n valid_output = lasagne.layers.get_output(network, X_batch)\n valid_cost = lasagne.objectives.mse(valid_output, y_batch) \n valid_cost = valid_cost.mean() \n\n # test the performance of the netowork without noise\n pred = lasagne.layers.get_output(network, X_batch, deterministic=True)\n # pred = T.argmax(test, axis=1)\n accuracy = 1 - T.mean(lasagne.objectives.mse(pred, y_batch), dtype=theano.config.floatX)\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n valid = theano.function(inputs=[X_batch, y_batch], outputs=valid_cost, allow_input_downcast=True)\n predict = theano.function(inputs=[X_batch], outputs=pred, allow_input_downcast=True)\n accuracy = theano.function(inputs=[X_batch,y_batch], outputs=accuracy, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n valid=valid,\n predict=predict,\n accuracy=accuracy,\n code=code\n )", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. 
Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def model_loss(self,input_real,input_z,out_channel_dim):\t\r\n label_smooth = 0.9 \r\n \r\n #get output of generator\r\n gen_img, gen_logits = self.generator(input_z,out_channel_dim,True)\r\n\r\n\t#pass real image to dicriminator\r\n disc_model_real, disc_logits_real = self.discriminator(input_real)\r\n\t\r\n\t#pass generated image to dicriminator\r\n disc_model_fake, disc_logits_fake = self.discriminator(gen_img,reuse=True)\r\n \r\n\t \t\r\n disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_real,labels=label_smooth*tf.ones_like(disc_model_real))) \r\n disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=tf.zeros_like(disc_model_fake)))\r\n \r\n\r\n\t\"\"\"\r\n\tLoss for discriminator is sum of loss for real image and fake image \r\n\t\"\"\"\t\r\n disc_loss = disc_loss_real + disc_loss_fake\r\n \r\n\r\n \"\"\"\r\n\tTo find loss for generator, fake image is passed with label= real (0.9)\r\n\t\"\"\"\r\n gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_logits_fake,labels=label_smooth*tf.ones_like(disc_model_fake)))\r\n \r\n return disc_loss,gen_loss,gen_img", "def __init__(self, S=7, B=2, C=20): \n super().__init__()\n self.mse = nn.MSELoss(reduction=\"sum\")\n self.S = S\n self.B = B\n self.C = C\n self.l_noobl = 0.5\n self.l_coord = 5", "def geo_loss_interface(pred_odo):\n def geo_loss_det(y_true, y_pred):\n odo_pose = pred_odo[:, 0:3]\n odo_orien = pred_odo[:, 3:]\n geo_pose = 0\n print('In Construction')\n return geo_loss_det", "def funcs(dataset, network, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the 
network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def __rho2v(self, vm, beta, rhoc, w, rho):\n if rho < 0:\n return float(vm)\n elif rho <= rhoc:\n return float(vm - vm * rho / beta)\n else:\n rhom = rhoc - (vm * rhoc - vm * (rhoc ** 2) / beta) / w\n # print('rho {0}; rhoc {1}'.format(rho, rhoc))\n return float(w * (rho - rhom) / rho)", "def define_loss_variables(n, sns, c):\n if n.df(c).empty or c not in n.passive_branch_components:\n return\n\n active = get_activity_mask(n, c, sns) if n._multi_invest else None\n coords = [sns, n.df(c).index.rename(c)]\n n.model.add_variables(0, coords=coords, name=f\"{c}-loss\", mask=active)", "def loss(L, S, A, R, Y, alpha, beta, gamma, delta):\n return alpha*load_imbalance(Y,L,A) + beta*constraints(Y,A) + gamma*transfers(Y,R,S) + delta*choice(Y)", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n 
unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1", "def meta_amortized_loss(network, model_indices, params, sim_data):\n\n out_inference, out_evidence = network(model_indices, params, sim_data)\n if out_inference is not None:\n z, log_det_J = out_inference\n kl_loss = tf.reduce_mean(0.5 * tf.square(tf.norm(z, axis=-1)) - log_det_J)\n else:\n kl_loss = 0\n \n if out_evidence is not None:\n model_probs = out_evidence\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n log_loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n else:\n log_loss = 0\n return kl_loss + log_loss", "def compute_loss(theta_vector, *args):\n\n psi = args[0]\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits))\n\n fidelity = get_fidelity(theta, psi)\n loss = get_loss(fidelity)\n return loss", "def loss(self, **kwargs):\n pass", "def loss(posterior, pars_to_penalize, c_rim):\n marginal = posterior.mean(axis=0)\n cond_entropy = misc.cat_entropy(posterior).mean()\n entropy = misc.cat_entropy(marginal.dimshuffle('x', 0)).sum()\n\n nmi = cond_entropy - entropy\n\n n_samples = posterior.shape[0]\n penalties = [(i ** 2).sum() / n_samples for i in pars_to_penalize]\n penalty = sum(penalties)\n\n loss = nmi + c_rim * penalty\n\n return get_named_variables(locals())", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def compile_update_ocsvm(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n A = 
Cfg.A\n nu = Cfg.nu\n\n if len(nnet.all_layers) > 1:\n feature_layer = nnet.all_layers[-2]\n else:\n feature_layer = nnet.input_layer\n final_layer = nnet.ocsvm_layer\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization (up to feature map)\n if Cfg.weight_decay:\n if Cfg.prod_penalty:\n l2_penalty = (1/C) * get_prod_penalty(nnet)\n elif Cfg.spec_penalty:\n l2_penalty = (1/C) * get_spectral_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = ((1/C) * get_l2_penalty(nnet,\n include_bias=Cfg.include_bias,\n pow=Cfg.pow))\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Bias offset\n if Cfg.bias_offset:\n bias_offset = get_bias_offset(nnet)\n else:\n bias_offset = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n objective, train_acc = final_layer.objective(prediction, targets)\n\n # Normalization\n rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=False)\n rep_norm = rep.norm(L=2, axis=1).dimshuffle((0, 'x'))\n if Cfg.ball_penalty:\n ball_penalty, _ = final_layer.objective(\n T.ones_like(rep_norm) - (rep_norm ** 2), targets)\n else:\n ball_penalty = T.cast(0, dtype='floatX')\n ball_penalty = (1/A) * T.cast(ball_penalty / targets.shape[0],\n dtype='floatX')\n\n # Output regularization\n if Cfg.output_penalty:\n l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow)\n * T.sum(abs(rep) ** 2))\n else:\n l2_output = T.cast(0, dtype='floatX')\n l2_output = T.cast(l2_output / targets.shape[0], dtype='floatX')\n\n # SVM parameter regularization\n if Cfg.Wsvm_penalty:\n Wsvm_penalty = T.sum(abs(final_layer.W) ** Cfg.pow)\n else:\n Wsvm_penalty = T.cast(0, dtype='floatX')\n\n # OC SVM loss has nu parameter and adds margin from origin to objective\n train_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')\n train_acc = T.cast(train_acc * 1. / targets.shape[0], dtype='floatX')\n train_obj = T.cast(floatX(0.5) * l2_penalty\n + floatX(0.5) * ball_penalty\n + floatX(0.5) * l2_output\n + floatX(0.5) * Wsvm_penalty\n + train_loss\n + T.sum(final_layer.b)\n + bias_offset, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n # get network feature representation\n test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs,\n deterministic=True)\n test_rep_norm = test_rep.norm(L=2, axis=1)\n if Cfg.ball_penalty:\n test_ball_penalty, _ = final_layer.objective(\n T.ones_like(test_rep_norm.dimshuffle((0, 'x')))\n - (test_rep_norm.dimshuffle((0, 'x')) ** 2), targets)\n else:\n test_ball_penalty = T.cast(0, dtype='floatX')\n test_ball_penalty = ((1/A) * T.cast(\n test_ball_penalty / targets.shape[0], dtype='floatX'))\n\n # Output regularization\n if Cfg.output_penalty:\n test_l2_output = (1/C) * (T.sum(abs(final_layer.W) ** Cfg.pow)\n * T.sum(abs(test_rep) ** 2))\n else:\n test_l2_output = T.cast(0, dtype='floatX')\n test_l2_output = T.cast(test_l2_output / targets.shape[0], dtype='floatX')\n\n objective, test_acc = final_layer.objective(test_prediction, targets)\n test_loss = T.cast(objective / (targets.shape[0] * nu), dtype='floatX')\n test_acc = T.cast(test_acc * 1. 
/ targets.shape[0], dtype='floatX')\n test_obj = T.cast(floatX(0.5) * l2_penalty\n + floatX(0.5) * test_ball_penalty\n + floatX(0.5) * test_l2_output\n + floatX(0.5) * Wsvm_penalty\n + test_loss\n + T.sum(final_layer.b), dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n floatX(0.5) * l2_penalty,\n floatX(0.5) * test_l2_output, test_rep,\n test_rep_norm, test_loss,\n floatX(0.5) * test_ball_penalty])", "def compute_loss(\n action_probs: tf.Tensor, values: tf.Tensor, returns: tf.Tensor\n) -> tf.Tensor:\n\n advantage = returns - values\n td = tf.subtract(returns, values)\n\n # actor\n # action_log_probs = tf.math.log(action_probs)\n # actor_loss = -tf.math.reduce_sum(action_log_probs * advantage)\n action_log_probs = tf.math.log(action_probs)\n actor_loss = -tf.math.reduce_mean(action_log_probs * td)\n\n # critic\n # td = tf.subtract(returns, values)\n # critic_loss = tf.reduce_mean(tf.square(td))\n critic_loss = huber_loss(values, returns)\n\n tf.print(\"a_loss:\", actor_loss, \"c_loss:\", critic_loss)\n\n return actor_loss + critic_loss", "def get_loss_fn():\n return reconstruction" ]
[ "0.6449017", "0.6109472", "0.605694", "0.5996417", "0.593703", "0.589075", "0.5883496", "0.58318764", "0.5773389", "0.5742824", "0.5693317", "0.569238", "0.56884587", "0.5637557", "0.56182146", "0.5615014", "0.55934024", "0.55804175", "0.55716777", "0.5567387", "0.5556932", "0.5555117", "0.5550353", "0.5537782", "0.54960036", "0.5486453", "0.5465885", "0.5460442", "0.5458186", "0.54563606" ]
0.6604869
0
create an SVDD loss for the network given in the argument
def compile_update_svdd(nnet, inputs, targets): floatX = Cfg.floatX B = Cfg.B C = Cfg.C nu = Cfg.nu # initialize R if nnet.R_init > 0: nnet.Rvar = shared(floatX(nnet.R_init), name="R") else: nnet.Rvar = shared(floatX(1), name="R") # initialization with R=1 # Loss feature_layer = nnet.all_layers[-1] rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=False) # initialize c (0.5 in every feature representation dimension) rep_dim = feature_layer.num_units # nnet.cvar = shared(floatX(np.ones(rep_dim) * (1. / (rep_dim ** 0.5))), # name="c") nnet.cvar = shared(floatX(np.ones(rep_dim) * 0.5), name="c") dist = T.sum(((rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX') scores = dist - nnet.Rvar stack = T.stack([T.zeros_like(scores), scores], axis=1) loss = T.cast(T.sum(T.max(stack, axis=1)) / (inputs.shape[0] * nu), dtype='floatX') y_pred = T.argmax(stack, axis=1) acc = T.cast((T.sum(T.eq(y_pred.flatten(), targets), dtype='int32') * 1. / targets.shape[0]), 'floatX') # Network weight decay if Cfg.weight_decay: l2_penalty = (1/C) * get_l2_penalty(nnet, include_bias=Cfg.include_bias, pow=Cfg.pow) else: l2_penalty = T.cast(0, dtype='floatX') # Network activation sparsity regularization if Cfg.sparsity_penalty: sparsity_penalty = (1/B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=False) else: sparsity_penalty = T.cast(0, dtype='floatX') # Backpropagation (hard-margin: only minimizing everything to a ball # centered at c) trainable_params = lasagne.layers.get_all_params(feature_layer, trainable=True) if Cfg.gaussian_blob: avg_dist = T.mean(1-T.exp(-dist), dtype="floatX") else: avg_dist = T.mean(dist, dtype="floatX") obj_ball = T.cast(floatX(0.5) * l2_penalty + avg_dist + sparsity_penalty, dtype='floatX') updates_ball = get_updates(nnet, obj_ball, trainable_params, solver=nnet.solver) nnet.backprop_ball = theano.function([inputs, targets], [obj_ball, acc], updates=updates_ball) # Backpropagation (without training R) obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + loss + sparsity_penalty, dtype='floatX') updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver) nnet.backprop_without_R = theano.function([inputs, targets], [obj, acc], updates=updates) # Backpropagation (with training R) trainable_params.append(nnet.Rvar) # add radius R to trainable parameters updates = get_updates(nnet, obj, trainable_params, solver=nnet.solver) nnet.backprop = theano.function([inputs, targets], [obj, acc], updates=updates) # Forwardpropagation test_rep = lasagne.layers.get_output(feature_layer, inputs=inputs, deterministic=True) test_rep_norm = test_rep.norm(L=2, axis=1) test_dist = T.sum(((test_rep - nnet.cvar.dimshuffle('x', 0)) ** 2), axis=1, dtype='floatX') test_scores = test_dist - nnet.Rvar test_stack = T.stack([T.zeros_like(test_scores), test_scores], axis=1) test_loss = T.cast(T.sum(T.max(test_stack, axis=1)) / (inputs.shape[0]*nu), dtype='floatX') test_y_pred = T.argmax(test_stack, axis=1) test_acc = T.cast((T.sum(T.eq(test_y_pred.flatten(), targets), dtype='int32') * 1. 
/ targets.shape[0]), dtype='floatX') # Network activation sparsity regularization (with determinisitc=True) if Cfg.sparsity_penalty: test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.sparsity, mode=Cfg.sparsity_mode, deterministic=True)) else: test_sparsity_penalty = T.cast(0, dtype='floatX') test_obj = T.cast(floatX(0.5) * l2_penalty + nnet.Rvar + test_loss + test_sparsity_penalty, dtype='floatX') nnet.forward = theano.function([inputs, targets], [test_obj, test_acc, test_scores, floatX(0.5) * l2_penalty, test_sparsity_penalty, test_rep, test_rep_norm, test_loss, nnet.Rvar])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def loss_fn(self, targets, outputs, model):", "def tv_loss(input: th.Tensor):\n input = tf.pad(input, (0, 1, 0, 1), \"replicate\")\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def ss_loss_(self, batch):\n raise NotImplementedError", "def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()", "def compute_loss(self):", "def loss(self):\n return 'mse'", "def loss(self, **kwargs):\n pass", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def svm_loss(x, y):\n N = x.shape[0]\n x = np.squeeze(x)\n loss = np.sum(((1-x*y)>0)*(1-x*y))/N\n dx = ((1-x*y)>0)*(-y)/N\n return loss, dx", "def snn(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 20)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('patience', 10000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(None, 28 * 28), input_var=input_var, name='Input')\n # Dropout Layer\n l_dro1 = AlphaDropout(incoming=l_in, corruption_level=0.2, name='Dropout 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_dro1, n_units=500, W=selu_normal,\n activation=selu, name='Hidden layer 1')\n # Dropout Layer\n l_dro2 = AlphaDropout(incoming=l_hid1, corruption_level=0.1, name='Dropout 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_dro2, n_units=500, W=selu_normal,\n activation=selu, name='Hidden layer 2')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_hid2, n_class=10, name='Logistic regression')\n\n # Create network and add layers\n net = Network('dropout')\n net.add(l_in)\n net.add(l_dro1)\n net.add(l_hid1)\n net.add(l_dro2)\n net.add(l_hid2)\n net.add(l_out)\n\n return net, hp", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def vae_loss_function_factory(reduction='mean'):\n def vae_loss_function(outputs, targets, mean, std_dev):\n outputs_flat = outputs.view(-1, 28 * 28)\n targets_flat = targets.view(-1, 
28 * 28)\n if reduction == 'mean':\n image_loss = torch.mean((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.mean((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'sum':\n image_loss = torch.sum((outputs_flat - targets_flat).pow(2).sum(dim=1))\n latent_loss = -0.5 * torch.sum((1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1))\n elif reduction == 'none':\n image_loss = (outputs_flat - targets_flat).pow(2).sum(dim=1)\n latent_loss = -0.5 * (1 + 2 * std_dev - mean.pow(2) - torch.exp(2 * std_dev)).sum(dim=1)\n else:\n raise NotImplementedError('Reduction ' + reduction + ' not implemented.')\n return image_loss + latent_loss\n return vae_loss_function", "def tv_loss(img, tv_weight):\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def loss_fn(outputs, labels):\n #print('this is outputs', outputs.shape) # 2,3,128,128\n #print('this is labels', labels.shape) # 2,3,128,128\n N, C, H, W = outputs.shape\n \n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n mse_loss = torch.sum((outputs - labels) ** 2) / N / C # each photo, each channel\n mse_loss *= 255 * 255\n mse_loss /= H * W \n # average loss on each pixel(0-255)\n return mse_loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n y_pred = self.run(xs)\n return nn.SoftmaxLoss(y_pred,y)", "def compute_loss(self, obs, returns):", "def wasserstein_d_update(loss, optimizer, clipping_value, var_list=None, name='d_update'):\n # gradients, var_list = zip(*optimizer.compute_gradients(loss, var_list=var_list))\n # optimizer.apply_gradients(zip(gradients, var_list), name=name)\n # return clip_discriminator_var_op\n\n opt_op = optimizer.minimize(loss, var_list=var_list, name=name)\n with tf.control_dependencies([opt_op]):\n clip_discriminator_var_op = [\n var.assign(tf.clip_by_value(var, -clipping_value, clipping_value))\n for var in var_list]\n return clip_discriminator_var_op", "def loss_(self, batch):\n raise NotImplementedError", "def loss(A, Y):\n return A - Y", "def heteroscedastic_loss(network, params, x):\n\n pred_mean, pred_var = network(x)\n logvar = tf.reduce_sum(0.5 * tf.math.log(pred_var), axis=-1)\n squared_error = tf.reduce_sum(0.5 * tf.math.square(params - pred_mean) / pred_var, axis=-1)\n loss = tf.reduce_mean(squared_error + logvar)\n return loss", "def dloss(self, output, labels):\n return 2*(output - labels)/labels.shape[1]", "def svm_loss(x, y):\n\n x = x.reshape((-1,1))\n y = y.reshape((-1,1))\n N,_ = x.shape\n \n y_p = np.where(y == 1,1,-1)\n \n losses = np.maximum(0,1-(x*y_p))\n loss = np.sum(losses)/N\n dx = np.where(losses > 0, 1, 0)*(-y_p)/N\n dx = dx.reshape((-1,))\n\n return loss, dx", "def make_loss(self, logit=None, labels=None):\r\n return nn.functional.mse_loss(logit, labels, reduction='mean') # The MSE Loss\r", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. 
Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def loss_fn(input_d, reconstructed, mean, logvar, beta=1, batch_size=1, input_size=1):\n\n # mse_criterion = nn.MSELoss() # reduction=sum ?\n # mse_loss = mse_criterion(input_d, reconstructed)\n\n # bce_criterion = nn.BCELoss(size_average=False) # reduction=sum ?\n bce_criterion = nn.BCELoss() # reduction=sum ?\n bce_loss = bce_criterion(input_d, reconstructed)\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n\n # for gaussian distribution when\n # generated data passed to the encorder is z~ N(0,1) and generated data is x~N(m,var)\n\n kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\n\n normalized_kl_loss = kl_loss / (batch_size * input_size)\n scaled_kl_loss = beta*normalized_kl_loss\n # scaled_kl_loss = beta*kl_loss\n\n # return bce_loss + kl_loss, bce_loss, kl_loss\n return bce_loss + scaled_kl_loss, bce_loss, normalized_kl_loss\n # return mse_loss + scaled_kl_loss, mse_loss, kl_loss", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def get_loss_fn():\n return reconstruction", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)" ]
[ "0.63613164", "0.6158965", "0.6017301", "0.5997169", "0.590424", "0.5783352", "0.57639736", "0.5728302", "0.5715514", "0.5708102", "0.5672599", "0.56407714", "0.5635773", "0.5630035", "0.56168777", "0.5610483", "0.5603815", "0.55986285", "0.5573956", "0.5568347", "0.55591923", "0.5530415", "0.5523093", "0.55166364", "0.55079204", "0.5489856", "0.5487464", "0.54823416", "0.5481739", "0.5477149" ]
0.6577575
0
create an autoencoder Theano update for the network given in the argument
def create_autoencoder(nnet): floatX = Cfg.floatX B = Cfg.ae_B C = Cfg.ae_C ndim = nnet.data._X_train.ndim if ndim == 2: inputs = T.matrix('inputs') elif ndim == 4: inputs = T.tensor4('inputs') final_layer = nnet.all_layers[-1] # Backpropagation trainable_params = lasagne.layers.get_all_params(final_layer, trainable=True) prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=False) # use l2 or binary crossentropy loss (features are scaled to [0,1]) if Cfg.ae_loss == "l2": loss = lasagne.objectives.squared_error(prediction, inputs) if Cfg.ae_loss == "ce": loss = lasagne.objectives.binary_crossentropy(prediction, inputs) scores = T.sum(loss, axis=range(1, ndim), dtype='floatX') loss = T.mean(scores) # Regularization if Cfg.ae_weight_decay: l2_penalty = (floatX(0.5) / C) * regularize_network_params(final_layer, l2) else: l2_penalty = T.cast(0, dtype='floatX') # Network activation sparsity regularization if Cfg.ae_sparsity_penalty: sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=False)) else: sparsity_penalty = T.cast(0, dtype='floatX') train_obj = loss + l2_penalty + sparsity_penalty updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.ae_solver) nnet.ae_backprop = theano.function([inputs], [loss, l2_penalty, sparsity_penalty, scores], updates=updates) # Forwardpropagation test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs, deterministic=True) # use l2 or binary crossentropy loss (features are scaled to [0,1]) if Cfg.ae_loss == "l2": test_loss = lasagne.objectives.squared_error(test_prediction, inputs) if Cfg.ae_loss == "ce": test_loss = lasagne.objectives.binary_crossentropy(test_prediction, inputs) test_scores = T.sum(test_loss, axis=range(1, ndim), dtype='floatX') test_loss = T.mean(test_scores) # Network activation sparsity regularization (with determinisitc=True) if Cfg.ae_sparsity_penalty: test_sparsity_penalty = ((1 / B) * get_sparsity_penalty(nnet, inputs, Cfg.ae_sparsity, mode=Cfg.ae_sparsity_mode, deterministic=True)) else: test_sparsity_penalty = T.cast(0, dtype='floatX') nnet.ae_forward = theano.function([inputs], [test_loss, l2_penalty, test_sparsity_penalty, test_scores, test_prediction])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_full_conv_autoencoder():\n input_img = Input(shape=(84, 84, 3))\n\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c3')(x)\n encoded = MaxPooling2D((3, 3), border_mode='same')(x)\n\n x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='c4')(encoded)\n x = UpSampling2D((3, 3))(x)\n x = Convolution2D(32, 4, 4, activation='relu', border_mode='same', name='c5')(x)\n x = UpSampling2D((2, 2))(x)\n x = Convolution2D(48, 8, 8, activation='relu', border_mode='same', name='c6')(x)\n x = UpSampling2D((2, 2))(x)\n decoded = Convolution2D(3, 4, 4, activation='sigmoid', border_mode='same', name='c7')(x)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n n_stacks = len(dims) - 1\n # input\n input_img = Input(shape=(dims[0],), name='input')\n x = input_img\n # internal layers in encoder\n for i in range(n_stacks-1):\n x = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(x)\n\n # hidden layer\n encoded = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(x) # hidden layer, features are extracted from here\n\n x = encoded\n # internal layers in decoder\n for i in range(n_stacks-1, 0, -1):\n x = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(x)\n\n # output\n x = Dense(dims[0], kernel_initializer=init, name='decoder_0')(x)\n decoded = x\n return Model(inputs=input_img, outputs=decoded, name='AE'), Model(inputs=input_img, outputs=encoded, name='encoder')", "def autoencoder_model(optimizer, learning_rate, \n filter_block1, kernel_size_block1, \n filter_block2, kernel_size_block2, \n filter_block3, kernel_size_block3, \n filter_block4, kernel_size_block4, \n activation_str, padding):\n # Input Tensors - fully conv\n input_img = Input(shape=(None, None, 1))\n # Encoder Part\n x = Conv2D(filters=filter_block1, kernel_size=kernel_size_block1, padding=padding)(input_img) # 420x540x32\n x = Activation('relu')(x)\n x = MaxPooling2D()(x) # 210x270x32\n encoded = Conv2D(filters=filter_block2, kernel_size=kernel_size_block2, padding=padding)(x) # 105x135x32\n # Decoder Part\n x = Conv2D(filters=filter_block3, kernel_size=kernel_size_block3, padding=padding)(encoded) # 210x270x32\n x = Activation('relu')(x)\n x = UpSampling2D()(x) # 420x540x32\n decoded = Conv2D(filters=filter_block4, kernel_size=kernel_size_block4, activation='sigmoid', padding=padding)(x) # 420x540x1\n\n # Build the model\n autoencoder = Model(inputs=input_img, outputs=decoded)\n opt = optimizer(learning_rate=learning_rate)\n autoencoder.compile(loss=\"binary_crossentropy\", optimizer=opt)\n autoencoder.summary()\n return autoencoder", "def autoencoder(X, inp_dims=2048):\n drop = tf.keras.layers.Dropout(rate=0.2)\n FC1 = tf.layers.Dense(units=inp_dims // 2, activation=\"tanh\", name='fc1')\n FC2 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc2')\n FC3 = tf.layers.Dense(units=inp_dims // 8, activation=None, name='fc3')\n Act = tf.keras.layers.Activation(activation=\"tanh\")\n # FC4 = tf.layers.Dense(units=inp_dims // 
8,activation=\"tanh\",name='fc4')\n FC5 = tf.layers.Dense(units=inp_dims // 4, activation=\"tanh\", name='fc5')\n FC6 = tf.layers.Dense(units=inp_dims // 2, activation=None, name='fc6')\n FC7 = tf.layers.Dense(units=inp_dims, activation=None, name='fc7')\n X = FC1(drop(X))\n X = FC2(drop(X))\n X = FC3(X)\n fea = X\n X_up = Act(X)\n X_up = FC5(X_up)\n X_up = FC6(drop(X_up))\n pred = FC7(drop(X_up))\n return pred, fea", "def autoencoder(dims, act='relu', init='glorot_uniform'):\n x = tf.keras.layers.Input(shape=(1,), dtype=tf.string)\n\n h = tf.keras.layers.Lambda(UniversalEmbedding, output_shape=(512,))(x)\n\n return Model(inputs=x, outputs=h, name='encoder')", "def create_update(nnet):\n\n if nnet.data._X_val.ndim == 2:\n inputs = T.matrix('inputs')\n elif nnet.data._X_val.ndim == 4:\n inputs = T.tensor4('inputs')\n\n targets = T.ivector('targets')\n\n # compile theano functions\n if Cfg.softmax_loss:\n compile_update_softmax(nnet, inputs, targets)\n elif Cfg.ocsvm_loss:\n if Cfg.rho_fixed:\n compile_update_ocsvm_rho_fixed(nnet, inputs, targets)\n else:\n compile_update_ocsvm(nnet, inputs, targets)\n elif Cfg.svdd_loss:\n compile_update_svdd(nnet, inputs, targets)\n elif Cfg.reconstruction_loss:\n create_autoencoder(nnet)\n else:\n compile_update_default(nnet, inputs, targets)", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', 
activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def autoencoder(dimensions=[784, 512, 256, 64]):\n # %% input to the network\n x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')\n current_input = x\n\n # %% Build the encoder\n encoder = []\n for layer_i, n_output in enumerate(dimensions[1:]):\n n_input = int(current_input.get_shape()[1])\n W = tf.Variable(\n tf.random_uniform([n_input, n_output],\n -1.0 / math.sqrt(n_input),\n 1.0 / math.sqrt(n_input)))\n b = tf.Variable(tf.zeros([n_output]))\n encoder.append(W)\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Latent representation (embedding, neural coding)\n z = current_input\n encoder.reverse()\n\n # Build the decoder using the same weights\n for layer_i, n_output in enumerate(dimensions[:-1][::-1]):\n W = tf.transpose(encoder[layer_i])\n b = tf.Variable(tf.zeros([n_output]))\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Now have the reconstruction through the network\n y = current_input\n\n # Cost function measures pixel-wise difference\n cost = tf.reduce_sum(tf.square(y - x))\n return {'x': x, 'z': z, 'y': y, 'cost': cost}", "def autoencoder3(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the 
decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def _define_encoder(self):\n self.encoder = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, padding=1), # B, 32, 32, 32\n nn.SELU(),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.SELU(),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.SELU(),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.SELU(),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.SELU(),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, self.encoding_shape * 2), # B, z_dim*2\n )", "def build_conv_combo_autoencoder():\n input_img = Input(shape=(84, 84, 1))\n\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c1')(input_img)\n x = MaxPooling2D((2, 2), border_mode='same')(x)\n x = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c2')(x)\n x = MaxPooling2D((3, 3), border_mode='same')(x)\n x = Flatten()(x)\n encoded = Dense(512, activation='relu')(x)\n\n encoded_input = Input((512,))\n d1 = Dense(9408, activation='relu')(encoded_input)\n d2 = Reshape((14, 14, 48))(d1)\n d3 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c5')(d2)\n d4 = UpSampling2D((3, 3))(d3)\n d5 = Convolution2D(48, 4, 4, activation='relu', border_mode='same', name='c6')(d4)\n d6 = UpSampling2D((2, 2))(d5)\n decoded = Convolution2D(1, 4, 4, activation='relu', border_mode='same', name='c9')(d6)\n\n encoder = Model(input=input_img, output=encoded, name='conv_encoder')\n decoder = Model(input=encoded_input, output=decoded, name='conv_decoder')\n\n autoencoder = Sequential(name='full_conv_autoencoder')\n autoencoder.add(encoder)\n autoencoder.add(decoder)\n\n encoder.compile(optimizer='adam', loss='mse')\n encoder.summary()\n decoder.compile(optimizer='adam', loss='mse')\n decoder.summary()\n autoencoder.compile(optimizer='adam', metrics=['mse'], loss='mse')\n autoencoder.summary()\n return autoencoder, encoder, decoder", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, 
activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def train_autoencoder(data, n_iters=10, batch_size=100):\n tqdm.write(f'Training a fully-convolutional autoencoder for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), (testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n model.add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(ConvLayer(15, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (1, 1), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(10, (2, 2), (2, 2), 1)) \\\n .add_layer(TransposedConvLayer(1, (2, 2), (2, 2), 1)) \\\n .add_layer(SSELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainx[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr squared error: {cost}')\n model.backward()\n model.adam_trainstep()\n errors = []\n for j in range(val_batches):\n errors.append(model.forward(valx[j * batch_size:(j + 1) * batch_size],\n valx[j * batch_size:(j + 1) * batch_size]))\n tqdm.write(f'Validation squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')\n\n errors = []\n for i in range(test_batches):\n errors.append(model.forward(testx[i * batch_size:(i + 1) * batch_size],\n testx[i * batch_size:(i + 1) * batch_size]))\n tqdm.write(f'Test squared error: {np.mean(errors)}')\n tqdm.write('-------------------------------------------------------')", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def encoder_decoder_archi_gan(inputs, is_train):\n\n encoder_layers = []\n\n encoded = inputs\n\n 
encoder_layers.append(encoded)\n\n for i in range(config.encoder_layers):\n encoded = encoder_conv_block_gan(encoded, i, is_train)\n encoder_layers.append(encoded)\n \n encoder_layers.reverse()\n\n\n\n decoded = encoder_layers[0]\n\n for i in range(config.encoder_layers):\n decoded = decoder_conv_block_gan(decoded, encoder_layers[i+1], i, is_train)\n\n return decoded", "def run_autoencoder2(experiment,\n X_train, y_train, X_valid, y_valid, X_test, y_test,\n model_path, prev_model_path,\n code_size=600, prev_code_size=1000):\n\n if os.path.isfile(model_path) or \\\n os.path.isfile(model_path + \".meta\"):\n return\n\n # Convert training, validation and test set to the new representation\n prev_model = ae(X_train.shape[1], prev_code_size,\n corruption=0.0, # Disable corruption for conversion\n enc=tf.nn.tanh, dec=None)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n saver = tf.train.Saver(prev_model[\"params\"], write_version=tf.train.SaverDef.V2)\n if os.path.isfile(prev_model_path):\n saver.restore(sess, prev_model_path)\n X_train = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_train})\n X_valid = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_valid})\n X_test = sess.run(prev_model[\"encode\"], feed_dict={prev_model[\"input\"]: X_test})\n del prev_model\n\n reset()\n\n # Hyperparameters\n learning_rate = 0.002\n corruption = 0.68\n ae_enc = tf.nn.tanh\n ae_dec = None\n\n training_iters = 100\n batch_size = 50\n n_classes = 2\n\n # Load model\n model = ae(prev_code_size, code_size, corruption=corruption, enc=ae_enc, dec=ae_dec)\n\n # Use GD for optimization of model cost\n optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.9).minimize(model[\"cost\"])\n\n # Initialize Tensorflow session\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n\n # Define model saver\n saver = tf.train.Saver(model[\"params\"], write_version=tf.train.SaverDef.V2)\n\n # Initialize with an absurd cost for model selection\n prev_costs = np.array([9999999999] * 3)\n\n # Iterate Epochs\n for epoch in range(training_iters):\n\n # randomly shuffle data\n index = np.arange(X_train.shape[0])\n random.shuffle(index)\n\n X_train = X_train[index,]\n y_train = y_train[index]\n\n # Break training set into batches\n batches = range(len(X_train) // batch_size)\n costs = np.zeros((len(batches), 3))\n\n for ib in batches:\n # Compute start and end of batch from training set data array\n from_i = ib * batch_size\n to_i = (ib + 1) * batch_size\n\n # Select current batch\n batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]\n\n # Run optimization and retrieve training cost\n _, cost_train = sess.run(\n [optimizer, model[\"cost\"]],\n feed_dict={\n model[\"input\"]: batch_xs\n }\n )\n\n # Compute validation cost\n cost_valid = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_valid\n }\n )\n\n # Compute test cost\n cost_test = sess.run(\n model[\"cost\"],\n feed_dict={\n model[\"input\"]: X_test\n }\n )\n\n costs[ib] = [cost_train, cost_valid, cost_test]\n\n # Compute the average costs from all batches\n costs = costs.mean(axis=0)\n cost_train, cost_valid, cost_test = costs\n\n # Pretty print training info\n print(format_config(\n \"Exp={experiment}, Model=ae2, Iter={epoch:5d}, Cost={cost_train:.6f} {cost_valid:.6f} {cost_test:.6f}\",\n {\n \"experiment\": experiment,\n \"epoch\": epoch,\n \"cost_train\": cost_train,\n \"cost_valid\": cost_valid,\n \"cost_test\": cost_test,\n 
}\n ))\n\n # Save better model if optimization achieves a lower cost\n if cost_valid < prev_costs[1]:\n print(\"Saving better model\")\n saver.save(sess, model_path)\n prev_costs = costs\n else:\n print", "def deep_autoencoder(X_train_input, X_test_input, encoding_dim = 20):\r\n input_dim = X_train_input.shape[1]\r\n \r\n autoencoder = Sequential()\r\n \r\n # Encoder Layers\r\n autoencoder.add(Dense(4 * encoding_dim, input_shape=(input_dim,), activation='relu'))\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(encoding_dim, activation='relu'))\r\n \r\n # Decoder Layers\r\n autoencoder.add(Dense(2 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(4 * encoding_dim, activation='relu'))\r\n autoencoder.add(Dense(input_dim, activation='sigmoid'))\r\n \r\n autoencoder.compile(optimizer='adam', loss='binary_crossentropy')\r\n autoencoder.fit(X_train_input, X_train_input,\r\n epochs=50,\r\n batch_size=256,\r\n validation_data=(X_test_input, X_test_input))\r\n \r\n input_img = Input(shape=(input_dim,))\r\n encoder_layer1 = autoencoder.layers[0]\r\n encoder_layer2 = autoencoder.layers[1]\r\n encoder_layer3 = autoencoder.layers[2]\r\n encoder = Model(input_img, encoder_layer3(encoder_layer2(encoder_layer1(input_img))))\r\n \r\n X_train_output = encoder.predict(X_train_input)\r\n X_test_output = encoder.predict(X_test_input)\r\n \r\n return X_train_output, X_test_output", "def compile_update_softmax(nnet, inputs, targets):\n\n floatX = Cfg.floatX\n C = Cfg.C\n\n final_layer = nnet.all_layers[-1]\n trainable_params = lasagne.layers.get_all_params(final_layer,\n trainable=True)\n\n # Regularization\n if Cfg.weight_decay:\n l2_penalty = (floatX(0.5) / C) * get_l2_penalty(nnet, Cfg.include_bias)\n else:\n l2_penalty = T.cast(0, dtype='floatX')\n\n # Backpropagation\n prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=False)\n if Cfg.ad_experiment:\n train_loss = T.mean(l_objectives.binary_crossentropy(\n prediction.flatten(), targets),\n dtype='floatX')\n train_acc = T.mean(l_objectives.binary_accuracy(prediction.flatten(),\n targets),\n dtype='floatX')\n else:\n train_loss = T.mean(l_objectives.categorical_crossentropy(prediction,\n targets),\n dtype='floatX')\n train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), targets),\n dtype='floatX')\n\n\n train_obj = T.cast(train_loss + l2_penalty, dtype='floatX')\n updates = get_updates(nnet, train_obj, trainable_params, solver=nnet.solver)\n nnet.backprop = theano.function([inputs, targets],\n [train_obj, train_acc],\n updates=updates)\n\n # Forwardpropagation\n test_prediction = lasagne.layers.get_output(final_layer, inputs=inputs,\n deterministic=True)\n if Cfg.ad_experiment:\n test_loss = T.mean(l_objectives.binary_crossentropy(\n test_prediction.flatten(), targets), dtype='floatX')\n test_acc = T.mean(l_objectives.binary_accuracy(\n test_prediction.flatten(), targets), dtype='floatX')\n else:\n test_loss = T.mean(l_objectives.categorical_crossentropy(\n test_prediction, targets), dtype='floatX')\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), targets),\n dtype='floatX')\n test_obj = T.cast(test_loss + l2_penalty, dtype='floatX')\n nnet.forward = theano.function([inputs, targets],\n [test_obj, test_acc, test_prediction,\n l2_penalty, test_loss])", "def auto_encoder(data: np.ndarray) -> np.ndarray:\n input_img = Input(shape=(784,))\n encoded = Dense(128, activation='relu')(input_img)\n encoded = Dense(64, activation='relu')(encoded)\n encoded = Dense(32, 
activation='relu')(encoded)\n\n decoded = Dense(64, activation='relu')(encoded)\n decoded = Dense(128, activation='relu')(decoded)\n decoded = Dense(784, activation='sigmoid')(decoded)\n\n autoencoder = Model(input_img, decoded)\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.fit(x_train, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test, x_test))", "def define_reparameterization_network(self) -> None:\n self.input_tokens = nn.Parameter(torch.arange(self.prefix_token_num).long(), requires_grad=False) # to allow automatic devicing\n self.wte = nn.Embedding(self.prefix_token_num, self.embed_dim)\n self.control_trans = nn.Sequential(\n nn.Linear(self.embed_dim, self.mid_dim),\n nn.Tanh(),\n nn.Linear(self.mid_dim, self.total_parameters_num//self.prefix_token_num)\n )", "def encoder(enc_input, attn_bias, n_layer, n_head,\n d_key, d_value, d_model, d_inner_hid, pos_enc,\n preporstprocess_dropout, attention_dropout,\n relu_dropout, preprocess_cmd='n',\n postprocess_cmd='da'):\n for i in range(n_layer):\n enc_output = encoder_layer(enc_input, attn_bias, n_head,\n d_key, d_value, d_model,d_inner_hid, pos_enc,\n prepostprocess_dropout, attention_dropout,relu_dropout,\n preprocess_cmd, postprocess_cmd\n )\n enc_input = enc_output\n enc_output = pre_process_layer(enc_output,\n preprocess_cmd, preporstprocess_dropout)\n return enc_output", "def __init__(self, n_inpt, n_hidden, hidden_transfer='identity',\n out_transfer='identity', loss='squared', tied_weights=True,\n batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(AutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def autoencoder(self, data):\n with tf.variable_scope(\"autoencoder\"):\n latent = self.encoder(data)\n _, output = self.decoder(latent)\n\n return output, latent", "def train_autoencoder_and_embed(adv_examples_path=None):\n import tensorflow as tf\n import tensorflow.examples.tutorials.mnist.input_data as input_data\n import matplotlib.pyplot as plt\n\n # load MNIST as before\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n mean_img = np.mean(mnist.train.images, axis=0)\n ae = autoencoder(dimensions=[784, 256, 64])\n\n learning_rate = 0.001\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])\n\n # We create a session to use the graph\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Fit all training data\n batch_size = 50\n n_epochs = 30\n for epoch_i in range(n_epochs):\n for batch_i in range(mnist.train.num_examples // batch_size):\n batch_xs, _ = mnist.train.next_batch(batch_size)\n train = np.array([img - mean_img for img in batch_xs])\n sess.run(optimizer, feed_dict={ae['x']: train})\n print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))\n\n def load_adv_tiff_examples(adv_path):\n data_root = pathlib.Path(adv_path)\n all_image_paths = list(data_root.glob('*.tiff'))\n all_image_paths = [str(path) for path in all_image_paths]\n\n all_adv_images = []\n for p in all_image_paths:\n img = Image.open(p)\n img = np.asarray(img)\n img = img + 0.5\n img = np.ndarray.reshape(img, 28 * 28)\n all_adv_images.append(img)\n return all_adv_images\n\n # 
Get embeddings.\n # If you have too much to get and that it does not fit in memory, you may\n # need to use a batch size or to force to use the CPU rather than the GPU.\n test = [img - mean_img for img in mnist.test.images]\n if adv_examples_path:\n adv = load_adv_tiff_examples(adv_examples_path)\n adv = [img - mean_img for img in adv]\n test.extend(adv)\n\n embedded_data = sess.run(\n ae['z'],\n feed_dict={ae['x']: test}\n )\n return embedded_data, sess", "def _configure_network(self):\r\n def repeat_vector(args):\r\n [layer_to_repeat, sequence_layer] = args\r\n return RepeatVector(K.shape(sequence_layer)[1])(layer_to_repeat)\r\n\r\n encoder_input = Input(shape=(None, self._input_cells))\r\n encoder_output = LSTM(self._latent_space)(encoder_input)\r\n\r\n # Before feeding the decoder, the encoded data must be repeated as many times as time steps in the input data,\r\n # but the decoder does not know beforehand how many timesteps are fed into the autoencoder.\r\n # Check https://github.com/keras-team/keras/issues/7949 for the solution to this. Basically we take it\r\n # dynamically from the input shape with a Lambda layer for the repeat vector.\r\n # The input shape may vary per sample.\r\n\r\n decoder_input = Lambda(repeat_vector, output_shape=(None, self._latent_space))([encoder_output, encoder_input])\r\n\r\n decoder_output = LSTM(self._input_cells, return_sequences=True)(decoder_input)\r\n\r\n self._autoencoder = Model(encoder_input, decoder_output)\r\n self._encoder = Model(encoder_input, encoder_output)\r\n\r\n self._autoencoder.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"accuracy\"])", "def autoencoder(input_dims, filters, latent_dims):\n e_inputs = keras.Input(input_dims)\n d_inputs = keras.Input(latent_dims)\n\n encoder = e_inputs\n for f in filters:\n encoder = keras.layers.Conv2D(\n f, (3, 3), activation='relu', padding='same')(encoder)\n encoder = keras.layers.MaxPooling2D((2, 2), padding='same')(encoder)\n\n decoder = d_inputs\n for i in reversed(range(1, len(filters))):\n decoder = keras.layers.Conv2D(\n filters[i], (3, 3), activation='relu', padding='same')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n\n decoder = keras.layers.Conv2D(\n filters[0], (3, 3), activation='relu', padding='valid')(decoder)\n decoder = keras.layers.UpSampling2D((2, 2))(decoder)\n decoder = keras.layers.Conv2D(input_dims[-1], (3, 3),\n activation='sigmoid',\n padding='same')(decoder)\n\n encoder = keras.Model(e_inputs, encoder)\n decoder = keras.Model(d_inputs, decoder)\n\n auto = keras.Model(e_inputs, decoder(encoder(e_inputs)))\n auto.compile(optimizer=\"adam\", loss=\"binary_crossentropy\")\n\n return encoder, decoder, auto", "def train_autoencoder(sett, tr, ts, data_axis=0, make_predictions=False, dataset=\"CF\"):\n if data_axis == 0:\n train_size = sett[\"nusers\"]\n feature_size = sett[\"nitems\"]\n else:\n train_size = sett[\"nitems\"]\n feature_size = sett[\"nusers\"]\n\n tr, m_tr = prepare_data(tr, data_axis)\n ts, m_ts = prepare_data(ts, data_axis)\n if (data_axis == 1):\n tr = tr.T\n m_tr = m_tr.T\n ts = ts.T\n m_ts = m_ts.T\n print(\"%s Prepared data. 
Data axis is %d\" % (util.get_time(), data_axis))\n\n\n batch = tf.Variable(0) # global step of the optimizer\n # Decay once per epoch, using an exponential schedule starting at 0.01.\n learning_rate = tf.train.exponential_decay(\n sett[\"learning_rate\"], # Base learning rate.\n batch * sett[\"batch_size\"], # Current index into the dataset.\n train_size, # Decay step.\n sett[\"learning_rate_decay\"], # Decay rate.\n staircase=True)\n tf.summary.scalar('learning_rate', learning_rate, collections=[\"autoencoder\"])\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n model = DenoisingAutoencoder(name=\"autoencoder\",\n n_input=feature_size,\n n_hidden=sett[\"hidden_size\"],\n dropout_prob=sett[\"dropout_prob\"],\n gaussian_prob=sett[\"gaussian_prob\"],\n gaussian_std=sett[\"gaussian_std\"],\n sap_prob=sett[\"sap_prob\"],\n alpha_weight=sett[\"alpha\"],\n beta_weight=sett[\"beta\"],\n regl_weight=sett[\"regularization\"],\n optimizer=optimizer,\n rseed=381328,\n batch=batch)\n model.init_saver([batch], os.path.join(sett[\"log_dir\"], \"model.ckpt\"))\n\n batch_size = sett[\"batch_size\"]\n train_indices = range(train_size)\n\n with tf.Session() as s:\n init = tf.global_variables_initializer()\n s.run(init)\n summary_writer = tf.summary.FileWriter(sett[\"log_dir\"], graph=s.graph)\n\n for epoch in range(sett[\"num_epochs\"]):\n print(\"%s Epoch %d\" % (util.get_time(), epoch))\n # Randomize order of data samples at each epoch\n perm_indices = np.random.permutation(train_indices)\n # Index of data sample in this epoch\n run_index = 0\n\n for ibatch in range(train_size // batch_size):\n data_offset = (ibatch * batch_size) % (train_size - batch_size)\n batch_indices = perm_indices[data_offset:(data_offset+batch_size)]\n # Data for this batch\n batch_X = tr[batch_indices,:]\n batch_missing = m_tr[batch_indices,:]\n\n run_index += batch_size\n\n if run_index % sett[\"report_every\"] == 0:\n # print update and save summary for tensorboard\n cost, trerr, tserr, summary = model.fit_summary(s, tr, m_tr, ts, m_ts)\n\n print(\"%s step %d -- loss=%f -- train error=%f -- test error=%f\" %\n (util.get_time(), run_index, cost, trerr, tserr))\n\n summary_writer.add_summary(summary, epoch*train_size + run_index)\n summary_writer.flush()\n sys.stdout.flush()\n else:\n # Perform training\n cost = model.fit(s, batch_X, batch_missing)\n\n # Make predictions and write them to file.\n if make_predictions:\n print(\"%s Making final predictions\" % (util.get_time()))\n preds = model.predictions(s, tr)\n ts_pred = untransform_data(preds, m_ts)\n tr_pred = untransform_data(preds, m_tr)\n if data_axis == 1:\n ts_pred = ts_pred.T\n m_ts = m_ts.T\n tr_pred = tr_pred.T\n m_tr = m_tr.T\n util.write_predict(lambda u, i: ts_pred[u, i], np.invert(m_ts), sett[\"prediction_file\"] + \"_test.csv\")\n util.write_predict(lambda u, i: tr_pred[u, i], np.invert(m_tr), sett[\"prediction_file\"] + \"_train.csv\")\n print(\"%s Predictions written to %s\" % (util.get_time(), sett[\"prediction_file\"]))\n\n return (cost, trerr, tserr)", "def RunAutoEncoder(net, criterion, optimizer, lr_scheduler, train_dl, train_len, test_dl, test_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA, feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, 
epoch_val_classification = [], [], [], []\n best_test = 0 \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n# labels = encoder_out.max(1)[1].float()\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data, TYPE_PROJ, ETA, ETA_STAR, AXIS, device).to(device)\n \n #testing our model\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.eval()\n \n for i,batch in enumerate(tqdm(test_dl)):\n with torch.no_grad():\n x = batch[0]\n labels = batch[1]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda()\n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n print(\"test 
accuracy : \", running_accuracy / test_len, \"Total loss:\", running_loss / float(test_len ),'loss_reconstruction: ', running_reconstruction/ test_len ,\\\n 'loss_classification: ',running_classification/ test_len )\n if running_accuracy > best_test :\n best_net_it = e\n best_test = running_accuracy\n torch.save(net.state_dict(), str(outputPath)+\"/best_net\")\n epoch_val_loss.append(running_loss / test_len )\n epoch_val_reconstruction.append( running_reconstruction / test_len )\n epoch_val_classification.append( running_classification / test_len )\n epoch_val_acc.append(running_accuracy / test_len) \n \n print('Epoch du best net = ', best_net_it) \n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n #res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n #res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n #plt.figure()\n #plt.plot( epoch_acc )\n #plt.plot( epoch_val_acc )\n #plt.title('Total accuracy classification')\n #plt.show()\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , best_test, net", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)" ]
[ "0.65535045", "0.6422185", "0.63456124", "0.6290219", "0.6285251", "0.6273079", "0.6235958", "0.6228888", "0.6139146", "0.6119213", "0.6110673", "0.61030644", "0.6072735", "0.60715985", "0.60660934", "0.6034425", "0.60192865", "0.5931092", "0.5930418", "0.59202427", "0.5898143", "0.58960783", "0.58803874", "0.5854353", "0.58519053", "0.5848351", "0.58136046", "0.58133835", "0.5797672", "0.57908964" ]
0.719997
0